Repository: serai-dex/serai Branch: develop Commit: 737dbcbaa78a Files: 594 Total size: 2.4 MB Directory structure: gitextract_j2hgunrb/ ├── .gitattributes ├── .github/ │ ├── LICENSE │ ├── actions/ │ │ ├── bitcoin/ │ │ │ └── action.yml │ │ ├── build-dependencies/ │ │ │ └── action.yml │ │ ├── monero/ │ │ │ └── action.yml │ │ ├── monero-wallet-rpc/ │ │ │ └── action.yml │ │ └── test-dependencies/ │ │ └── action.yml │ ├── nightly-version │ └── workflows/ │ ├── common-tests.yml │ ├── coordinator-tests.yml │ ├── crypto-tests.yml │ ├── daily-deny.yml │ ├── full-stack-tests.yml │ ├── lint.yml │ ├── message-queue-tests.yml │ ├── mini-tests.yml │ ├── monthly-nightly-update.yml │ ├── networks-tests.yml │ ├── no-std.yml │ ├── pages.yml │ ├── processor-tests.yml │ ├── reproducible-runtime.yml │ └── tests.yml ├── .gitignore ├── .rustfmt.toml ├── AGPL-3.0 ├── CONTRIBUTING.md ├── Cargo.toml ├── LICENSE ├── README.md ├── audits/ │ ├── Cypher Stack crypto March 2023/ │ │ ├── LICENSE │ │ └── README.md │ └── Cypher Stack networks bitcoin August 2023/ │ ├── LICENSE │ └── README.md ├── common/ │ ├── db/ │ │ ├── Cargo.toml │ │ ├── LICENSE │ │ └── src/ │ │ ├── create_db.rs │ │ ├── lib.rs │ │ ├── mem.rs │ │ ├── parity_db.rs │ │ └── rocks.rs │ ├── env/ │ │ ├── Cargo.toml │ │ ├── LICENSE │ │ └── src/ │ │ └── lib.rs │ ├── patchable-async-sleep/ │ │ ├── Cargo.toml │ │ ├── LICENSE │ │ ├── README.md │ │ └── src/ │ │ └── lib.rs │ ├── request/ │ │ ├── Cargo.toml │ │ ├── LICENSE │ │ ├── README.md │ │ └── src/ │ │ ├── lib.rs │ │ ├── request.rs │ │ └── response.rs │ ├── std-shims/ │ │ ├── Cargo.toml │ │ ├── LICENSE │ │ ├── README.md │ │ └── src/ │ │ ├── collections.rs │ │ ├── io.rs │ │ ├── lib.rs │ │ └── sync.rs │ └── zalloc/ │ ├── Cargo.toml │ ├── LICENSE │ ├── build.rs │ └── src/ │ └── lib.rs ├── coordinator/ │ ├── Cargo.toml │ ├── LICENSE │ ├── README.md │ ├── src/ │ │ ├── cosign_evaluator.rs │ │ ├── db.rs │ │ ├── main.rs │ │ ├── p2p.rs │ │ ├── processors.rs │ │ ├── substrate/ │ │ │ ├── cosign.rs 
│ │ │ ├── db.rs │ │ │ └── mod.rs │ │ ├── tests/ │ │ │ ├── mod.rs │ │ │ └── tributary/ │ │ │ ├── chain.rs │ │ │ ├── dkg.rs │ │ │ ├── handle_p2p.rs │ │ │ ├── mod.rs │ │ │ ├── sync.rs │ │ │ └── tx.rs │ │ └── tributary/ │ │ ├── db.rs │ │ ├── handle.rs │ │ ├── mod.rs │ │ ├── scanner.rs │ │ ├── signing_protocol.rs │ │ ├── spec.rs │ │ └── transaction.rs │ └── tributary/ │ ├── Cargo.toml │ ├── LICENSE │ ├── README.md │ ├── src/ │ │ ├── block.rs │ │ ├── blockchain.rs │ │ ├── lib.rs │ │ ├── mempool.rs │ │ ├── merkle.rs │ │ ├── provided.rs │ │ ├── tendermint/ │ │ │ ├── mod.rs │ │ │ └── tx.rs │ │ ├── tests/ │ │ │ ├── block.rs │ │ │ ├── blockchain.rs │ │ │ ├── mempool.rs │ │ │ ├── merkle.rs │ │ │ ├── mod.rs │ │ │ ├── p2p.rs │ │ │ ├── tendermint.rs │ │ │ └── transaction/ │ │ │ ├── mod.rs │ │ │ ├── signed.rs │ │ │ └── tendermint.rs │ │ └── transaction.rs │ └── tendermint/ │ ├── Cargo.toml │ ├── LICENSE │ ├── README.md │ ├── src/ │ │ ├── block.rs │ │ ├── ext.rs │ │ ├── lib.rs │ │ ├── message_log.rs │ │ ├── round.rs │ │ └── time.rs │ └── tests/ │ └── ext.rs ├── crypto/ │ ├── ciphersuite/ │ │ ├── Cargo.toml │ │ ├── LICENSE │ │ ├── README.md │ │ ├── kp256/ │ │ │ ├── Cargo.toml │ │ │ ├── LICENSE │ │ │ ├── README.md │ │ │ └── src/ │ │ │ └── lib.rs │ │ └── src/ │ │ ├── lib.md │ │ └── lib.rs │ ├── dalek-ff-group/ │ │ ├── Cargo.toml │ │ ├── LICENSE │ │ ├── README.md │ │ └── src/ │ │ ├── ciphersuite.rs │ │ ├── field.rs │ │ └── lib.rs │ ├── dkg/ │ │ ├── Cargo.toml │ │ ├── LICENSE │ │ ├── README.md │ │ ├── dealer/ │ │ │ ├── Cargo.toml │ │ │ ├── LICENSE │ │ │ ├── README.md │ │ │ └── src/ │ │ │ └── lib.rs │ │ ├── musig/ │ │ │ ├── Cargo.toml │ │ │ ├── LICENSE │ │ │ ├── README.md │ │ │ └── src/ │ │ │ ├── lib.rs │ │ │ └── tests.rs │ │ ├── pedpop/ │ │ │ ├── Cargo.toml │ │ │ ├── LICENSE │ │ │ ├── README.md │ │ │ └── src/ │ │ │ ├── encryption.rs │ │ │ ├── lib.rs │ │ │ └── tests.rs │ │ ├── promote/ │ │ │ ├── Cargo.toml │ │ │ ├── LICENSE │ │ │ ├── README.md │ │ │ └── src/ │ │ │ ├── lib.rs │ │ │ └── 
tests.rs │ │ ├── recovery/ │ │ │ ├── Cargo.toml │ │ │ ├── LICENSE │ │ │ ├── README.md │ │ │ └── src/ │ │ │ └── lib.rs │ │ └── src/ │ │ └── lib.rs │ ├── dleq/ │ │ ├── Cargo.toml │ │ ├── LICENSE │ │ ├── README.md │ │ └── src/ │ │ ├── cross_group/ │ │ │ ├── aos.rs │ │ │ ├── bits.rs │ │ │ ├── mod.rs │ │ │ ├── scalar.rs │ │ │ └── schnorr.rs │ │ ├── lib.rs │ │ └── tests/ │ │ ├── cross_group/ │ │ │ ├── aos.rs │ │ │ ├── mod.rs │ │ │ ├── scalar.rs │ │ │ └── schnorr.rs │ │ └── mod.rs │ ├── ed448/ │ │ ├── Cargo.toml │ │ ├── LICENSE │ │ ├── README.md │ │ └── src/ │ │ ├── backend.rs │ │ ├── ciphersuite.rs │ │ ├── field.rs │ │ ├── lib.rs │ │ ├── point.rs │ │ └── scalar.rs │ ├── ff-group-tests/ │ │ ├── Cargo.toml │ │ ├── LICENSE │ │ ├── README.md │ │ └── src/ │ │ ├── field.rs │ │ ├── group.rs │ │ ├── lib.rs │ │ └── prime_field.rs │ ├── frost/ │ │ ├── Cargo.toml │ │ ├── LICENSE │ │ ├── README.md │ │ └── src/ │ │ ├── algorithm.rs │ │ ├── curve/ │ │ │ ├── dalek.rs │ │ │ ├── ed448.rs │ │ │ ├── kp256.rs │ │ │ └── mod.rs │ │ ├── lib.rs │ │ ├── nonce.rs │ │ ├── sign.rs │ │ └── tests/ │ │ ├── literal/ │ │ │ ├── dalek.rs │ │ │ ├── ed448.rs │ │ │ ├── kp256.rs │ │ │ ├── mod.rs │ │ │ └── vectors/ │ │ │ ├── frost-ed25519-sha512.json │ │ │ ├── frost-ed448-shake256.json │ │ │ ├── frost-p256-sha256.json │ │ │ ├── frost-ristretto255-sha512.json │ │ │ └── frost-secp256k1-sha256.json │ │ ├── mod.rs │ │ ├── nonces.rs │ │ └── vectors.rs │ ├── multiexp/ │ │ ├── Cargo.toml │ │ ├── LICENSE │ │ ├── README.md │ │ └── src/ │ │ ├── batch.rs │ │ ├── lib.rs │ │ ├── pippenger.rs │ │ ├── straus.rs │ │ └── tests/ │ │ ├── batch.rs │ │ └── mod.rs │ ├── schnorr/ │ │ ├── Cargo.toml │ │ ├── LICENSE │ │ ├── README.md │ │ └── src/ │ │ ├── aggregate.rs │ │ ├── lib.rs │ │ └── tests/ │ │ ├── mod.rs │ │ └── rfc8032.rs │ ├── schnorrkel/ │ │ ├── Cargo.toml │ │ ├── LICENSE │ │ ├── README.md │ │ └── src/ │ │ ├── lib.rs │ │ └── tests.rs │ └── transcript/ │ ├── Cargo.toml │ ├── LICENSE │ ├── README.md │ └── src/ │ ├── lib.rs │ 
├── merlin.rs │ └── tests.rs ├── deny.toml ├── docs/ │ ├── .gitignore │ ├── .ruby-version │ ├── Gemfile │ ├── _config.yml │ ├── amm/ │ │ └── index.md │ ├── cross_chain/ │ │ └── index.md │ ├── economics/ │ │ ├── genesis.md │ │ ├── index.md │ │ ├── post.md │ │ └── pre.md │ ├── index.md │ ├── infrastructure/ │ │ ├── coordinator.md │ │ ├── index.md │ │ ├── message_queue.md │ │ ├── processor.md │ │ └── serai.md │ ├── integrating/ │ │ └── index.md │ ├── protocol_changes/ │ │ └── index.md │ └── validator/ │ └── index.md ├── message-queue/ │ ├── Cargo.toml │ ├── LICENSE │ ├── README.md │ └── src/ │ ├── client.rs │ ├── lib.rs │ ├── main.rs │ ├── messages.rs │ └── queue.rs ├── mini/ │ ├── Cargo.toml │ ├── LICENSE │ ├── README.md │ └── src/ │ ├── lib.rs │ └── tests/ │ ├── activation_race/ │ │ └── mod.rs │ └── mod.rs ├── networks/ │ ├── bitcoin/ │ │ ├── Cargo.toml │ │ ├── LICENSE │ │ ├── README.md │ │ ├── src/ │ │ │ ├── crypto.rs │ │ │ ├── lib.rs │ │ │ ├── rpc.rs │ │ │ ├── tests/ │ │ │ │ ├── crypto.rs │ │ │ │ └── mod.rs │ │ │ └── wallet/ │ │ │ ├── mod.rs │ │ │ └── send.rs │ │ └── tests/ │ │ ├── rpc.rs │ │ ├── runner.rs │ │ └── wallet.rs │ └── ethereum/ │ ├── .gitignore │ ├── Cargo.toml │ ├── LICENSE │ ├── README.md │ ├── alloy-simple-request-transport/ │ │ ├── Cargo.toml │ │ ├── LICENSE │ │ ├── README.md │ │ └── src/ │ │ └── lib.rs │ ├── build.rs │ ├── contracts/ │ │ ├── Deployer.sol │ │ ├── IERC20.sol │ │ ├── Router.sol │ │ ├── Sandbox.sol │ │ └── Schnorr.sol │ ├── relayer/ │ │ ├── Cargo.toml │ │ ├── LICENSE │ │ ├── README.md │ │ └── src/ │ │ └── main.rs │ └── src/ │ ├── abi/ │ │ └── mod.rs │ ├── crypto.rs │ ├── deployer.rs │ ├── erc20.rs │ ├── lib.rs │ ├── machine.rs │ ├── router.rs │ └── tests/ │ ├── abi/ │ │ └── mod.rs │ ├── contracts/ │ │ ├── ERC20.sol │ │ └── Schnorr.sol │ ├── crypto.rs │ ├── mod.rs │ ├── router.rs │ └── schnorr.rs ├── orchestration/ │ ├── Cargo.toml │ ├── README.md │ ├── dev/ │ │ ├── coordinator/ │ │ │ └── .folder │ │ ├── message-queue/ │ │ │ └── 
.folder │ │ ├── networks/ │ │ │ ├── bitcoin/ │ │ │ │ └── run.sh │ │ │ ├── ethereum/ │ │ │ │ └── run.sh │ │ │ ├── ethereum-relayer/ │ │ │ │ └── .folder │ │ │ ├── monero/ │ │ │ │ ├── hashes-v0.18.3.4.txt │ │ │ │ └── run.sh │ │ │ └── monero-wallet-rpc/ │ │ │ └── run.sh │ │ ├── processor/ │ │ │ ├── bitcoin/ │ │ │ │ └── .folder │ │ │ ├── ethereum/ │ │ │ │ └── .folder │ │ │ └── monero/ │ │ │ └── .folder │ │ └── serai/ │ │ └── run.sh │ ├── runtime/ │ │ └── Dockerfile │ ├── src/ │ │ ├── coordinator.rs │ │ ├── docker.rs │ │ ├── ethereum_relayer.rs │ │ ├── main.rs │ │ ├── message_queue.rs │ │ ├── mimalloc.rs │ │ ├── networks/ │ │ │ ├── bitcoin.rs │ │ │ ├── ethereum/ │ │ │ │ ├── consensus/ │ │ │ │ │ ├── lighthouse.rs │ │ │ │ │ ├── mod.rs │ │ │ │ │ └── nimbus.rs │ │ │ │ ├── execution/ │ │ │ │ │ ├── anvil.rs │ │ │ │ │ ├── mod.rs │ │ │ │ │ └── reth.rs │ │ │ │ └── mod.rs │ │ │ ├── mod.rs │ │ │ └── monero.rs │ │ ├── processor.rs │ │ └── serai.rs │ └── testnet/ │ ├── coordinator/ │ │ └── .folder │ ├── message-queue/ │ │ └── .folder │ ├── networks/ │ │ ├── bitcoin/ │ │ │ └── run.sh │ │ ├── ethereum/ │ │ │ ├── consensus/ │ │ │ │ ├── lighthouse/ │ │ │ │ │ └── run.sh │ │ │ │ └── nimbus/ │ │ │ │ └── run.sh │ │ │ ├── execution/ │ │ │ │ ├── geth/ │ │ │ │ │ └── run.sh │ │ │ │ └── reth/ │ │ │ │ └── run.sh │ │ │ └── run.sh │ │ ├── ethereum-relayer/ │ │ │ └── .folder │ │ └── monero/ │ │ ├── hashes-v0.18.3.4.txt │ │ └── run.sh │ ├── processor/ │ │ ├── bitcoin/ │ │ │ └── .folder │ │ ├── ethereum/ │ │ │ └── .folder │ │ └── monero/ │ │ └── .folder │ └── serai/ │ └── run.sh ├── patches/ │ ├── directories-next/ │ │ ├── Cargo.toml │ │ └── src/ │ │ └── lib.rs │ ├── home/ │ │ ├── Cargo.toml │ │ └── src/ │ │ └── lib.rs │ ├── matches/ │ │ ├── Cargo.toml │ │ └── src/ │ │ └── lib.rs │ └── option-ext/ │ ├── Cargo.toml │ └── src/ │ └── lib.rs ├── processor/ │ ├── Cargo.toml │ ├── LICENSE │ ├── README.md │ ├── messages/ │ │ ├── Cargo.toml │ │ ├── LICENSE │ │ └── src/ │ │ └── lib.rs │ └── src/ │ ├── 
additional_key.rs │ ├── batch_signer.rs │ ├── coordinator.rs │ ├── cosigner.rs │ ├── db.rs │ ├── key_gen.rs │ ├── lib.rs │ ├── main.rs │ ├── multisigs/ │ │ ├── db.rs │ │ ├── mod.rs │ │ ├── scanner.rs │ │ └── scheduler/ │ │ ├── mod.rs │ │ ├── smart_contract.rs │ │ └── utxo.rs │ ├── networks/ │ │ ├── bitcoin.rs │ │ ├── ethereum.rs │ │ ├── mod.rs │ │ └── monero.rs │ ├── plan.rs │ ├── signer.rs │ ├── slash_report_signer.rs │ └── tests/ │ ├── addresses.rs │ ├── batch_signer.rs │ ├── cosigner.rs │ ├── key_gen.rs │ ├── literal/ │ │ └── mod.rs │ ├── mod.rs │ ├── scanner.rs │ ├── signer.rs │ └── wallet.rs ├── rust-toolchain.toml ├── spec/ │ ├── DKG Exclusions.md │ ├── Getting Started.md │ ├── Serai.md │ ├── coordinator/ │ │ ├── Coordinator.md │ │ └── Tributary.md │ ├── cryptography/ │ │ ├── Distributed Key Generation.md │ │ └── FROST.md │ ├── integrations/ │ │ ├── Bitcoin.md │ │ ├── Ethereum.md │ │ ├── Instructions.md │ │ └── Monero.md │ ├── policy/ │ │ └── Canonical Chain.md │ ├── processor/ │ │ ├── Multisig Rotation.md │ │ ├── Processor.md │ │ ├── Scanning.md │ │ └── UTXO Management.md │ └── protocol/ │ ├── Constants.md │ ├── In Instructions.md │ └── Validator Sets.md ├── substrate/ │ ├── abi/ │ │ ├── Cargo.toml │ │ ├── LICENSE │ │ └── src/ │ │ ├── babe.rs │ │ ├── coins.rs │ │ ├── dex.rs │ │ ├── economic_security.rs │ │ ├── emissions.rs │ │ ├── genesis_liquidity.rs │ │ ├── grandpa.rs │ │ ├── in_instructions.rs │ │ ├── lib.rs │ │ ├── liquidity_tokens.rs │ │ ├── signals.rs │ │ ├── system.rs │ │ ├── timestamp.rs │ │ ├── tx.rs │ │ └── validator_sets.rs │ ├── client/ │ │ ├── Cargo.toml │ │ ├── LICENSE │ │ ├── src/ │ │ │ ├── lib.rs │ │ │ ├── networks/ │ │ │ │ ├── bitcoin.rs │ │ │ │ ├── mod.rs │ │ │ │ └── monero.rs │ │ │ ├── serai/ │ │ │ │ ├── coins.rs │ │ │ │ ├── dex.rs │ │ │ │ ├── genesis_liquidity.rs │ │ │ │ ├── in_instructions.rs │ │ │ │ ├── liquidity_tokens.rs │ │ │ │ ├── mod.rs │ │ │ │ └── validator_sets.rs │ │ │ └── tests/ │ │ │ ├── mod.rs │ │ │ └── networks/ │ │ │ ├── 
bitcoin.rs │ │ │ ├── mod.rs │ │ │ └── monero.rs │ │ └── tests/ │ │ ├── batch.rs │ │ ├── burn.rs │ │ ├── common/ │ │ │ ├── dex.rs │ │ │ ├── genesis_liquidity.rs │ │ │ ├── in_instructions.rs │ │ │ ├── mod.rs │ │ │ ├── tx.rs │ │ │ └── validator_sets.rs │ │ ├── dex.rs │ │ ├── dht.rs │ │ ├── emissions.rs │ │ ├── genesis_liquidity.rs │ │ ├── time.rs │ │ └── validator_sets.rs │ ├── coins/ │ │ ├── pallet/ │ │ │ ├── Cargo.toml │ │ │ ├── LICENSE │ │ │ └── src/ │ │ │ ├── lib.rs │ │ │ ├── mock.rs │ │ │ └── tests.rs │ │ └── primitives/ │ │ ├── Cargo.toml │ │ ├── LICENSE │ │ └── src/ │ │ └── lib.rs │ ├── dex/ │ │ └── pallet/ │ │ ├── Cargo.toml │ │ ├── LICENSE-AGPL3 │ │ ├── LICENSE-APACHE2 │ │ └── src/ │ │ ├── benchmarking.rs │ │ ├── lib.rs │ │ ├── mock.rs │ │ ├── tests.rs │ │ ├── types.rs │ │ └── weights.rs │ ├── economic-security/ │ │ └── pallet/ │ │ ├── Cargo.toml │ │ ├── LICENSE │ │ └── src/ │ │ └── lib.rs │ ├── emissions/ │ │ ├── pallet/ │ │ │ ├── Cargo.toml │ │ │ ├── LICENSE │ │ │ └── src/ │ │ │ └── lib.rs │ │ └── primitives/ │ │ ├── Cargo.toml │ │ ├── LICENSE │ │ └── src/ │ │ └── lib.rs │ ├── genesis-liquidity/ │ │ ├── pallet/ │ │ │ ├── Cargo.toml │ │ │ ├── LICENSE │ │ │ └── src/ │ │ │ └── lib.rs │ │ └── primitives/ │ │ ├── Cargo.toml │ │ ├── LICENSE │ │ └── src/ │ │ └── lib.rs │ ├── in-instructions/ │ │ ├── pallet/ │ │ │ ├── Cargo.toml │ │ │ ├── LICENSE │ │ │ └── src/ │ │ │ └── lib.rs │ │ └── primitives/ │ │ ├── Cargo.toml │ │ ├── LICENSE │ │ └── src/ │ │ ├── lib.rs │ │ └── shorthand.rs │ ├── node/ │ │ ├── Cargo.toml │ │ ├── LICENSE │ │ ├── build.rs │ │ └── src/ │ │ ├── chain_spec.rs │ │ ├── cli.rs │ │ ├── command.rs │ │ ├── keystore.rs │ │ ├── main.rs │ │ ├── rpc.rs │ │ └── service.rs │ ├── primitives/ │ │ ├── Cargo.toml │ │ ├── LICENSE │ │ └── src/ │ │ ├── account.rs │ │ ├── amount.rs │ │ ├── balance.rs │ │ ├── block.rs │ │ ├── constants.rs │ │ ├── lib.rs │ │ └── networks.rs │ ├── runtime/ │ │ ├── Cargo.toml │ │ ├── LICENSE │ │ ├── build.rs │ │ └── src/ │ │ ├── abi.rs │ 
│ └── lib.rs │ ├── signals/ │ │ ├── pallet/ │ │ │ ├── Cargo.toml │ │ │ ├── LICENSE │ │ │ └── src/ │ │ │ └── lib.rs │ │ └── primitives/ │ │ ├── Cargo.toml │ │ ├── LICENSE │ │ └── src/ │ │ └── lib.rs │ └── validator-sets/ │ ├── pallet/ │ │ ├── Cargo.toml │ │ ├── LICENSE │ │ └── src/ │ │ └── lib.rs │ └── primitives/ │ ├── Cargo.toml │ ├── LICENSE │ └── src/ │ └── lib.rs └── tests/ ├── coordinator/ │ ├── Cargo.toml │ ├── LICENSE │ └── src/ │ ├── lib.rs │ └── tests/ │ ├── batch.rs │ ├── key_gen.rs │ ├── mod.rs │ ├── rotation.rs │ └── sign.rs ├── docker/ │ ├── Cargo.toml │ ├── LICENSE │ ├── README.md │ └── src/ │ └── lib.rs ├── full-stack/ │ ├── Cargo.toml │ ├── LICENSE │ └── src/ │ ├── lib.rs │ └── tests/ │ ├── mint_and_burn.rs │ └── mod.rs ├── message-queue/ │ ├── Cargo.toml │ ├── LICENSE │ └── src/ │ └── lib.rs ├── no-std/ │ ├── Cargo.toml │ ├── LICENSE │ ├── README.md │ └── src/ │ └── lib.rs ├── processor/ │ ├── Cargo.toml │ ├── LICENSE │ └── src/ │ ├── lib.rs │ ├── networks.rs │ └── tests/ │ ├── batch.rs │ ├── key_gen.rs │ ├── mod.rs │ └── send.rs └── reproducible-runtime/ ├── Cargo.toml ├── LICENSE └── src/ └── lib.rs ================================================ FILE CONTENTS ================================================ ================================================ FILE: .gitattributes ================================================ # Auto detect text files and perform LF normalization * text=auto * text eol=lf *.pdf binary ================================================ FILE: .github/LICENSE ================================================ MIT License Copyright (c) 2022-2023 Luke Parker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the 
Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: .github/actions/bitcoin/action.yml ================================================ name: bitcoin-regtest description: Spawns a regtest Bitcoin daemon inputs: version: description: "Version to download and run" required: false default: "27.0" runs: using: "composite" steps: - name: Bitcoin Daemon Cache id: cache-bitcoind uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 with: path: bitcoin.tar.gz key: bitcoind-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }} - name: Download the Bitcoin Daemon if: steps.cache-bitcoind.outputs.cache-hit != 'true' shell: bash run: | RUNNER_OS=linux RUNNER_ARCH=x86_64 FILE=bitcoin-${{ inputs.version }}-$RUNNER_ARCH-$RUNNER_OS-gnu.tar.gz wget https://bitcoincore.org/bin/bitcoin-core-${{ inputs.version }}/$FILE mv $FILE bitcoin.tar.gz - name: Extract the Bitcoin Daemon shell: bash run: | tar xzvf bitcoin.tar.gz cd bitcoin-${{ inputs.version }} sudo mv bin/* /bin && sudo mv lib/* /lib - name: Bitcoin Regtest Daemon shell: bash run: PATH=$PATH:/usr/bin ./orchestration/dev/networks/bitcoin/run.sh -daemon ================================================ FILE: .github/actions/build-dependencies/action.yml ================================================ name: build-dependencies description: Installs build dependencies for 
Serai runs: using: "composite" steps: - name: Remove unused packages shell: bash run: | # Ensure the repositories are synced sudo apt update -y # Actually perform the removals sudo apt remove -y "*powershell*" "*nuget*" "*bazel*" "*ansible*" "*terraform*" "*heroku*" "*aws*" azure-cli sudo apt remove -y "*nodejs*" "*npm*" "*yarn*" "*java*" "*kotlin*" "*golang*" "*swift*" "*julia*" "*fortran*" "*android*" sudo apt remove -y "*apache2*" "*nginx*" "*firefox*" "*chromium*" "*chrome*" "*edge*" sudo apt remove -y --allow-remove-essential -f shim-signed *python3* # This removal command requires the prior removals due to unmet dependencies otherwise sudo apt remove -y "*qemu*" "*sql*" "*texinfo*" "*imagemagick*" # Reinstall python3 as a general dependency of a functional operating system sudo apt install -y python3 --fix-missing if: runner.os == 'Linux' - name: Remove unused packages shell: bash run: | (gem uninstall -aIx) || (exit 0) brew uninstall --force "*msbuild*" "*powershell*" "*nuget*" "*bazel*" "*ansible*" "*terraform*" "*heroku*" "*aws*" azure-cli brew uninstall --force "*nodejs*" "*npm*" "*yarn*" "*java*" "*kotlin*" "*golang*" "*swift*" "*julia*" "*fortran*" "*android*" brew uninstall --force "*apache2*" "*nginx*" "*firefox*" "*chromium*" "*chrome*" "*edge*" brew uninstall --force "*qemu*" "*sql*" "*texinfo*" "*imagemagick*" brew cleanup if: runner.os == 'macOS' - name: Install dependencies shell: bash run: | if [ "$RUNNER_OS" == "Linux" ]; then sudo apt install -y ca-certificates protobuf-compiler libclang-dev elif [ "$RUNNER_OS" == "Windows" ]; then choco install protoc elif [ "$RUNNER_OS" == "macOS" ]; then brew install protobuf llvm HOMEBREW_ROOT_PATH=/opt/homebrew # Apple Silicon if [ $(uname -m) = "x86_64" ]; then HOMEBREW_ROOT_PATH=/usr/local; fi # Intel ls $HOMEBREW_ROOT_PATH/opt/llvm/lib | grep "libclang.dylib" # Make sure this installed `libclang` echo "DYLD_LIBRARY_PATH=$HOMEBREW_ROOT_PATH/opt/llvm/lib:$DYLD_LIBRARY_PATH" >> "$GITHUB_ENV" fi - name: 
Install solc shell: bash run: | cargo +1.89 install svm-rs --version =0.5.18 svm install 0.8.26 svm use 0.8.26 - name: Remove preinstalled Docker shell: bash run: | docker system prune -a --volumes sudo apt remove -y *docker* # Install uidmap which will be required for the explicitly installed Docker sudo apt install uidmap if: runner.os == 'Linux' - name: Update system dependencies shell: bash run: | sudo apt update -y sudo apt upgrade -y sudo apt autoremove -y sudo apt clean if: runner.os == 'Linux' - name: Install rootless Docker uses: docker/setup-docker-action@b60f85385d03ac8acfca6d9996982511d8620a19 with: rootless: true set-host: true if: runner.os == 'Linux' # - name: Cache Rust # uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 ================================================ FILE: .github/actions/monero/action.yml ================================================ name: monero-regtest description: Spawns a regtest Monero daemon inputs: version: description: "Version to download and run" required: false default: v0.18.3.4 runs: using: "composite" steps: - name: Monero Daemon Cache id: cache-monerod uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 with: path: /usr/bin/monerod key: monerod-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }} - name: Download the Monero Daemon if: steps.cache-monerod.outputs.cache-hit != 'true' # Calculates OS/ARCH to demonstrate it, yet then locks to linux-x64 due # to the contained folder not following the same naming scheme and # requiring further expansion not worth doing right now shell: bash run: | RUNNER_OS=${{ runner.os }} RUNNER_ARCH=${{ runner.arch }} RUNNER_OS=${RUNNER_OS,,} RUNNER_ARCH=${RUNNER_ARCH,,} RUNNER_OS=linux RUNNER_ARCH=x64 FILE=monero-$RUNNER_OS-$RUNNER_ARCH-${{ inputs.version }}.tar.bz2 wget https://downloads.getmonero.org/cli/$FILE tar -xvf $FILE sudo mv monero-x86_64-linux-gnu-${{ inputs.version }}/monerod /usr/bin/monerod sudo chmod 777 /usr/bin/monerod sudo chmod +x 
/usr/bin/monerod - name: Monero Regtest Daemon shell: bash run: PATH=$PATH:/usr/bin ./orchestration/dev/networks/monero/run.sh --detach ================================================ FILE: .github/actions/monero-wallet-rpc/action.yml ================================================ name: monero-wallet-rpc description: Spawns a Monero Wallet-RPC. inputs: version: description: "Version to download and run" required: false default: v0.18.3.4 runs: using: "composite" steps: - name: Monero Wallet RPC Cache id: cache-monero-wallet-rpc uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 with: path: monero-wallet-rpc key: monero-wallet-rpc-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }} - name: Download the Monero Wallet RPC if: steps.cache-monero-wallet-rpc.outputs.cache-hit != 'true' # Calculates OS/ARCH to demonstrate it, yet then locks to linux-x64 due # to the contained folder not following the same naming scheme and # requiring further expansion not worth doing right now shell: bash run: | RUNNER_OS=${{ runner.os }} RUNNER_ARCH=${{ runner.arch }} RUNNER_OS=${RUNNER_OS,,} RUNNER_ARCH=${RUNNER_ARCH,,} RUNNER_OS=linux RUNNER_ARCH=x64 FILE=monero-$RUNNER_OS-$RUNNER_ARCH-${{ inputs.version }}.tar.bz2 wget https://downloads.getmonero.org/cli/$FILE tar -xvf $FILE mv monero-x86_64-linux-gnu-${{ inputs.version }}/monero-wallet-rpc monero-wallet-rpc - name: Monero Wallet RPC shell: bash run: | ./monero-wallet-rpc --allow-mismatched-daemon-version \ --daemon-address 0.0.0.0:18081 --daemon-login serai:seraidex \ --disable-rpc-login --rpc-bind-port 18082 \ --wallet-dir ./ \ --detach ================================================ FILE: .github/actions/test-dependencies/action.yml ================================================ name: test-dependencies description: Installs test dependencies for Serai inputs: monero-version: description: "Monero version to download and run as a regtest node" required: false default: v0.18.3.4 bitcoin-version: description: 
"Bitcoin version to download and run as a regtest node" required: false default: "27.1" runs: using: "composite" steps: - name: Install Build Dependencies uses: ./.github/actions/build-dependencies - name: Install Foundry uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773 with: version: nightly-f625d0fa7c51e65b4bf1e8f7931cd1c6e2e285e9 cache: false - name: Run a Monero Regtest Node uses: ./.github/actions/monero with: version: ${{ inputs.monero-version }} - name: Run a Bitcoin Regtest Node uses: ./.github/actions/bitcoin with: version: ${{ inputs.bitcoin-version }} - name: Run a Monero Wallet-RPC uses: ./.github/actions/monero-wallet-rpc ================================================ FILE: .github/nightly-version ================================================ nightly-2025-11-01 ================================================ FILE: .github/workflows/common-tests.yml ================================================ name: common/ Tests on: push: branches: - develop paths: - "common/**" pull_request: paths: - "common/**" workflow_dispatch: jobs: test-common: runs-on: ubuntu-latest steps: - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac - name: Build Dependencies uses: ./.github/actions/build-dependencies - name: Run Tests run: | GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \ -p std-shims \ -p zalloc \ -p patchable-async-sleep \ -p serai-db \ -p serai-env \ -p simple-request ================================================ FILE: .github/workflows/coordinator-tests.yml ================================================ name: Coordinator Tests on: push: branches: - develop paths: - "common/**" - "crypto/**" - "networks/**" - "message-queue/**" - "coordinator/**" - "orchestration/**" - "tests/docker/**" - "tests/coordinator/**" pull_request: paths: - "common/**" - "crypto/**" - "networks/**" - "message-queue/**" - "coordinator/**" - "orchestration/**" - "tests/docker/**" - "tests/coordinator/**" workflow_dispatch: 
jobs: build: runs-on: ubuntu-latest steps: - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac - name: Install Build Dependencies uses: ./.github/actions/build-dependencies - name: Run coordinator Docker tests run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-coordinator-tests ================================================ FILE: .github/workflows/crypto-tests.yml ================================================ name: crypto/ Tests on: push: branches: - develop paths: - "common/**" - "crypto/**" pull_request: paths: - "common/**" - "crypto/**" workflow_dispatch: jobs: test-crypto: runs-on: ubuntu-latest steps: - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac - name: Build Dependencies uses: ./.github/actions/build-dependencies - name: Run Tests run: | GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \ -p flexible-transcript \ -p ff-group-tests \ -p dalek-ff-group \ -p minimal-ed448 \ -p ciphersuite \ -p ciphersuite-kp256 \ -p multiexp \ -p schnorr-signatures \ -p dleq \ -p dkg \ -p dkg-recovery \ -p dkg-dealer \ -p dkg-promote \ -p dkg-musig \ -p dkg-pedpop \ -p modular-frost \ -p frost-schnorrkel ================================================ FILE: .github/workflows/daily-deny.yml ================================================ name: Daily Deny Check on: schedule: - cron: "0 0 * * *" jobs: deny: name: Run cargo deny runs-on: ubuntu-latest steps: - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac - name: Advisory Cache uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 with: path: ~/.cargo/advisory-db key: rust-advisory-db - name: Install cargo deny run: cargo +1.89 install cargo-deny --version =0.18.3 - name: Run cargo deny run: cargo deny -L error --all-features check --hide-inclusion-graph ================================================ FILE: .github/workflows/full-stack-tests.yml ================================================ name: Full Stack Tests on: push: branches: 
- develop pull_request: workflow_dispatch: jobs: build: runs-on: ubuntu-latest steps: - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac - name: Install Build Dependencies uses: ./.github/actions/build-dependencies - name: Run Full Stack Docker tests run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-full-stack-tests ================================================ FILE: .github/workflows/lint.yml ================================================ name: Lint on: push: branches: - develop pull_request: workflow_dispatch: jobs: clippy: strategy: matrix: os: [ubuntu-latest, macos-15-intel, macos-latest, windows-latest] runs-on: ${{ matrix.os }} steps: - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac - name: Get nightly version to use id: nightly shell: bash run: echo "version=$(cat .github/nightly-version)" >> $GITHUB_OUTPUT - name: Build Dependencies uses: ./.github/actions/build-dependencies - name: Install nightly rust run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-src -c clippy - name: Run Clippy run: cargo +${{ steps.nightly.outputs.version }} clippy --all-features --all-targets -- -D warnings -A clippy::items_after_test_module # Also verify the lockfile isn't dirty # This happens when someone edits a Cargo.toml yet doesn't do anything # which causes the lockfile to be updated # The above clippy run will cause it to be updated, so checking there's # no differences present now performs the desired check - name: Verify lockfile shell: bash run: git diff | wc -l | LC_ALL="en_US.utf8" grep -x -e "^[ ]*0" deny: runs-on: ubuntu-latest steps: - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac - name: Advisory Cache uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 with: path: ~/.cargo/advisory-db key: rust-advisory-db - name: Install cargo deny run: cargo +1.89 install cargo-deny --version =0.18.4 - name: Run cargo deny run: cargo 
deny -L error --all-features check --hide-inclusion-graph fmt: runs-on: ubuntu-latest steps: - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac - name: Get nightly version to use id: nightly shell: bash run: echo "version=$(cat .github/nightly-version)" >> $GITHUB_OUTPUT - name: Install nightly rust run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -c rustfmt - name: Run rustfmt run: cargo +${{ steps.nightly.outputs.version }} fmt -- --check machete: runs-on: ubuntu-latest steps: - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac - name: Verify all dependencies are in use run: | cargo +1.89 install cargo-machete --version =0.8.0 cargo +1.89 machete ================================================ FILE: .github/workflows/message-queue-tests.yml ================================================ name: Message Queue Tests on: push: branches: - develop paths: - "common/**" - "crypto/**" - "message-queue/**" - "orchestration/**" - "tests/docker/**" - "tests/message-queue/**" pull_request: paths: - "common/**" - "crypto/**" - "message-queue/**" - "orchestration/**" - "tests/docker/**" - "tests/message-queue/**" workflow_dispatch: jobs: build: runs-on: ubuntu-latest steps: - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac - name: Install Build Dependencies uses: ./.github/actions/build-dependencies - name: Run message-queue Docker tests run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-message-queue-tests ================================================ FILE: .github/workflows/mini-tests.yml ================================================ name: mini/ Tests on: push: branches: - develop paths: - "mini/**" pull_request: paths: - "mini/**" workflow_dispatch: jobs: test-common: runs-on: ubuntu-latest steps: - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac - name: Build Dependencies uses: ./.github/actions/build-dependencies - name: Run Tests run: 
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p mini-serai ================================================ FILE: .github/workflows/monthly-nightly-update.yml ================================================ name: Monthly Nightly Update on: schedule: - cron: "0 0 1 * *" jobs: update: name: Update nightly runs-on: ubuntu-latest steps: - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac with: submodules: "recursive" - name: Write nightly version run: echo $(date +"nightly-%Y-%m"-01) > .github/nightly-version - name: Create the commit run: | git config user.name "GitHub Actions" git config user.email "<>" git checkout -b $(date +"nightly-%Y-%m") git add .github/nightly-version git commit -m "Update nightly" git push -u origin $(date +"nightly-%Y-%m") - name: Pull Request uses: actions/github-script@d7906e4ad0b1822421a7e6a35d5ca353c962f410 with: script: | const { repo, owner } = context.repo; const result = await github.rest.pulls.create({ title: (new Date()).toLocaleString( false, { month: "long", year: "numeric" } ) + " - Rust Nightly Update", owner, repo, head: "nightly-" + (new Date()).toISOString().split("-").splice(0, 2).join("-"), base: "develop", body: "PR auto-generated by a GitHub workflow." 
}); github.rest.issues.addLabels({ owner, repo, issue_number: result.data.number, labels: ["improvement"] }); ================================================ FILE: .github/workflows/networks-tests.yml ================================================ name: networks/ Tests on: push: branches: - develop paths: - "common/**" - "crypto/**" - "networks/**" pull_request: paths: - "common/**" - "crypto/**" - "networks/**" workflow_dispatch: jobs: test-networks: runs-on: ubuntu-latest steps: - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac - name: Test Dependencies uses: ./.github/actions/test-dependencies - name: Run Tests run: | GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \ -p bitcoin-serai \ -p alloy-simple-request-transport \ -p ethereum-serai \ -p serai-ethereum-relayer \ ================================================ FILE: .github/workflows/no-std.yml ================================================ name: no-std build on: push: branches: - develop paths: - "common/**" - "crypto/**" - "networks/**" - "tests/no-std/**" pull_request: paths: - "common/**" - "crypto/**" - "networks/**" - "tests/no-std/**" workflow_dispatch: jobs: build: runs-on: ubuntu-latest steps: - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac - name: Install Build Dependencies uses: ./.github/actions/build-dependencies - name: Install RISC-V Toolchain run: sudo apt update && sudo apt install -y gcc-riscv64-unknown-elf gcc-multilib && rustup target add riscv32imac-unknown-none-elf - name: Verify no-std builds run: CFLAGS=-I/usr/include cargo build --target riscv32imac-unknown-none-elf -p serai-no-std-tests ================================================ FILE: .github/workflows/pages.yml ================================================ # MIT License # # Copyright (c) 2022 just-the-docs # Copyright (c) 2022-2024 Luke Parker # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation 
files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. name: Deploy Rust docs and Jekyll site to Pages on: push: branches: - "develop" workflow_dispatch: permissions: contents: read pages: write id-token: write # Only allow one concurrent deployment concurrency: group: "pages" cancel-in-progress: true jobs: # Build job build: runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac - name: Setup Ruby uses: ruby/setup-ruby@44511735964dcb71245e7e55f72539531f7bc0eb with: bundler-cache: true cache-version: 0 working-directory: "${{ github.workspace }}/docs" - name: Setup Pages id: pages uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b - name: Build with Jekyll run: cd ${{ github.workspace }}/docs && bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}" env: JEKYLL_ENV: production - name: Get nightly version to use id: nightly shell: bash run: echo "version=$(cat .github/nightly-version)" >> $GITHUB_OUTPUT - name: Build Dependencies uses: ./.github/actions/build-dependencies - name: Build Rust docs run: | rustup
toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-docs RUSTDOCFLAGS="--cfg docsrs" cargo +${{ steps.nightly.outputs.version }} doc --workspace --no-deps --all-features mv target/doc docs/_site/rust - name: Upload artifact uses: actions/upload-pages-artifact@7b1f4a764d45c48632c6b24a0339c27f5614fb0b with: path: "docs/_site/" # Deployment job deploy: environment: name: github-pages url: ${{ steps.deployment.outputs.page_url }} runs-on: ubuntu-latest needs: build steps: - name: Deploy to GitHub Pages id: deployment uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e ================================================ FILE: .github/workflows/processor-tests.yml ================================================ name: Processor Tests on: push: branches: - develop paths: - "common/**" - "crypto/**" - "networks/**" - "message-queue/**" - "processor/**" - "orchestration/**" - "tests/docker/**" - "tests/processor/**" pull_request: paths: - "common/**" - "crypto/**" - "networks/**" - "message-queue/**" - "processor/**" - "orchestration/**" - "tests/docker/**" - "tests/processor/**" workflow_dispatch: jobs: build: runs-on: ubuntu-latest steps: - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac - name: Install Build Dependencies uses: ./.github/actions/build-dependencies - name: Run processor Docker tests run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-processor-tests ================================================ FILE: .github/workflows/reproducible-runtime.yml ================================================ name: Reproducible Runtime on: push: branches: - develop paths: - "Cargo.lock" - "common/**" - "crypto/**" - "substrate/**" - "orchestration/runtime/**" - "tests/reproducible-runtime/**" pull_request: paths: - "Cargo.lock" - "common/**" - "crypto/**" - "substrate/**" - "orchestration/runtime/**" - "tests/reproducible-runtime/**" workflow_dispatch: jobs: build: runs-on: 
ubuntu-latest steps: - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac - name: Install Build Dependencies uses: ./.github/actions/build-dependencies - name: Run Reproducible Runtime tests run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-reproducible-runtime-tests ================================================ FILE: .github/workflows/tests.yml ================================================ name: Tests on: push: branches: - develop paths: - "common/**" - "crypto/**" - "networks/**" - "message-queue/**" - "processor/**" - "coordinator/**" - "substrate/**" pull_request: paths: - "common/**" - "crypto/**" - "networks/**" - "message-queue/**" - "processor/**" - "coordinator/**" - "substrate/**" workflow_dispatch: jobs: test-infra: runs-on: ubuntu-latest steps: - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac - name: Build Dependencies uses: ./.github/actions/build-dependencies - name: Run Tests run: | GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \ -p serai-message-queue \ -p serai-processor-messages \ -p serai-processor \ -p tendermint-machine \ -p tributary-chain \ -p serai-coordinator \ -p serai-orchestrator \ -p serai-docker-tests test-substrate: runs-on: ubuntu-latest steps: - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac - name: Build Dependencies uses: ./.github/actions/build-dependencies - name: Run Tests run: | GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \ -p serai-primitives \ -p serai-coins-primitives \ -p serai-coins-pallet \ -p serai-dex-pallet \ -p serai-validator-sets-primitives \ -p serai-validator-sets-pallet \ -p serai-genesis-liquidity-primitives \ -p serai-genesis-liquidity-pallet \ -p serai-emissions-primitives \ -p serai-emissions-pallet \ -p serai-economic-security-pallet \ -p serai-in-instructions-primitives \ -p serai-in-instructions-pallet \ -p serai-signals-primitives \ -p serai-signals-pallet \ -p serai-abi \ -p serai-runtime \ -p serai-node 
test-serai-client: runs-on: ubuntu-latest steps: - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac - name: Build Dependencies uses: ./.github/actions/build-dependencies - name: Run Tests run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client ================================================ FILE: .gitignore ================================================ target # Don't commit any `Cargo.lock` which aren't the workspace's Cargo.lock !./Cargo.lock # Don't commit any `Dockerfile`, as they're auto-generated, except the only one which isn't Dockerfile Dockerfile.fast-epoch !orchestration/runtime/Dockerfile .test-logs .vscode ================================================ FILE: .rustfmt.toml ================================================ edition = "2021" tab_spaces = 2 max_width = 100 # Let the developer decide based on the 100 char line limit use_small_heuristics = "Max" error_on_line_overflow = true error_on_unformatted = true imports_granularity = "Crate" reorder_imports = false reorder_modules = false unstable_features = true spaces_around_ranges = true binop_separator = "Back" ================================================ FILE: AGPL-3.0 ================================================ GNU AFFERO GENERAL PUBLIC LICENSE Version 3, 19 November 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU Affero General Public License is a free, copyleft license for software and other kinds of works, specifically designed to ensure cooperation with the community in the case of network server software. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. 
By contrast, our General Public Licenses are intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. Developers that use our General Public Licenses protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License which gives you legal permission to copy, distribute and/or modify the software. A secondary benefit of defending all users' freedom is that improvements made in alternate versions of the program, if they receive widespread use, become available for other developers to incorporate. Many developers of free software are heartened and encouraged by the resulting cooperation. However, in the case of software used on network servers, this result may fail to come about. The GNU General Public License permits making a modified version and letting the public access it on a server without ever releasing its source code to the public. The GNU Affero General Public License is designed specifically to ensure that, in such cases, the modified source code becomes available to the community. It requires the operator of a network server to provide the source code of the modified version running there to the users of that server. Therefore, public use of a modified version, on a publicly accessible server, gives the public access to the source code of the modified version. An older license, called the Affero General Public License and published by Affero, was designed to accomplish similar goals. 
This is a different license, not a version of the Affero GPL, but Affero has released a new version of the Affero GPL which permits relicensing under this license. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU Affero General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. 
If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. 
For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". 
c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. 
b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. 
A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. 
But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. 
Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. 
The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. 
"Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. 
If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Remote Network Interaction; Use with the GNU General Public License. Notwithstanding any other provision of this License, if you modify the Program, your modified version must prominently offer all users interacting with it remotely through a computer network (if your version supports such interaction) an opportunity to receive the Corresponding Source of your version by providing access to the Corresponding Source from a network server at no charge, through some standard or customary means of facilitating copying of software. This Corresponding Source shall include the Corresponding Source for any work covered by version 3 of the GNU General Public License that is incorporated pursuant to the following paragraph. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the work with which it is combined will remain governed by version 3 of the GNU General Public License. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU Affero General Public License from time to time. 
Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU Affero General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU Affero General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU Affero General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. <one line to give the program's name and a brief idea of what it does.> Copyright (C) <year> <name of author> This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see <https://www.gnu.org/licenses/>. Also add information on how to contact you by electronic and paper mail. If your software can interact with users remotely through a computer network, you should also make sure that it provides a way for users to get its source. For example, if your program is a web application, its interface could display a "Source" link that leads users to an archive of the code. There are many ways you could offer source, and different solutions will be better for different programs; see section 13 for the specific requirements. You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU AGPL, see <https://www.gnu.org/licenses/>. ================================================ FILE: CONTRIBUTING.md ================================================ # Contributing Contributions come in a variety of forms. Developing Serai, helping document it, using its libraries in another project, using and testing it, and simply sharing it are all valuable ways of contributing. This document will specifically focus on contributions to this repository in the form of code and documentation. ### Rules - Stable native Rust, nightly wasm and tools. - `cargo fmt` must be used. - `cargo clippy` must pass, except for the ignored rules (`type_complexity` and `dead_code`). - The CI must pass. - Only use uppercase variable names when relevant to cryptography. - Use a two-space indent when possible. - Put a space after comment markers. - Don't use multiple newlines between sections of code. - Have a newline before EOF. 
### Guidelines - Sort inputs as core, std, third party, and then Serai. - Comment code reasonably. - Include tests for new features. - Sign commits. ### Submission All submissions should be through GitHub. Contributions to a crate will be licensed according to the crate's existing license, with the crate's copyright holders (distinct from authors) having the right to re-license the crate via a unanimous decision. ================================================ FILE: Cargo.toml ================================================ [workspace] resolver = "2" members = [ # std patches "patches/matches", # Rewrites/redirects "patches/option-ext", "patches/directories-next", "common/std-shims", "common/zalloc", "common/patchable-async-sleep", "common/db", "common/env", "common/request", "crypto/transcript", "crypto/ff-group-tests", "crypto/dalek-ff-group", "crypto/ed448", "crypto/ciphersuite", "crypto/ciphersuite/kp256", "crypto/multiexp", "crypto/schnorr", "crypto/dleq", "crypto/dkg", "crypto/dkg/recovery", "crypto/dkg/dealer", "crypto/dkg/promote", "crypto/dkg/musig", "crypto/dkg/pedpop", "crypto/frost", "crypto/schnorrkel", "networks/bitcoin", "networks/ethereum/alloy-simple-request-transport", "networks/ethereum", "networks/ethereum/relayer", "message-queue", "processor/messages", "processor", "coordinator/tributary/tendermint", "coordinator/tributary", "coordinator", "substrate/primitives", "substrate/coins/primitives", "substrate/coins/pallet", "substrate/dex/pallet", "substrate/validator-sets/primitives", "substrate/validator-sets/pallet", "substrate/genesis-liquidity/primitives", "substrate/genesis-liquidity/pallet", "substrate/emissions/primitives", "substrate/emissions/pallet", "substrate/economic-security/pallet", "substrate/in-instructions/primitives", "substrate/in-instructions/pallet", "substrate/signals/primitives", "substrate/signals/pallet", "substrate/abi", "substrate/runtime", "substrate/node", "substrate/client", "orchestration", "mini", "tests/no-std", 
"tests/docker", "tests/message-queue", "tests/processor", "tests/coordinator", "tests/full-stack", "tests/reproducible-runtime", ] # Always compile Monero (and a variety of dependencies) with optimizations due # to the extensive operations required for Bulletproofs [profile.dev.package] subtle = { opt-level = 3 } curve25519-dalek = { opt-level = 3 } ff = { opt-level = 3 } group = { opt-level = 3 } crypto-bigint = { opt-level = 3 } dalek-ff-group = { opt-level = 3 } minimal-ed448 = { opt-level = 3 } multiexp = { opt-level = 3 } monero-oxide = { opt-level = 3 } [profile.release] panic = "unwind" overflow-checks = true [patch.crates-io] # Dependencies from monero-oxide which originate from within our own tree std-shims = { path = "common/std-shims" } simple-request = { path = "common/request" } dalek-ff-group = { path = "crypto/dalek-ff-group" } flexible-transcript = { path = "crypto/transcript" } modular-frost = { path = "crypto/frost" } # https://github.com/rust-lang-nursery/lazy-static.rs/issues/201 lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev = "5735630d46572f1e5377c8f2ba0f79d18f53b10c" } # These have `std` alternatives matches = { path = "patches/matches" } home = { path = "patches/home" } # directories-next was created because directories was unmaintained # directories-next is now unmaintained while directories is maintained # The directories author pulls in ridiculously pointless crates and prefers # copyleft licenses # The following two patches resolve everything option-ext = { path = "patches/option-ext" } directories-next = { path = "patches/directories-next" } [workspace.lints.clippy] uninlined_format_args = "allow" # TODO unwrap_or_default = "allow" manual_is_multiple_of = "allow" incompatible_msrv = "allow" # Manually verified with a GitHub workflow borrow_as_ptr = "deny" cast_lossless = "deny" cast_possible_truncation = "deny" cast_possible_wrap = "deny" cast_precision_loss = "deny" cast_ptr_alignment = "deny" 
cast_sign_loss = "deny" checked_conversions = "deny" cloned_instead_of_copied = "deny" enum_glob_use = "deny" expl_impl_clone_on_copy = "deny" explicit_into_iter_loop = "deny" explicit_iter_loop = "deny" flat_map_option = "deny" float_cmp = "deny" fn_params_excessive_bools = "deny" ignored_unit_patterns = "deny" implicit_clone = "deny" inefficient_to_string = "deny" invalid_upcast_comparisons = "deny" large_stack_arrays = "deny" linkedlist = "deny" macro_use_imports = "deny" manual_instant_elapsed = "deny" # TODO manual_let_else = "deny" manual_ok_or = "deny" manual_string_new = "deny" map_unwrap_or = "deny" match_bool = "deny" match_same_arms = "deny" missing_fields_in_debug = "deny" # TODO needless_continue = "deny" needless_pass_by_value = "deny" ptr_cast_constness = "deny" range_minus_one = "deny" range_plus_one = "deny" redundant_closure_for_method_calls = "deny" redundant_else = "deny" string_add_assign = "deny" unchecked_time_subtraction = "deny" unnecessary_box_returns = "deny" unnecessary_join = "deny" unnecessary_wraps = "deny" unnested_or_patterns = "deny" unused_async = "deny" unused_self = "deny" zero_sized_map_values = "deny" # TODO: These were incurred when updating Rust as necessary for compilation, yet aren't being fixed # at this time due to the impacts it'd have throughout the repository (when this isn't actively the # primary branch, `next` is) needless_continue = "allow" needless_lifetimes = "allow" useless_conversion = "allow" empty_line_after_doc_comments = "allow" manual_div_ceil = "allow" manual_let_else = "allow" unnecessary_map_or = "allow" result_large_err = "allow" unneeded_struct_pattern = "allow" [workspace.lints.rust] unused = "allow" # TODO: https://github.com/rust-lang/rust/issues/147648 mismatched_lifetime_syntaxes = "allow" unused_attributes = "allow" unused_parens = "allow" ================================================ FILE: LICENSE ================================================ Serai crates are licensed under one of two 
licenses, either MIT or AGPL-3.0, depending on the crate in question. Each crate declares their license in their `Cargo.toml` and includes a `LICENSE` file detailing its status. Additionally, a full copy of the AGPL-3.0 License is included in the root of this repository as a reference text. This copy should be provided with any distribution of a crate licensed under the AGPL-3.0, as per its terms. The GitHub actions/workflows (`.github`) are licensed under the MIT license. ================================================ FILE: README.md ================================================ # Serai Serai is a new DEX, built from the ground up, initially planning on listing Bitcoin, Ethereum, DAI, and Monero, offering a liquidity-pool-based trading experience. Funds are stored in an economically secured threshold-multisig wallet. [Getting Started](spec/Getting%20Started.md) ### Layout - `audits`: Audits for various parts of Serai. - `spec`: The specification of the Serai protocol, both internally and as networked. - `docs`: User-facing documentation on the Serai protocol. - `common`: Crates containing utilities common to a variety of areas under Serai, none neatly fitting under another category. - `crypto`: A series of composable cryptographic libraries built around the `ff`/`group` APIs, achieving a variety of tasks. These range from generic infrastructure, to our IETF-compliant FROST implementation, to a DLEq proof as needed for Bitcoin-Monero atomic swaps. - `networks`: Various libraries intended for usage in Serai yet also by the wider community. This means they will always support the functionality Serai needs, yet won't disadvantage other use cases when possible. - `message-queue`: An ordered message server so services can talk to each other, even when the other is offline. - `processor`: A generic chain processor to process data for Serai and process events from Serai, executing transactions as expected and needed. 
- `coordinator`: A service to manage processors and communicate over a P2P network with other validators. - `substrate`: Substrate crates used to instantiate the Serai network. - `orchestration`: Dockerfiles and scripts to deploy a Serai node/test environment. - `tests`: Tests for various crates. Generally, `crate/src/tests` is used, or `crate/tests`, yet any tests requiring crates' binaries are placed here. ### Security Serai hosts a bug bounty program via [Immunefi](https://immunefi.com/bounty/serai/). For in-scope critical vulnerabilities, we will reward whitehats with up to $30,000. Anything not in-scope should still be submitted through Immunefi, with rewards issued at the discretion of the Immunefi program managers. ### Links - [Website](https://serai.exchange/): https://serai.exchange/ - [Immunefi](https://immunefi.com/bounty/serai/): https://immunefi.com/bounty/serai/ - [Twitter](https://twitter.com/SeraiDEX): https://twitter.com/SeraiDEX - [Discord](https://discord.gg/mpEUtJR3vz): https://discord.gg/mpEUtJR3vz - [Matrix](https://matrix.to/#/#serai:matrix.org): https://matrix.to/#/#serai:matrix.org - [Reddit](https://www.reddit.com/r/SeraiDEX/): https://www.reddit.com/r/SeraiDEX/ - [Telegram](https://t.me/SeraiDEX): https://t.me/SeraiDEX ================================================ FILE: audits/Cypher Stack crypto March 2023/LICENSE ================================================ MIT License Copyright (c) 2023 Cypher Stack Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial 
portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: audits/Cypher Stack crypto March 2023/README.md ================================================ # Cypher Stack /crypto Audit, March 2023 This audit was over the /crypto folder, excluding the ed448 crate, the `Ed448` ciphersuite in the ciphersuite crate, and the `dleq/experimental` feature. It is encompassing up to commit 669d2dbffc1dafb82a09d9419ea182667115df06. Please see https://github.com/cypherstack/serai-audit for provenance. ================================================ FILE: audits/Cypher Stack networks bitcoin August 2023/LICENSE ================================================ MIT License Copyright (c) 2023 Cypher Stack Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: audits/Cypher Stack networks bitcoin August 2023/README.md ================================================ # Cypher Stack /networks/bitcoin Audit, August 2023 This audit was over the `/networks/bitcoin` folder (at the time located at `/coins/bitcoin`). It is encompassing up to commit 5121ca75199dff7bd34230880a1fdd793012068c. Please see https://github.com/cypherstack/serai-btc-audit for provenance. ================================================ FILE: common/db/Cargo.toml ================================================ [package] name = "serai-db" version = "0.1.0" description = "A simple database trait and backends for it" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/common/db" authors = ["Luke Parker "] keywords = [] edition = "2021" rust-version = "1.65" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] parity-db = { version = "0.4", default-features = false, optional = true } rocksdb = { version = "0.24", default-features = false, features = ["zstd"], optional = true } [features] parity-db = ["dep:parity-db"] rocksdb = ["dep:rocksdb"] ================================================ FILE: common/db/LICENSE ================================================ MIT License Copyright (c) 2022-2023 Luke Parker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to 
whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: common/db/src/create_db.rs ================================================ #[doc(hidden)] pub fn serai_db_key( db_dst: &'static [u8], item_dst: &'static [u8], key: impl AsRef<[u8]>, ) -> Vec { let db_len = u8::try_from(db_dst.len()).unwrap(); let dst_len = u8::try_from(item_dst.len()).unwrap(); [[db_len].as_ref(), db_dst, [dst_len].as_ref(), item_dst, key.as_ref()].concat() } /// Creates a series of structs which provide namespacing for keys /// /// # Description /// /// Creates a unit struct and a default implementation for the `key`, `get`, and `set`. The macro /// uses a syntax similar to defining a function. Parameters are concatenated to produce a key, /// they must be `scale` encodable. The return type is used to auto encode and decode the database /// value bytes using `borsh`. /// /// # Arguments /// /// * `db_name` - A database name /// * `field_name` - An item name /// * `args` - Comma separated list of key arguments /// * `field_type` - The return type /// /// # Example /// /// ```ignore /// create_db!( /// TributariesDb { /// AttemptsDb: (key_bytes: &[u8], attempt_id: u32) -> u64, /// ExpiredDb: (genesis: [u8; 32]) -> Vec /// } /// ) /// ``` #[macro_export] macro_rules! 
create_db { ($db_name: ident { $($field_name: ident: ($($arg: ident: $arg_type: ty),*) -> $field_type: ty$(,)?)* }) => { $( #[derive(Clone, Debug)] pub(crate) struct $field_name; impl $field_name { pub(crate) fn key($($arg: $arg_type),*) -> Vec { use scale::Encode; $crate::serai_db_key( stringify!($db_name).as_bytes(), stringify!($field_name).as_bytes(), ($($arg),*).encode() ) } pub(crate) fn set(txn: &mut impl DbTxn $(, $arg: $arg_type)*, data: &$field_type) { let key = $field_name::key($($arg),*); txn.put(&key, borsh::to_vec(data).unwrap()); } pub(crate) fn get(getter: &impl Get, $($arg: $arg_type),*) -> Option<$field_type> { getter.get($field_name::key($($arg),*)).map(|data| { borsh::from_slice(data.as_ref()).unwrap() }) } #[allow(dead_code)] pub(crate) fn del(txn: &mut impl DbTxn $(, $arg: $arg_type)*) { txn.del(&$field_name::key($($arg),*)) } } )* }; } #[macro_export] macro_rules! db_channel { ($db_name: ident { $($field_name: ident: ($($arg: ident: $arg_type: ty),*) -> $field_type: ty$(,)?)* }) => { $( create_db! 
{ $db_name { $field_name: ($($arg: $arg_type,)* index: u32) -> $field_type, } } impl $field_name { pub(crate) fn send(txn: &mut impl DbTxn $(, $arg: $arg_type)*, value: &$field_type) { // Use index 0 to store the amount of messages let messages_sent_key = $field_name::key($($arg),*, 0); let messages_sent = txn.get(&messages_sent_key).map(|counter| { u32::from_le_bytes(counter.try_into().unwrap()) }).unwrap_or(0); txn.put(&messages_sent_key, (messages_sent + 1).to_le_bytes()); // + 2 as index 1 is used for the amount of messages read // Using distinct counters enables send to be called without mutating anything recv may // at the same time let index_to_use = messages_sent + 2; $field_name::set(txn, $($arg),*, index_to_use, value); } pub(crate) fn try_recv(txn: &mut impl DbTxn $(, $arg: $arg_type)*) -> Option<$field_type> { let messages_recvd_key = $field_name::key($($arg),*, 1); let messages_recvd = txn.get(&messages_recvd_key).map(|counter| { u32::from_le_bytes(counter.try_into().unwrap()) }).unwrap_or(0); let index_to_read = messages_recvd + 2; let res = $field_name::get(txn, $($arg),*, index_to_read); if res.is_some() { $field_name::del(txn, $($arg),*, index_to_read); txn.put(&messages_recvd_key, (messages_recvd + 1).to_le_bytes()); } res } } )* }; } ================================================ FILE: common/db/src/lib.rs ================================================ mod create_db; pub use create_db::*; mod mem; pub use mem::*; #[cfg(feature = "rocksdb")] mod rocks; #[cfg(feature = "rocksdb")] pub use rocks::{RocksDB, new_rocksdb}; #[cfg(feature = "parity-db")] mod parity_db; #[cfg(feature = "parity-db")] pub use parity_db::{ParityDb, new_parity_db}; /// An object implementing get. pub trait Get { fn get(&self, key: impl AsRef<[u8]>) -> Option>; } /// An atomic database operation. 
#[must_use] pub trait DbTxn: Send + Get { fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>); fn del(&mut self, key: impl AsRef<[u8]>); fn commit(self); } /// A database supporting atomic operations. pub trait Db: 'static + Send + Sync + Clone + Get { type Transaction<'a>: DbTxn; fn key(db_dst: &'static [u8], item_dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec { let db_len = u8::try_from(db_dst.len()).unwrap(); let dst_len = u8::try_from(item_dst.len()).unwrap(); [[db_len].as_ref(), db_dst, [dst_len].as_ref(), item_dst, key.as_ref()].concat() } fn txn(&mut self) -> Self::Transaction<'_>; } ================================================ FILE: common/db/src/mem.rs ================================================ use core::fmt::Debug; use std::{ sync::{Arc, RwLock}, collections::{HashSet, HashMap}, }; use crate::*; /// An atomic operation for the in-memory database. #[must_use] #[derive(PartialEq, Eq, Debug)] pub struct MemDbTxn<'a>(&'a MemDb, HashMap, Vec>, HashSet>); impl<'a> Get for MemDbTxn<'a> { fn get(&self, key: impl AsRef<[u8]>) -> Option> { if self.2.contains(key.as_ref()) { return None; } self .1 .get(key.as_ref()) .cloned() .or_else(|| self.0 .0.read().unwrap().get(key.as_ref()).cloned()) } } impl<'a> DbTxn for MemDbTxn<'a> { fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) { self.2.remove(key.as_ref()); self.1.insert(key.as_ref().to_vec(), value.as_ref().to_vec()); } fn del(&mut self, key: impl AsRef<[u8]>) { self.1.remove(key.as_ref()); self.2.insert(key.as_ref().to_vec()); } fn commit(mut self) { let mut db = self.0 .0.write().unwrap(); for (key, value) in self.1.drain() { db.insert(key, value); } for key in self.2 { db.remove(&key); } } } /// An in-memory database. 
#[derive(Clone, Debug)] pub struct MemDb(Arc, Vec>>>); impl PartialEq for MemDb { fn eq(&self, other: &MemDb) -> bool { *self.0.read().unwrap() == *other.0.read().unwrap() } } impl Eq for MemDb {} impl Default for MemDb { fn default() -> MemDb { MemDb(Arc::new(RwLock::new(HashMap::new()))) } } impl MemDb { /// Create a new in-memory database. pub fn new() -> MemDb { MemDb::default() } } impl Get for MemDb { fn get(&self, key: impl AsRef<[u8]>) -> Option> { self.0.read().unwrap().get(key.as_ref()).cloned() } } impl Db for MemDb { type Transaction<'a> = MemDbTxn<'a>; fn txn(&mut self) -> MemDbTxn<'_> { MemDbTxn(self, HashMap::new(), HashSet::new()) } } ================================================ FILE: common/db/src/parity_db.rs ================================================ use std::sync::Arc; pub use ::parity_db::{Options, Db as ParityDb}; use crate::*; #[must_use] pub struct Transaction<'a>(&'a Arc, Vec<(u8, Vec, Option>)>); impl Get for Transaction<'_> { fn get(&self, key: impl AsRef<[u8]>) -> Option> { let mut res = self.0.get(&key); for change in &self.1 { if change.1 == key.as_ref() { res.clone_from(&change.2); } } res } } impl DbTxn for Transaction<'_> { fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) { self.1.push((0, key.as_ref().to_vec(), Some(value.as_ref().to_vec()))) } fn del(&mut self, key: impl AsRef<[u8]>) { self.1.push((0, key.as_ref().to_vec(), None)) } fn commit(self) { self.0.commit(self.1).unwrap() } } impl Get for Arc { fn get(&self, key: impl AsRef<[u8]>) -> Option> { ParityDb::get(self, 0, key.as_ref()).unwrap() } } impl Db for Arc { type Transaction<'a> = Transaction<'a>; fn txn(&mut self) -> Self::Transaction<'_> { Transaction(self, vec![]) } } pub fn new_parity_db(path: &str) -> Arc { Arc::new(ParityDb::open_or_create(&Options::with_columns(std::path::Path::new(path), 1)).unwrap()) } ================================================ FILE: common/db/src/rocks.rs ================================================ use 
std::sync::Arc; use rocksdb::{ DBCompressionType, ThreadMode, SingleThreaded, LogLevel, WriteOptions, Transaction as RocksTransaction, Options, OptimisticTransactionDB, }; use crate::*; #[must_use] pub struct Transaction<'a, T: ThreadMode>( RocksTransaction<'a, OptimisticTransactionDB>, &'a OptimisticTransactionDB, ); impl Get for Transaction<'_, T> { fn get(&self, key: impl AsRef<[u8]>) -> Option> { self.0.get(key).expect("couldn't read from RocksDB via transaction") } } impl DbTxn for Transaction<'_, T> { fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) { self.0.put(key, value).expect("couldn't write to RocksDB via transaction") } fn del(&mut self, key: impl AsRef<[u8]>) { self.0.delete(key).expect("couldn't delete from RocksDB via transaction") } fn commit(self) { self.0.commit().expect("couldn't commit to RocksDB via transaction"); self.1.flush_wal(true).expect("couldn't flush RocksDB WAL"); self.1.flush().expect("couldn't flush RocksDB"); } } impl Get for Arc> { fn get(&self, key: impl AsRef<[u8]>) -> Option> { OptimisticTransactionDB::get(self, key).expect("couldn't read from RocksDB") } } impl Db for Arc> { type Transaction<'a> = Transaction<'a, T>; fn txn(&mut self) -> Self::Transaction<'_> { let mut opts = WriteOptions::default(); opts.set_sync(true); Transaction(self.transaction_opt(&opts, &Default::default()), &**self) } } pub type RocksDB = Arc>; pub fn new_rocksdb(path: &str) -> RocksDB { let mut options = Options::default(); options.create_if_missing(true); options.set_compression_type(DBCompressionType::Zstd); options.set_wal_compression_type(DBCompressionType::Zstd); // 10 MB options.set_max_total_wal_size(10 * 1024 * 1024); options.set_wal_size_limit_mb(10); options.set_log_level(LogLevel::Warn); // 1 MB options.set_max_log_file_size(1024 * 1024); options.set_recycle_log_file_num(1); Arc::new(OptimisticTransactionDB::open(&options, path).unwrap()) } ================================================ FILE: common/env/Cargo.toml 
================================================ [package] name = "serai-env" version = "0.1.0" description = "A common library for Serai apps to access environment variables" license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/common/env" authors = ["Luke Parker "] keywords = [] edition = "2021" rust-version = "1.60" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true ================================================ FILE: common/env/LICENSE ================================================ AGPL-3.0-only license Copyright (c) 2023 Luke Parker This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License Version 3 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see . ================================================ FILE: common/env/src/lib.rs ================================================ #![cfg_attr(docsrs, feature(doc_cfg))] // Obtain a variable from the Serai environment/secret store. 
pub fn var(variable: &str) -> Option { // TODO: Move this to a proper secret store // TODO: Unset this variable std::env::var(variable).ok() } ================================================ FILE: common/patchable-async-sleep/Cargo.toml ================================================ [package] name = "patchable-async-sleep" version = "0.1.0" description = "An async sleep function, patchable to the preferred runtime" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/common/patchable-async-sleep" authors = ["Luke Parker "] keywords = ["async", "sleep", "tokio", "smol", "async-std"] edition = "2021" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] tokio = { version = "1", default-features = false, features = [ "time"] } ================================================ FILE: common/patchable-async-sleep/LICENSE ================================================ MIT License Copyright (c) 2024 Luke Parker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

================================================
FILE: common/patchable-async-sleep/README.md
================================================
# Patchable Async Sleep

An async sleep function, patchable to the preferred runtime.

This crate is `tokio`-backed. Applications which don't want to use `tokio` should patch this crate to one which works with their preferred runtime. The point of it is to have a minimal API surface to trivially facilitate such work.

================================================
FILE: common/patchable-async-sleep/src/lib.rs
================================================
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]

use core::time::Duration;

/// Sleep for the specified duration.
pub fn sleep(duration: Duration) -> impl core::future::Future { tokio::time::sleep(duration) } ================================================ FILE: common/request/Cargo.toml ================================================ [package] name = "simple-request" version = "0.1.0" description = "A simple HTTP(S) request library" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/common/simple-request" authors = ["Luke Parker "] keywords = ["http", "https", "async", "request", "ssl"] edition = "2021" rust-version = "1.70" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] tower-service = { version = "0.3", default-features = false } hyper = { version = "1", default-features = false, features = ["http1", "client"] } hyper-util = { version = "0.1", default-features = false, features = ["http1", "client-legacy", "tokio"] } http-body-util = { version = "0.1", default-features = false } tokio = { version = "1", default-features = false } hyper-rustls = { version = "0.27", default-features = false, features = ["http1", "ring", "rustls-native-certs", "native-tokio"], optional = true } zeroize = { version = "1", optional = true } base64ct = { version = "1", features = ["alloc"], optional = true } [features] tls = ["hyper-rustls"] basic-auth = ["zeroize", "base64ct"] default = ["tls"] ================================================ FILE: common/request/LICENSE ================================================ MIT License Copyright (c) 2023 Luke Parker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The 
above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: common/request/README.md ================================================ # Simple Request A simple alternative to reqwest, supporting HTTPS, intended to support a majority of use cases with a fraction of the dependency tree. This library is built directly around `hyper`, `hyper-rustls`, and does require `tokio`. Support for `async-std` would be welcome. ================================================ FILE: common/request/src/lib.rs ================================================ #![cfg_attr(docsrs, feature(doc_cfg))] #![doc = include_str!("../README.md")] use std::sync::Arc; use tokio::sync::Mutex; use tower_service::Service as TowerService; #[cfg(feature = "tls")] use hyper_rustls::{HttpsConnectorBuilder, HttpsConnector}; use hyper::{Uri, header::HeaderValue, body::Bytes, client::conn::http1::SendRequest}; use hyper_util::{ rt::tokio::TokioExecutor, client::legacy::{Client as HyperClient, connect::HttpConnector}, }; pub use hyper; mod request; pub use request::*; mod response; pub use response::*; #[derive(Debug)] pub enum Error { InvalidUri, MissingHost, InconsistentHost, ConnectionError(Box), Hyper(hyper::Error), HyperUtil(hyper_util::client::legacy::Error), } #[cfg(not(feature = "tls"))] type Connector = HttpConnector; #[cfg(feature = "tls")] type Connector = HttpsConnector; #[derive(Clone, Debug)] enum Connection { 
ConnectionPool(HyperClient>), Connection { connector: Connector, host: Uri, connection: Arc>>>>, }, } #[derive(Clone, Debug)] pub struct Client { connection: Connection, } impl Client { fn connector() -> Connector { let mut res = HttpConnector::new(); res.set_keepalive(Some(core::time::Duration::from_secs(60))); res.set_nodelay(true); res.set_reuse_address(true); #[cfg(feature = "tls")] res.enforce_http(false); #[cfg(feature = "tls")] let res = HttpsConnectorBuilder::new() .with_native_roots() .expect("couldn't fetch system's SSL roots") .https_or_http() .enable_http1() .wrap_connector(res); res } pub fn with_connection_pool() -> Client { Client { connection: Connection::ConnectionPool( HyperClient::builder(TokioExecutor::new()) .pool_idle_timeout(core::time::Duration::from_secs(60)) .build(Self::connector()), ), } } pub fn without_connection_pool(host: &str) -> Result { Ok(Client { connection: Connection::Connection { connector: Self::connector(), host: { let uri: Uri = host.parse().map_err(|_| Error::InvalidUri)?; if uri.host().is_none() { Err(Error::MissingHost)?; }; uri }, connection: Arc::new(Mutex::new(None)), }, }) } pub async fn request>(&self, request: R) -> Result, Error> { let request: Request = request.into(); let mut request = request.0; if let Some(header_host) = request.headers().get(hyper::header::HOST) { match &self.connection { Connection::ConnectionPool(_) => {} Connection::Connection { host, .. } => { if header_host.to_str().map_err(|_| Error::InvalidUri)? != host.host().unwrap() { Err(Error::InconsistentHost)?; } } } } else { let host = match &self.connection { Connection::ConnectionPool(_) => { request.uri().host().ok_or(Error::MissingHost)?.to_string() } Connection::Connection { host, .. 
} => { let host_str = host.host().unwrap(); if let Some(uri_host) = request.uri().host() { if host_str != uri_host { Err(Error::InconsistentHost)?; } } host_str.to_string() } }; request .headers_mut() .insert(hyper::header::HOST, HeaderValue::from_str(&host).map_err(|_| Error::InvalidUri)?); } let response = match &self.connection { Connection::ConnectionPool(client) => { client.request(request).await.map_err(Error::HyperUtil)? } Connection::Connection { connector, host, connection } => { let mut connection_lock = connection.lock().await; // If there's not a connection... if connection_lock.is_none() { let call_res = connector.clone().call(host.clone()).await; #[cfg(not(feature = "tls"))] let call_res = call_res.map_err(|e| Error::ConnectionError(format!("{e:?}").into())); #[cfg(feature = "tls")] let call_res = call_res.map_err(Error::ConnectionError); let (requester, connection) = hyper::client::conn::http1::handshake(call_res?).await.map_err(Error::Hyper)?; // This will die when we drop the requester, so we don't need to track an AbortHandle // for it tokio::spawn(connection); *connection_lock = Some(requester); } let connection = connection_lock.as_mut().unwrap(); let mut err = connection.ready().await.err(); if err.is_none() { // Send the request let res = connection.send_request(request).await; if let Ok(res) = res { return Ok(Response(res, self)); } err = res.err(); } // Since this connection has been put into an error state, drop it *connection_lock = None; Err(Error::Hyper(err.unwrap()))? 
} }; Ok(Response(response, self)) } } ================================================ FILE: common/request/src/request.rs ================================================ use hyper::body::Bytes; #[cfg(feature = "basic-auth")] use hyper::header::HeaderValue; pub use http_body_util::Full; #[cfg(feature = "basic-auth")] use crate::Error; #[derive(Debug)] pub struct Request(pub(crate) hyper::Request>); impl Request { #[cfg(feature = "basic-auth")] fn username_password_from_uri(&self) -> Result<(String, String), Error> { if let Some(authority) = self.0.uri().authority() { let authority = authority.as_str(); if authority.contains('@') { // Decode the username and password from the URI let mut userpass = authority.split('@').next().unwrap().to_string(); let mut userpass_iter = userpass.split(':'); let username = userpass_iter.next().unwrap().to_string(); let password = userpass_iter.next().map_or_else(String::new, str::to_string); zeroize::Zeroize::zeroize(&mut userpass); return Ok((username, password)); } } Err(Error::InvalidUri) } #[cfg(feature = "basic-auth")] pub fn basic_auth(&mut self, username: &str, password: &str) { use zeroize::Zeroize; use base64ct::{Encoding, Base64}; let mut formatted = format!("{username}:{password}"); let mut encoded = Base64::encode_string(formatted.as_bytes()); formatted.zeroize(); self.0.headers_mut().insert( hyper::header::AUTHORIZATION, HeaderValue::from_str(&format!("Basic {encoded}")).unwrap(), ); encoded.zeroize(); } #[cfg(feature = "basic-auth")] pub fn basic_auth_from_uri(&mut self) -> Result<(), Error> { let (mut username, mut password) = self.username_password_from_uri()?; self.basic_auth(&username, &password); use zeroize::Zeroize; username.zeroize(); password.zeroize(); Ok(()) } #[cfg(feature = "basic-auth")] pub fn with_basic_auth(&mut self) { let _ = self.basic_auth_from_uri(); } } impl From>> for Request { fn from(request: hyper::Request>) -> Request { Request(request) } } ================================================ 
FILE: common/request/src/response.rs ================================================ use hyper::{ StatusCode, header::{HeaderValue, HeaderMap}, body::{Buf, Incoming}, }; use http_body_util::BodyExt; use crate::{Client, Error}; // Borrows the client so its async task lives as long as this response exists. #[allow(dead_code)] #[derive(Debug)] pub struct Response<'a>(pub(crate) hyper::Response, pub(crate) &'a Client); impl<'a> Response<'a> { pub fn status(&self) -> StatusCode { self.0.status() } pub fn headers(&self) -> &HeaderMap { self.0.headers() } pub async fn body(self) -> Result { Ok(self.0.into_body().collect().await.map_err(Error::Hyper)?.aggregate().reader()) } } ================================================ FILE: common/std-shims/Cargo.toml ================================================ [package] name = "std-shims" version = "0.1.4" description = "A series of std shims to make alloc more feasible" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/common/std-shims" authors = ["Luke Parker "] keywords = ["nostd", "no_std", "alloc", "io"] edition = "2021" rust-version = "1.64" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] rustversion = { version = "1", default-features = false } spin = { version = "0.10", default-features = false, features = ["use_ticket_mutex", "once", "lazy"] } hashbrown = { version = "0.14", default-features = false, features = ["ahash", "inline-more"] } [features] std = [] default = ["std"] ================================================ FILE: common/std-shims/LICENSE ================================================ MIT License Copyright (c) 2023 Luke Parker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, 
distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: common/std-shims/README.md ================================================ # std shims A crate which passes through to std when the default `std` feature is enabled, yet provides a series of shims when it isn't. No guarantee of one-to-one parity is provided. The shims provided aim to be sufficient for the average case. `HashSet` and `HashMap` are provided via `hashbrown`. Synchronization primitives are provided via `spin` (avoiding a requirement on `critical-section`). 
types are not guaranteed to be ================================================ FILE: common/std-shims/src/collections.rs ================================================ #[cfg(feature = "std")] pub use std::collections::*; #[cfg(not(feature = "std"))] pub use alloc::collections::*; #[cfg(not(feature = "std"))] pub use hashbrown::{HashSet, HashMap}; ================================================ FILE: common/std-shims/src/io.rs ================================================ #[cfg(feature = "std")] pub use std::io::*; #[cfg(not(feature = "std"))] mod shims { use core::fmt::{Debug, Formatter}; use alloc::{boxed::Box, vec::Vec}; #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub enum ErrorKind { UnexpectedEof, Other, } pub struct Error { kind: ErrorKind, error: Box, } impl Debug for Error { fn fmt(&self, fmt: &mut Formatter<'_>) -> core::result::Result<(), core::fmt::Error> { fmt.debug_struct("Error").field("kind", &self.kind).finish_non_exhaustive() } } impl Error { pub fn new(kind: ErrorKind, error: E) -> Error { Error { kind, error: Box::new(error) } } pub fn other(error: E) -> Error { Error { kind: ErrorKind::Other, error: Box::new(error) } } pub fn kind(&self) -> ErrorKind { self.kind } pub fn into_inner(self) -> Option> { Some(self.error) } } pub type Result = core::result::Result; pub trait Read { fn read(&mut self, buf: &mut [u8]) -> Result; fn read_exact(&mut self, buf: &mut [u8]) -> Result<()> { let read = self.read(buf)?; if read != buf.len() { Err(Error::new(ErrorKind::UnexpectedEof, "reader ran out of bytes"))?; } Ok(()) } } impl Read for &[u8] { fn read(&mut self, buf: &mut [u8]) -> Result { let read = buf.len().min(self.len()); buf[.. read].copy_from_slice(&self[.. 
read]); *self = &self[read ..]; Ok(read) } } pub trait BufRead: Read { fn fill_buf(&mut self) -> Result<&[u8]>; fn consume(&mut self, amt: usize); } impl BufRead for &[u8] { fn fill_buf(&mut self) -> Result<&[u8]> { Ok(*self) } fn consume(&mut self, amt: usize) { *self = &self[amt ..]; } } pub trait Write { fn write(&mut self, buf: &[u8]) -> Result; fn write_all(&mut self, buf: &[u8]) -> Result<()> { if self.write(buf)? != buf.len() { Err(Error::new(ErrorKind::UnexpectedEof, "writer ran out of bytes"))?; } Ok(()) } } impl Write for Vec { fn write(&mut self, buf: &[u8]) -> Result { self.extend(buf); Ok(buf.len()) } } } #[cfg(not(feature = "std"))] pub use shims::*; ================================================ FILE: common/std-shims/src/lib.rs ================================================ #![cfg_attr(docsrs, feature(doc_cfg))] #![doc = include_str!("../README.md")] #![cfg_attr(not(feature = "std"), no_std)] pub extern crate alloc; pub mod sync; pub mod collections; pub mod io; pub use alloc::vec; pub use alloc::str; pub use alloc::string; pub mod prelude { #[rustversion::before(1.73)] #[doc(hidden)] pub trait StdShimsDivCeil { fn div_ceil(self, rhs: Self) -> Self; } #[rustversion::before(1.73)] mod impl_divceil { use super::StdShimsDivCeil; impl StdShimsDivCeil for u8 { fn div_ceil(self, rhs: Self) -> Self { (self + (rhs - 1)) / rhs } } impl StdShimsDivCeil for u16 { fn div_ceil(self, rhs: Self) -> Self { (self + (rhs - 1)) / rhs } } impl StdShimsDivCeil for u32 { fn div_ceil(self, rhs: Self) -> Self { (self + (rhs - 1)) / rhs } } impl StdShimsDivCeil for u64 { fn div_ceil(self, rhs: Self) -> Self { (self + (rhs - 1)) / rhs } } impl StdShimsDivCeil for u128 { fn div_ceil(self, rhs: Self) -> Self { (self + (rhs - 1)) / rhs } } impl StdShimsDivCeil for usize { fn div_ceil(self, rhs: Self) -> Self { (self + (rhs - 1)) / rhs } } } #[cfg(feature = "std")] #[rustversion::before(1.74)] #[doc(hidden)] pub trait StdShimsIoErrorOther { fn other(error: E) -> Self where 
E: Into>; } #[cfg(feature = "std")] #[rustversion::before(1.74)] impl StdShimsIoErrorOther for std::io::Error { fn other(error: E) -> Self where E: Into>, { std::io::Error::new(std::io::ErrorKind::Other, error) } } } ================================================ FILE: common/std-shims/src/sync.rs ================================================ pub use core::sync::*; pub use alloc::sync::*; mod mutex_shim { #[cfg(feature = "std")] pub use std::sync::*; #[cfg(not(feature = "std"))] pub use spin::*; #[derive(Default, Debug)] pub struct ShimMutex(Mutex); impl ShimMutex { pub const fn new(value: T) -> Self { Self(Mutex::new(value)) } pub fn lock(&self) -> MutexGuard<'_, T> { #[cfg(feature = "std")] let res = self.0.lock().unwrap(); #[cfg(not(feature = "std"))] let res = self.0.lock(); res } } } pub use mutex_shim::{ShimMutex as Mutex, MutexGuard}; #[cfg(not(feature = "std"))] pub use spin::Lazy as LazyLock; #[rustversion::before(1.80)] #[cfg(feature = "std")] pub use spin::Lazy as LazyLock; #[rustversion::since(1.80)] #[cfg(feature = "std")] pub use std::sync::LazyLock; ================================================ FILE: common/zalloc/Cargo.toml ================================================ [package] name = "zalloc" version = "0.1.0" description = "An allocator wrapper which zeroizes memory on dealloc" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/common/zalloc" authors = ["Luke Parker "] keywords = [] edition = "2021" rust-version = "1.77" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] zeroize = { version = "^1.5", default-features = false } [build-dependencies] rustversion = { version = "1", default-features = false } [features] std = ["zeroize/std"] default = ["std"] allocator = [] ================================================ FILE: common/zalloc/LICENSE ================================================ MIT License Copyright (c) 2022-2023 Luke Parker 
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: common/zalloc/build.rs ================================================ #[rustversion::nightly] fn main() { println!("cargo::rustc-check-cfg=cfg(zalloc_rustc_nightly)"); println!("cargo::rustc-cfg=zalloc_rustc_nightly"); } #[rustversion::not(nightly)] fn main() { println!("cargo::rustc-check-cfg=cfg(zalloc_rustc_nightly)"); } ================================================ FILE: common/zalloc/src/lib.rs ================================================ #![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(all(zalloc_rustc_nightly, feature = "allocator"), feature(allocator_api))] //! Implementation of a Zeroizing Allocator, enabling zeroizing memory on deallocation. //! This can either be used with Box (requires nightly and the "allocator" feature) to provide the //! functionality of zeroize on types which don't implement zeroize, or used as a wrapper around //! 
the global allocator to ensure *all* memory is zeroized. use core::{ slice, alloc::{Layout, GlobalAlloc}, }; use zeroize::Zeroize; /// An allocator wrapper which zeroizes its memory on dealloc. pub struct ZeroizingAlloc(pub T); #[cfg(all(zalloc_rustc_nightly, feature = "allocator"))] use core::{ ptr::NonNull, alloc::{AllocError, Allocator}, }; #[cfg(all(zalloc_rustc_nightly, feature = "allocator"))] unsafe impl Allocator for ZeroizingAlloc { fn allocate(&self, layout: Layout) -> Result, AllocError> { self.0.allocate(layout) } unsafe fn deallocate(&self, mut ptr: NonNull, layout: Layout) { slice::from_raw_parts_mut(ptr.as_mut(), layout.size()).zeroize(); self.0.deallocate(ptr, layout); } } unsafe impl GlobalAlloc for ZeroizingAlloc { unsafe fn alloc(&self, layout: Layout) -> *mut u8 { self.0.alloc(layout) } unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { slice::from_raw_parts_mut(ptr, layout.size()).zeroize(); self.0.dealloc(ptr, layout); } } ================================================ FILE: coordinator/Cargo.toml ================================================ [package] name = "serai-coordinator" version = "0.1.0" description = "Serai coordinator to prepare batches and sign transactions" license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/coordinator" authors = ["Luke Parker "] keywords = [] edition = "2021" publish = false [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] async-trait = { version = "0.1", default-features = false } zeroize = { version = "^1.5", default-features = false, features = ["std"] } rand_core = { version = "0.6", default-features = false, features = ["std"] } blake2 = { version = "0.10", default-features = false, features = ["std"] } transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, features = ["std", "recommended"] } dalek-ff-group = { path = 
"../crypto/dalek-ff-group", default-features = false, features = ["std"] } ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std"] } schnorr = { package = "schnorr-signatures", path = "../crypto/schnorr", default-features = false, features = ["std", "aggregate"] } dkg-musig = { path = "../crypto/dkg/musig", default-features = false, features = ["std"] } frost = { package = "modular-frost", path = "../crypto/frost" } frost-schnorrkel = { path = "../crypto/schnorrkel" } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] } zalloc = { path = "../common/zalloc" } serai-db = { path = "../common/db" } serai-env = { path = "../common/env" } processor-messages = { package = "serai-processor-messages", path = "../processor/messages" } message-queue = { package = "serai-message-queue", path = "../message-queue" } tributary = { package = "tributary-chain", path = "./tributary" } sp-application-crypto = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false, features = ["std"] } serai-client = { path = "../substrate/client", default-features = false, features = ["serai", "borsh"] } hex = { version = "0.4", default-features = false, features = ["std"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } log = { version = "0.4", default-features = false, features = ["std"] } env_logger = { version = "0.10", default-features = false, features = ["humantime"] } futures-util = { version = "0.3", default-features = false, features = ["std"] } tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "request-response", "gossipsub", "macros"] } [dev-dependencies] tributary = { package = "tributary-chain", path = 
"./tributary", features = ["tests"] } sp-application-crypto = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false, features = ["std"] } sp-runtime = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false, features = ["std"] } [features] longer-reattempts = [] parity-db = ["serai-db/parity-db"] rocksdb = ["serai-db/rocksdb"] ================================================ FILE: coordinator/LICENSE ================================================ AGPL-3.0-only license Copyright (c) 2023 Luke Parker This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License Version 3 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see . ================================================ FILE: coordinator/README.md ================================================ # Coordinator The Serai coordinator communicates with other coordinators to prepare batches for Serai and sign transactions. In order to achieve consensus over gossip, and order certain events, a micro-blockchain is instantiated. 
================================================ FILE: coordinator/src/cosign_evaluator.rs ================================================ use core::time::Duration; use std::{ sync::Arc, collections::{HashSet, HashMap}, }; use tokio::{ sync::{mpsc, Mutex, RwLock}, time::sleep, }; use borsh::BorshSerialize; use sp_application_crypto::RuntimePublic; use serai_client::{ primitives::{ExternalNetworkId, EXTERNAL_NETWORKS}, validator_sets::primitives::{ExternalValidatorSet, Session}, Serai, SeraiError, TemporalSerai, }; use serai_db::{Get, DbTxn, Db, create_db}; use processor_messages::coordinator::cosign_block_msg; use crate::{ p2p::{CosignedBlock, GossipMessageKind, P2p}, substrate::LatestCosignedBlock, }; create_db! { CosignDb { ReceivedCosign: (set: ExternalValidatorSet, block: [u8; 32]) -> CosignedBlock, LatestCosign: (network: ExternalNetworkId) -> CosignedBlock, DistinctChain: (set: ExternalValidatorSet) -> (), } } pub struct CosignEvaluator { db: Mutex, serai: Arc, stakes: RwLock>>, latest_cosigns: RwLock>, } impl CosignEvaluator { async fn update_latest_cosign(&self) { let stakes_lock = self.stakes.read().await; // If we haven't gotten the stake data yet, return let Some(stakes) = stakes_lock.as_ref() else { return }; let total_stake = stakes.values().copied().sum::(); let latest_cosigns = self.latest_cosigns.read().await; let mut highest_block = 0; for cosign in latest_cosigns.values() { let mut networks = HashSet::new(); for (network, sub_cosign) in &*latest_cosigns { if sub_cosign.block_number >= cosign.block_number { networks.insert(network); } } let sum_stake = networks.into_iter().map(|network| stakes.get(network).unwrap_or(&0)).sum::(); let needed_stake = ((total_stake * 2) / 3) + 1; if (total_stake == 0) || (sum_stake > needed_stake) { highest_block = highest_block.max(cosign.block_number); } } let mut db_lock = self.db.lock().await; let mut txn = db_lock.txn(); if highest_block > LatestCosignedBlock::latest_cosigned_block(&txn) { log::info!("setting 
latest cosigned block to {}", highest_block); LatestCosignedBlock::set(&mut txn, &highest_block); } txn.commit(); } async fn update_stakes(&self) -> Result<(), SeraiError> { let serai = self.serai.as_of_latest_finalized_block().await?; let mut stakes = HashMap::new(); for network in EXTERNAL_NETWORKS { // Use if this network has published a Batch for a short-circuit of if they've ever set a key let set_key = serai.in_instructions().last_batch_for_network(network).await?.is_some(); if set_key { stakes.insert( network, serai .validator_sets() .total_allocated_stake(network.into()) .await? .expect("network which published a batch didn't have a stake set") .0, ); } } // Since we've successfully built stakes, set it *self.stakes.write().await = Some(stakes); self.update_latest_cosign().await; Ok(()) } // Uses Err to signify a message should be retried async fn handle_new_cosign(&self, cosign: CosignedBlock) -> Result<(), SeraiError> { // If we already have this cosign or a newer cosign, return if let Some(latest) = self.latest_cosigns.read().await.get(&cosign.network) { if latest.block_number >= cosign.block_number { return Ok(()); } } // If this an old cosign (older than a day), drop it let latest_block = self.serai.latest_finalized_block().await?; if (cosign.block_number + (24 * 60 * 60 / 6)) < latest_block.number() { log::debug!("received old cosign supposedly signed by {:?}", cosign.network); return Ok(()); } let Some(block) = self.serai.finalized_block_by_number(cosign.block_number).await? else { log::warn!("received cosign with a block number which doesn't map to a block"); return Ok(()); }; async fn set_with_keys_fn( serai: &TemporalSerai<'_>, network: ExternalNetworkId, ) -> Result, SeraiError> { let Some(latest_session) = serai.validator_sets().session(network.into()).await? 
else { log::warn!("received cosign from {:?}, which doesn't yet have a session", network); return Ok(None); }; let prior_session = Session(latest_session.0.saturating_sub(1)); Ok(Some( if serai .validator_sets() .keys(ExternalValidatorSet { network, session: prior_session }) .await? .is_some() { ExternalValidatorSet { network, session: prior_session } } else { ExternalValidatorSet { network, session: latest_session } }, )) } // Get the key for this network as of the prior block // If we have two chains, this value may be different across chains depending on if one chain // included the set_keys and one didn't // Because set_keys will force a cosign, it will force detection of distinct blocks // re: set_keys using keys prior to set_keys (assumed amenable to all) let serai = self.serai.as_of(block.header.parent_hash.into()); let Some(set_with_keys) = set_with_keys_fn(&serai, cosign.network).await? else { return Ok(()); }; let Some(keys) = serai.validator_sets().keys(set_with_keys).await? else { log::warn!("received cosign for a block we didn't have keys for"); return Ok(()); }; if !keys .0 .verify(&cosign_block_msg(cosign.block_number, cosign.block), &cosign.signature.into()) { log::warn!("received cosigned block with an invalid signature"); return Ok(()); } log::info!( "received cosign for block {} ({}) by {:?}", block.number(), hex::encode(cosign.block), cosign.network ); // Save this cosign to the DB { let mut db = self.db.lock().await; let mut txn = db.txn(); ReceivedCosign::set(&mut txn, set_with_keys, cosign.block, &cosign); LatestCosign::set(&mut txn, set_with_keys.network, &(cosign)); txn.commit(); } if cosign.block != block.hash() { log::error!( "received cosign for a distinct block at {}. we have {}. 
cosign had {}", cosign.block_number, hex::encode(block.hash()), hex::encode(cosign.block) ); let serai = self.serai.as_of(latest_block.hash()); let mut db = self.db.lock().await; // Save this set as being on a different chain let mut txn = db.txn(); DistinctChain::set(&mut txn, set_with_keys, &()); txn.commit(); let mut total_stake = 0; let mut total_on_distinct_chain = 0; for network in EXTERNAL_NETWORKS { // Get the current set for this network let set_with_keys = { let mut res; while { res = set_with_keys_fn(&serai, network).await; res.is_err() } { log::error!( "couldn't get the set with keys when checking for a distinct chain: {:?}", res ); tokio::time::sleep(core::time::Duration::from_secs(3)).await; } res.unwrap() }; // Get its stake // Doesn't use the stakes inside self to prevent deadlocks re: multi-lock acquisition if let Some(set_with_keys) = set_with_keys { let stake = { let mut res; while { res = serai.validator_sets().total_allocated_stake(set_with_keys.network.into()).await; res.is_err() } { log::error!( "couldn't get total allocated stake when checking for a distinct chain: {:?}", res ); tokio::time::sleep(core::time::Duration::from_secs(3)).await; } res.unwrap() }; if let Some(stake) = stake { total_stake += stake.0; if DistinctChain::get(&*db, set_with_keys).is_some() { total_on_distinct_chain += stake.0; } } } } // See https://github.com/serai-dex/serai/issues/339 for the reasoning on 17% if (total_stake * 17 / 100) <= total_on_distinct_chain { panic!("17% of validator sets (by stake) have co-signed a distinct chain"); } } else { { let mut latest_cosigns = self.latest_cosigns.write().await; latest_cosigns.insert(cosign.network, cosign); } self.update_latest_cosign().await; } Ok(()) } #[allow(clippy::new_ret_no_self)] pub fn new(db: D, p2p: P, serai: Arc) -> mpsc::UnboundedSender { let mut latest_cosigns = HashMap::new(); for network in EXTERNAL_NETWORKS { if let Some(cosign) = LatestCosign::get(&db, network) { latest_cosigns.insert(network, 
cosign); } } let evaluator = Arc::new(Self { db: Mutex::new(db), serai, stakes: RwLock::new(None), latest_cosigns: RwLock::new(latest_cosigns), }); // Spawn a task to update stakes regularly tokio::spawn({ let evaluator = evaluator.clone(); async move { loop { // Run this until it passes while evaluator.update_stakes().await.is_err() { log::warn!("couldn't update stakes in the cosign evaluator"); // Try again in 10 seconds sleep(Duration::from_secs(10)).await; } // Run it every 10 minutes as we don't need the exact stake data for this to be valid sleep(Duration::from_secs(10 * 60)).await; } } }); // Spawn a task to receive cosigns and handle them let (send, mut recv) = mpsc::unbounded_channel(); tokio::spawn({ let evaluator = evaluator.clone(); async move { while let Some(msg) = recv.recv().await { while evaluator.handle_new_cosign(msg).await.is_err() { // Try again in 10 seconds sleep(Duration::from_secs(10)).await; } } } }); // Spawn a task to rebroadcast the most recent cosigns tokio::spawn({ async move { loop { let cosigns = evaluator.latest_cosigns.read().await.values().copied().collect::>(); for cosign in cosigns { let mut buf = vec![]; cosign.serialize(&mut buf).unwrap(); P2p::broadcast(&p2p, GossipMessageKind::CosignedBlock, buf).await; } sleep(Duration::from_secs(60)).await; } } }); // Return the channel to send cosigns send } } ================================================ FILE: coordinator/src/db.rs ================================================ use blake2::{ digest::{consts::U32, Digest}, Blake2b, }; use scale::Encode; use borsh::{BorshSerialize, BorshDeserialize}; use serai_client::{ in_instructions::primitives::{Batch, SignedBatch}, primitives::ExternalNetworkId, validator_sets::primitives::{ExternalValidatorSet, Session}, }; pub use serai_db::*; use ::tributary::ReadWrite; use crate::tributary::{TributarySpec, Transaction, scanner::RecognizedIdType}; create_db!( MainDb { HandledMessageDb: (network: ExternalNetworkId) -> u64, ActiveTributaryDb: 
// NOTE(review): this extract stripped many `<...>` generic spans (e.g. the element
// type of several `Vec`s, the bound on `&G`), so types below are incomplete —
// confirm against the repository before relying on them.
() -> Vec,
    // Marker that a validator set's Tributary has been retired; key presence is the flag
    RetiredTributaryDb: (set: ExternalValidatorSet) -> (),
    // First preprocess received for a recognized ID (Batch or Plan), per network
    FirstPreprocessDb: (
      network: ExternalNetworkId, id_type: RecognizedIdType, id: &[u8]
    ) -> Vec>,
    // Highest Batch ID received from the processor for this network
    LastReceivedBatchDb: (network: ExternalNetworkId) -> u32,
    // Hash a Batch's instructions are expected to match, by network and Batch ID
    ExpectedBatchDb: (network: ExternalNetworkId, id: u32) -> [u8; 32],
    // Signed batches saved to disk (presumably pending publication — confirm with callers)
    BatchDb: (network: ExternalNetworkId, id: u32) -> SignedBatch,
    LastVerifiedBatchDb: (network: ExternalNetworkId) -> u32,
    // Batch at which a session handover occurs, plus the reverse lookup below
    HandoverBatchDb: (set: ExternalValidatorSet) -> u32,
    LookupHandoverBatchDb: (network: ExternalNetworkId, batch: u32) -> Session,
    // Serialized queue of batch Transactions awaiting processing for a set
    QueuedBatchesDb: (set: ExternalValidatorSet) -> Vec
  }
);

impl ActiveTributaryDb {
  // Read all active TributarySpecs, returning both the raw encoded bytes (so a
  // caller may append to them) and the decoded list.
  pub fn active_tributaries(getter: &G) -> (Vec, Vec) {
    let bytes = Self::get(getter).unwrap_or_default();
    let mut bytes_ref: &[u8] = bytes.as_ref();

    // Specs are stored back-to-back; deserialize until the buffer is exhausted
    let mut tributaries = vec![];
    while !bytes_ref.is_empty() {
      tributaries.push(TributarySpec::deserialize_reader(&mut bytes_ref).unwrap());
    }

    (bytes, tributaries)
  }

  // Append `spec` to the active list. Idempotent: returns early if already present.
  pub fn add_participating_in_tributary(txn: &mut impl DbTxn, spec: &TributarySpec) {
    let (mut existing_bytes, existing) = ActiveTributaryDb::active_tributaries(txn);
    for tributary in &existing {
      if tributary == spec {
        return;
      }
    }

    // Serialize onto the existing raw bytes rather than re-encoding the whole list
    spec.serialize(&mut existing_bytes).unwrap();
    ActiveTributaryDb::set(txn, &existing_bytes);
  }

  // Remove `set` from the active list (first match only) and flag it as retired.
  pub fn retire_tributary(txn: &mut impl DbTxn, set: ExternalValidatorSet) {
    let mut active = Self::active_tributaries(txn).1;
    for i in 0 .. active.len() {
      if active[i].set() == set {
        active.remove(i);
        break;
      }
    }

    // Re-encode the remaining specs and persist
    let mut bytes = vec![];
    for active in active {
      active.serialize(&mut bytes).unwrap();
    }
    Self::set(txn, &bytes);
    RetiredTributaryDb::set(txn, set, &());
  }
}

impl FirstPreprocessDb {
  // Save the first preprocess seen for an ID. If one was already saved, it must be
  // identical — a distinct value indicates a bug, hence the assert.
  pub fn save_first_preprocess(
    txn: &mut impl DbTxn,
    network: ExternalNetworkId,
    id_type: RecognizedIdType,
    id: &[u8],
    preprocess: &Vec>,
  ) {
    if let Some(existing) = FirstPreprocessDb::get(txn, network, id_type, id) {
      assert_eq!(&existing, preprocess, "saved a distinct first preprocess");
      return;
    }
    FirstPreprocessDb::set(txn, network, id_type, id, preprocess);
  }
}

impl ExpectedBatchDb {
  // Record the latest received Batch ID for the network and the digest its
  // instructions are expected to hash to.
  pub fn save_expected_batch(txn: &mut impl DbTxn, batch: &Batch) {
    LastReceivedBatchDb::set(txn, batch.network, &batch.id);
    Self::set(
      txn,
      batch.network,
      batch.id,
      // Blake2b digest of the SCALE-encoded instructions
      &Blake2b::::digest(batch.instructions.encode()).into(),
    );
  }
}

impl HandoverBatchDb {
  // Record the handover batch for a set, in both lookup directions
  // (set -> batch and (network, batch) -> session).
  pub fn set_handover_batch(txn: &mut impl DbTxn, set: ExternalValidatorSet, batch: u32) {
    Self::set(txn, set, &batch);
    LookupHandoverBatchDb::set(txn, set.network, batch, &set.session);
  }
}

impl QueuedBatchesDb {
  // Append a batch Transaction to the set's serialized queue.
  pub fn queue(txn: &mut impl DbTxn, set: ExternalValidatorSet, batch: &Transaction) {
    let mut batches = Self::get(txn, set).unwrap_or_default();
    batch.write(&mut batches).unwrap();
    Self::set(txn, set, &batches);
  }

  // Drain the set's queue: delete the DB entry and return the decoded transactions.
  pub fn take(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Vec {
    let batches_vec = Self::get(txn, set).unwrap_or_default();
    txn.del(Self::key(set));

    let mut batches: &[u8] = &batches_vec;
    let mut res = vec![];
    while !batches.is_empty() {
      res.push(Transaction::read(&mut batches).unwrap());
    }
    res
  }
}

================================================
FILE: coordinator/src/main.rs
================================================
#![expect(clippy::cast_possible_truncation)]

use core::ops::Deref;
use std::{
  sync::{OnceLock, Arc},
  time::Duration,
  collections::{VecDeque, HashSet, HashMap},
};

use zeroize::{Zeroize, Zeroizing};
use rand_core::OsRng;

use
dalek_ff_group::Ristretto; use ciphersuite::{ group::{ ff::{Field, PrimeField}, GroupEncoding, }, Ciphersuite, }; use schnorr::SchnorrSignature; use frost::Participant; use serai_db::{DbTxn, Db}; use scale::Encode; use borsh::BorshSerialize; use serai_client::{ primitives::ExternalNetworkId, validator_sets::primitives::{ExternalValidatorSet, KeyPair, Session}, Public, Serai, SeraiInInstructions, }; use message_queue::{Service, client::MessageQueue}; use tokio::{ sync::{Mutex, RwLock, mpsc, broadcast}, time::sleep, }; use ::tributary::{ProvidedError, TransactionKind, TransactionTrait, Block, Tributary}; mod tributary; use crate::tributary::{ TributarySpec, Label, SignData, Transaction, scanner::RecognizedIdType, PlanIds, }; mod db; use db::*; mod p2p; pub use p2p::*; use processor_messages::{ key_gen, sign, coordinator::{self, SubstrateSignableId}, ProcessorMessage, }; pub mod processors; use processors::Processors; mod substrate; use substrate::CosignTransactions; mod cosign_evaluator; use cosign_evaluator::CosignEvaluator; #[cfg(test)] pub mod tests; #[global_allocator] static ALLOCATOR: zalloc::ZeroizingAlloc = zalloc::ZeroizingAlloc(std::alloc::System); #[derive(Clone)] pub struct ActiveTributary { pub spec: TributarySpec, pub tributary: Arc>, } #[derive(Clone)] pub enum TributaryEvent { NewTributary(ActiveTributary), TributaryRetired(ExternalValidatorSet), } // Creates a new tributary and sends it to all listeners. 
/// Create a new Tributary for `spec`, trigger its DKG via the processor, and
/// announce it to all `TributaryEvent` listeners.
///
/// NOTE(review): the generic parameters below were reconstructed; the extraction
/// this review was performed on had stripped `<...>` spans — confirm against the
/// repository.
async fn add_tributary<D: Db, Pro: Processors, P: P2p>(
  db: D,
  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
  processors: &Pro,
  p2p: P,
  tributaries: &broadcast::Sender<TributaryEvent<D, P>>,
  spec: TributarySpec,
) {
  if RetiredTributaryDb::get(&db, spec.set()).is_some() {
    log::info!("not adding tributary {:?} since it's been retired", spec.set());
    // Fix: actually skip the retired set. The original fell through after this log
    // and added the Tributary anyway, contradicting the message above.
    return;
  }

  log::info!("adding tributary {:?}", spec.set());

  let tributary = Tributary::<_, Transaction, _>::new(
    // TODO2: Use a db on a distinct volume to protect against DoS attacks
    // TODO2: Delete said db once the Tributary is dropped
    db,
    spec.genesis(),
    spec.start_time(),
    key.clone(),
    spec.validators(),
    p2p,
  )
  .await
  .unwrap();

  // Trigger a DKG for the newly added Tributary
  // If we're rebooting, we'll re-fire this message
  // This is safe due to the message-queue deduplicating based off the intent system
  let set = spec.set();
  let our_i = spec
    .i(&[], Ristretto::generator() * key.deref())
    .expect("adding a tributary for a set we aren't in set for");
  processors
    .send(
      set.network,
      processor_messages::key_gen::CoordinatorMessage::GenerateKey {
        id: processor_messages::key_gen::KeyGenId { session: set.session, attempt: 0 },
        params: frost::ThresholdParams::new(spec.t(), spec.n(&[]), our_i.start).unwrap(),
        shares: u16::from(our_i.end) - u16::from(our_i.start),
      },
    )
    .await;

  // Announce to listeners; if every receiver has closed, the coordinator is in an
  // unrecoverable state, hence the unwrap
  tributaries
    .send(TributaryEvent::NewTributary(ActiveTributary { spec, tributary: Arc::new(tributary) }))
    .map_err(|_| "all ActiveTributary recipients closed")
    .unwrap();
}

// TODO: Find a better pattern for this
static HANDOVER_VERIFY_QUEUE_LOCK: OnceLock<Mutex<()>> = OnceLock::new();

/// Handle a single message from a processor, returning whether it was handled
/// (false requests a retry after the caller refreshes its Tributary view).
#[allow(clippy::too_many_arguments)]
async fn handle_processor_message<D: Db, P: P2p>(
  db: &mut D,
  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
  serai: &Serai,
  p2p: &P,
  cosign_channel: &mpsc::UnboundedSender<CosignedBlock>,
  tributaries: &HashMap<Session, ActiveTributary<D, P>>,
  network: ExternalNetworkId,
  msg: &processors::Message,
) -> bool {
  // Messages are sequentially IDed; we must have handled the prior one (or this one)
  #[allow(clippy::nonminimal_bool)]
  if let Some(already_handled) = HandledMessageDb::get(db, msg.network) {
    assert!(!(already_handled > msg.id));
    assert!((already_handled == msg.id) || (already_handled == msg.id -
1)); if already_handled == msg.id { return true; } } else { assert_eq!(msg.id, 0); } let _hvq_lock = HANDOVER_VERIFY_QUEUE_LOCK.get_or_init(|| Mutex::new(())).lock().await; let mut txn = db.txn(); let mut relevant_tributary = match &msg.msg { // We'll only receive these if we fired GenerateKey, which we'll only do if if we're // in-set, making the Tributary relevant ProcessorMessage::KeyGen(inner_msg) => match inner_msg { key_gen::ProcessorMessage::Commitments { id, .. } | key_gen::ProcessorMessage::InvalidCommitments { id, .. } | key_gen::ProcessorMessage::Shares { id, .. } | key_gen::ProcessorMessage::InvalidShare { id, .. } | key_gen::ProcessorMessage::GeneratedKeyPair { id, .. } | key_gen::ProcessorMessage::Blame { id, .. } => Some(id.session), }, ProcessorMessage::Sign(inner_msg) => match inner_msg { // We'll only receive InvalidParticipant/Preprocess/Share if we're actively signing sign::ProcessorMessage::InvalidParticipant { id, .. } | sign::ProcessorMessage::Preprocess { id, .. } | sign::ProcessorMessage::Share { id, .. } => Some(id.session), // While the Processor's Scanner will always emit Completed, that's routed through the // Signer and only becomes a ProcessorMessage::Completed if the Signer is present and // confirms it sign::ProcessorMessage::Completed { session, .. 
} => Some(*session), }, ProcessorMessage::Coordinator(inner_msg) => match inner_msg { // This is a special case as it's relevant to *all* Tributaries for this network we're // signing in // It doesn't return a Tributary to become `relevant_tributary` though coordinator::ProcessorMessage::SubstrateBlockAck { block, plans } => { // Get the sessions for these keys let sessions = plans .iter() .map(|plan| plan.session) .filter(|session| { RetiredTributaryDb::get(&txn, ExternalValidatorSet { network, session: *session }) .is_none() }) .collect::>(); // Ensure we have the Tributaries for session in &sessions { if !tributaries.contains_key(session) { return false; } } for session in sessions { let tributary = &tributaries[&session]; let plans = plans .iter() .filter_map(|plan| Some(plan.id).filter(|_| plan.session == session)) .collect::>(); PlanIds::set(&mut txn, &tributary.spec.genesis(), *block, &plans); let tx = Transaction::SubstrateBlock(*block); log::trace!( "processor message effected transaction {} {:?}", hex::encode(tx.hash()), &tx ); log::trace!("providing transaction {}", hex::encode(tx.hash())); let res = tributary.tributary.provide_transaction(tx).await; if !(res.is_ok() || (res == Err(ProvidedError::AlreadyProvided))) { if res == Err(ProvidedError::LocalMismatchesOnChain) { // Spin, since this is a crit for this Tributary loop { log::error!( "{}. tributary: {}, provided: SubstrateBlock({})", "tributary added distinct provided to delayed locally provided TX", hex::encode(tributary.spec.genesis()), block, ); sleep(Duration::from_secs(60)).await; } } panic!("provided an invalid transaction: {res:?}"); } } None } // We'll only fire these if we are the Substrate signer, making the Tributary relevant coordinator::ProcessorMessage::InvalidParticipant { id, .. } | coordinator::ProcessorMessage::CosignPreprocess { id, .. } | coordinator::ProcessorMessage::BatchPreprocess { id, .. } | coordinator::ProcessorMessage::SlashReportPreprocess { id, .. 
} | coordinator::ProcessorMessage::SubstrateShare { id, .. } => Some(id.session), // This causes an action on our P2P net yet not on any Tributary coordinator::ProcessorMessage::CosignedBlock { block_number, block, signature } => { let cosigned_block = CosignedBlock { network, block_number: *block_number, block: *block, signature: { let mut arr = [0; 64]; arr.copy_from_slice(signature); arr }, }; cosign_channel.send(cosigned_block).unwrap(); let mut buf = vec![]; cosigned_block.serialize(&mut buf).unwrap(); P2p::broadcast(p2p, GossipMessageKind::CosignedBlock, buf).await; None } // This causes an action on Substrate yet not on any Tributary coordinator::ProcessorMessage::SignedSlashReport { session, signature } => { let set = ExternalValidatorSet { network, session: *session }; let signature: &[u8] = signature.as_ref(); let signature = <[u8; 64]>::try_from(signature).unwrap(); let signature: serai_client::Signature = signature.into(); let slashes = crate::tributary::SlashReport::get(&txn, set) .expect("signed slash report despite not having slash report locally"); let slashes_pubs = slashes .iter() .map(|(address, points)| (Public::from(*address), *points)) .collect::>(); let tx = serai_client::SeraiValidatorSets::report_slashes( network, slashes .into_iter() .map(|(address, points)| (serai_client::SeraiAddress(address), points)) .collect::>() .try_into() .unwrap(), signature, ); loop { if serai.publish(&tx).await.is_ok() { break None; } // Check if the slashes shouldn't still be reported. If not, break. 
let Ok(serai) = serai.as_of_latest_finalized_block().await else { tokio::time::sleep(core::time::Duration::from_secs(5)).await; continue; }; let Ok(key) = serai.validator_sets().key_pending_slash_report(network).await else { tokio::time::sleep(core::time::Duration::from_secs(5)).await; continue; }; let Some(key) = key else { break None; }; // If this is the key for this slash report, then this will verify use sp_application_crypto::RuntimePublic; if !key.verify( &serai_client::validator_sets::primitives::report_slashes_message(&set, &slashes_pubs), &signature, ) { break None; } } } }, // These don't return a relevant Tributary as there's no Tributary with action expected ProcessorMessage::Substrate(inner_msg) => match inner_msg { processor_messages::substrate::ProcessorMessage::Batch { batch } => { assert_eq!( batch.network, msg.network, "processor sent us a batch for a different network than it was for", ); ExpectedBatchDb::save_expected_batch(&mut txn, batch); None } // If this is a new Batch, immediately publish it (if we can) processor_messages::substrate::ProcessorMessage::SignedBatch { batch } => { assert_eq!( batch.batch.network, msg.network, "processor sent us a signed batch for a different network than it was for", ); log::debug!("received batch {:?} {}", batch.batch.network, batch.batch.id); // Save this batch to the disk BatchDb::set(&mut txn, batch.batch.network, batch.batch.id, &batch.clone()); // Get the next-to-execute batch ID let Ok(mut next) = substrate::expected_next_batch(serai, network).await else { return false; }; // Since we have a new batch, publish all batches yet to be published to Serai // This handles the edge-case where batch n+1 is signed before batch n is let mut batches = VecDeque::new(); while let Some(batch) = BatchDb::get(&txn, network, next) { batches.push_back(batch); next += 1; } while let Some(batch) = batches.pop_front() { // If this Batch should no longer be published, continue let Ok(expected_next_batch) = 
substrate::expected_next_batch(serai, network).await else { return false; }; if expected_next_batch > batch.batch.id { continue; } let tx = SeraiInInstructions::execute_batch(batch.clone()); log::debug!("attempting to publish batch {:?} {}", batch.batch.network, batch.batch.id,); // This publish may fail if this transactions already exists in the mempool, which is // possible, or if this batch was already executed on-chain // Either case will have eventual resolution and be handled by the above check on if // this batch should execute let res = serai.publish(&tx).await; if res.is_ok() { log::info!( "published batch {network:?} {} (block {})", batch.batch.id, hex::encode(batch.batch.block), ); } else { log::debug!( "couldn't publish batch {:?} {}: {:?}", batch.batch.network, batch.batch.id, res, ); // If we failed to publish it, restore it batches.push_front(batch); // Sleep for a few seconds before retrying to prevent hammering the node sleep(Duration::from_secs(5)).await; } } None } }, }; // If we have a relevant Tributary, check it's actually still relevant and has yet to be retired if let Some(relevant_tributary_value) = relevant_tributary { if RetiredTributaryDb::get( &txn, ExternalValidatorSet { network: msg.network, session: relevant_tributary_value }, ) .is_some() { relevant_tributary = None; } } // If there's a relevant Tributary... 
if let Some(relevant_tributary) = relevant_tributary { // Make sure we have it // Per the reasoning above, we only return a Tributary as relevant if we're a participant // Accordingly, we do *need* to have this Tributary now to handle it UNLESS the Tributary has // already completed and this is simply an old message (which we prior checked) let Some(ActiveTributary { spec, tributary }) = tributaries.get(&relevant_tributary) else { // Since we don't, sleep for a fraction of a second and return false, signaling we didn't // handle this message // At the start of the loop which calls this function, we'll check for new tributaries, // making this eventually resolve sleep(Duration::from_millis(100)).await; return false; }; let genesis = spec.genesis(); let pub_key = Ristretto::generator() * key.deref(); let txs = match msg.msg.clone() { ProcessorMessage::KeyGen(inner_msg) => match inner_msg { key_gen::ProcessorMessage::Commitments { id, commitments } => { vec![Transaction::DkgCommitments { attempt: id.attempt, commitments, signed: Transaction::empty_signed(), }] } key_gen::ProcessorMessage::InvalidCommitments { id, faulty } => { // This doesn't have guaranteed timing // // While the party *should* be fatally slashed and not included in future attempts, // they'll actually be fatally slashed (assuming liveness before the Tributary retires) // and not included in future attempts *which begin after the latency window completes* let participant = spec .reverse_lookup_i( &crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt) .expect("participating in DKG attempt yet we didn't save who was removed"), faulty, ) .unwrap(); vec![Transaction::RemoveParticipantDueToDkg { participant, signed: Transaction::empty_signed(), }] } key_gen::ProcessorMessage::Shares { id, mut shares } => { // Create a MuSig-based machine to inform Substrate of this key generation let nonces = crate::tributary::dkg_confirmation_nonces(key, spec, &mut txn, id.attempt); let removed = 
crate::tributary::removed_as_of_dkg_attempt(&txn, genesis, id.attempt) .expect("participating in a DKG attempt yet we didn't track who was removed yet?"); let our_i = spec .i(&removed, pub_key) .expect("processor message to DKG for an attempt we aren't a validator in"); // `tx_shares` needs to be done here as while it can be serialized from the HashMap // without further context, it can't be deserialized without context let mut tx_shares = Vec::with_capacity(shares.len()); for shares in &mut shares { tx_shares.push(vec![]); for i in 1 ..= spec.n(&removed) { let i = Participant::new(i).unwrap(); if our_i.contains(&i) { if shares.contains_key(&i) { panic!("processor sent us our own shares"); } continue; } tx_shares.last_mut().unwrap().push( shares.remove(&i).expect("processor didn't send share for another validator"), ); } } vec![Transaction::DkgShares { attempt: id.attempt, shares: tx_shares, confirmation_nonces: nonces, signed: Transaction::empty_signed(), }] } key_gen::ProcessorMessage::InvalidShare { id, accuser, faulty, blame } => { vec![Transaction::InvalidDkgShare { attempt: id.attempt, accuser, faulty, blame, signed: Transaction::empty_signed(), }] } key_gen::ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } => { // TODO2: Check the KeyGenId fields // Tell the Tributary the key pair, get back the share for the MuSig signature let share = crate::tributary::generated_key_pair::( &mut txn, key, spec, &KeyPair(Public::from(substrate_key), network_key.try_into().unwrap()), id.attempt, ); // TODO: Move this into generated_key_pair? 
match share { Ok(share) => { vec![Transaction::DkgConfirmed { attempt: id.attempt, confirmation_share: share, signed: Transaction::empty_signed(), }] } Err(p) => { let participant = spec .reverse_lookup_i( &crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt) .expect("participating in DKG attempt yet we didn't save who was removed"), p, ) .unwrap(); vec![Transaction::RemoveParticipantDueToDkg { participant, signed: Transaction::empty_signed(), }] } } } key_gen::ProcessorMessage::Blame { id, participant } => { let participant = spec .reverse_lookup_i( &crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt) .expect("participating in DKG attempt yet we didn't save who was removed"), participant, ) .unwrap(); vec![Transaction::RemoveParticipantDueToDkg { participant, signed: Transaction::empty_signed(), }] } }, ProcessorMessage::Sign(msg) => match msg { sign::ProcessorMessage::InvalidParticipant { .. } => { // TODO: Locally increase slash points to maximum (distinct from an explicitly fatal // slash) and censor transactions (yet don't explicitly ban) vec![] } sign::ProcessorMessage::Preprocess { id, preprocesses } => { if id.attempt == 0 { FirstPreprocessDb::save_first_preprocess( &mut txn, network, RecognizedIdType::Plan, &id.id, &preprocesses, ); vec![] } else { vec![Transaction::Sign(SignData { plan: id.id, attempt: id.attempt, label: Label::Preprocess, data: preprocesses, signed: Transaction::empty_signed(), })] } } sign::ProcessorMessage::Share { id, shares } => { vec![Transaction::Sign(SignData { plan: id.id, attempt: id.attempt, label: Label::Share, data: shares, signed: Transaction::empty_signed(), })] } sign::ProcessorMessage::Completed { session: _, id, tx } => { let r = Zeroizing::new(::F::random(&mut OsRng)); #[allow(non_snake_case)] let R = ::generator() * r.deref(); let mut tx = Transaction::SignCompleted { plan: id, tx_hash: tx, first_signer: pub_key, signature: SchnorrSignature { R, s: ::F::ZERO }, }; let 
signed = SchnorrSignature::sign(key, r, tx.sign_completed_challenge()); match &mut tx { Transaction::SignCompleted { signature, .. } => { *signature = signed; } _ => unreachable!(), } vec![tx] } }, ProcessorMessage::Coordinator(inner_msg) => match inner_msg { coordinator::ProcessorMessage::SubstrateBlockAck { .. } => unreachable!(), coordinator::ProcessorMessage::InvalidParticipant { .. } => { // TODO: Locally increase slash points to maximum (distinct from an explicitly fatal // slash) and censor transactions (yet don't explicitly ban) vec![] } coordinator::ProcessorMessage::CosignPreprocess { id, preprocesses } | coordinator::ProcessorMessage::SlashReportPreprocess { id, preprocesses } => { vec![Transaction::SubstrateSign(SignData { plan: id.id, attempt: id.attempt, label: Label::Preprocess, data: preprocesses.into_iter().map(Into::into).collect(), signed: Transaction::empty_signed(), })] } coordinator::ProcessorMessage::BatchPreprocess { id, block, preprocesses } => { log::info!( "informed of batch (sign ID {}, attempt {}) for block {}", hex::encode(id.id.encode()), id.attempt, hex::encode(block), ); // If this is the first attempt instance, wait until we synchronize around the batch // first if id.attempt == 0 { FirstPreprocessDb::save_first_preprocess( &mut txn, spec.set().network, RecognizedIdType::Batch, &{ let SubstrateSignableId::Batch(id) = id.id else { panic!("BatchPreprocess SubstrateSignableId wasn't Batch") }; id.to_le_bytes() }, &preprocesses.into_iter().map(Into::into).collect::>(), ); let intended = Transaction::Batch { block: block.0, batch: match id.id { SubstrateSignableId::Batch(id) => id, _ => panic!("BatchPreprocess did not contain Batch ID"), }, }; // If this is the new key's first Batch, only create this TX once we verify all // all prior published `Batch`s // TODO: This assumes BatchPreprocess is immediately after Batch // Ensure that assumption let last_received = LastReceivedBatchDb::get(&txn, msg.network).unwrap(); let handover_batch = 
HandoverBatchDb::get(&txn, spec.set()); let mut queue = false; if let Some(handover_batch) = handover_batch { // There is a race condition here. We may verify all `Batch`s from the prior set, // start signing the handover `Batch` `n`, start signing `n+1`, have `n+1` signed // before `n` (or at the same time), yet then the prior set forges a malicious // `Batch` `n`. // // The malicious `Batch` `n` would be publishable to Serai, as Serai can't // distinguish what's intended to be a handover `Batch`, yet then anyone could // publish the new set's `n+1`, causing their acceptance of the handover. // // To fix this, if this is after the handover `Batch` and we have yet to verify // publication of the handover `Batch`, don't yet yield the provided. if last_received > handover_batch { if let Some(last_verified) = LastVerifiedBatchDb::get(&txn, msg.network) { if last_verified < handover_batch { queue = true; } } else { queue = true; } } } else { HandoverBatchDb::set_handover_batch(&mut txn, spec.set(), last_received); // If this isn't the first batch, meaning we do have to verify all prior batches, and // the prior Batch hasn't been verified yet... 
if (last_received != 0) && LastVerifiedBatchDb::get(&txn, msg.network) .map_or(true, |last_verified| last_verified < (last_received - 1)) { // Withhold this TX until we verify all prior `Batch`s queue = true; } } if queue { QueuedBatchesDb::queue(&mut txn, spec.set(), &intended); vec![] } else { // Because this is post-verification of the handover batch, take all queued `Batch`s // now to ensure we don't provide this before an already queued Batch // This *may* be an unreachable case due to how last_verified_batch is set, yet it // doesn't hurt to have as a defensive pattern let mut res = QueuedBatchesDb::take(&mut txn, spec.set()); res.push(intended); res } } else { vec![Transaction::SubstrateSign(SignData { plan: id.id, attempt: id.attempt, label: Label::Preprocess, data: preprocesses.into_iter().map(Into::into).collect(), signed: Transaction::empty_signed(), })] } } coordinator::ProcessorMessage::SubstrateShare { id, shares } => { vec![Transaction::SubstrateSign(SignData { plan: id.id, attempt: id.attempt, label: Label::Share, data: shares.into_iter().map(|share| share.to_vec()).collect(), signed: Transaction::empty_signed(), })] } #[allow(clippy::match_same_arms)] // Allowed to preserve layout coordinator::ProcessorMessage::CosignedBlock { .. } => unreachable!(), #[allow(clippy::match_same_arms)] coordinator::ProcessorMessage::SignedSlashReport { .. } => unreachable!(), }, ProcessorMessage::Substrate(inner_msg) => match inner_msg { processor_messages::substrate::ProcessorMessage::Batch { .. } | processor_messages::substrate::ProcessorMessage::SignedBatch { .. 
} => unreachable!(),
    },
  };

  // If this created transactions, publish them
  for mut tx in txs {
    log::trace!("processor message effected transaction {} {:?}", hex::encode(tx.hash()), &tx);

    match tx.kind() {
      TransactionKind::Provided(_) => {
        log::trace!("providing transaction {}", hex::encode(tx.hash()));
        let res = tributary.provide_transaction(tx.clone()).await;
        // AlreadyProvided is tolerated (this message may be a re-handled duplicate); any other
        // error is fatal
        if !(res.is_ok() || (res == Err(ProvidedError::AlreadyProvided))) {
          if res == Err(ProvidedError::LocalMismatchesOnChain) {
            // Spin, since this is a crit for this Tributary
            loop {
              log::error!(
                "{}. tributary: {}, provided: {:?}",
                "tributary added distinct provided to delayed locally provided TX",
                hex::encode(spec.genesis()),
                &tx,
              );
              sleep(Duration::from_secs(60)).await;
            }
          }
          panic!("provided an invalid transaction: {res:?}");
        }
      }
      TransactionKind::Unsigned => {
        log::trace!("publishing unsigned transaction {}", hex::encode(tx.hash()));
        match tributary.add_transaction(tx.clone()).await {
          Ok(_) => {}
          Err(e) => panic!("created an invalid unsigned transaction: {e:?}"),
        }
      }
      TransactionKind::Signed(_, _) => {
        // Signed transactions are signed here, then published via the shared helper
        tx.sign(&mut OsRng, genesis, key);
        tributary::publish_signed_transaction(&mut txn, tributary, tx).await;
      }
    }
  }
}
}

// Record this message as handled within the same txn, so a crash can't re-process it
HandledMessageDb::set(&mut txn, msg.network, &msg.id);
txn.commit();

true
}

// Drives handle_processor_message for a single external network: tracks the active Tributaries
// for that network via tributary_event, polls the processor message queue, and only acks a
// message once it was fully handled.
// NOTE(review): generic parameters appear stripped by text extraction (e.g.
// `Zeroizing<::F>`, `mpsc::UnboundedReceiver>`) — restore from the original source before
// compiling.
#[allow(clippy::too_many_arguments)]
async fn handle_processor_messages(
  mut db: D,
  key: Zeroizing<::F>,
  serai: Arc,
  processors: Pro,
  p2p: P,
  cosign_channel: mpsc::UnboundedSender,
  network: ExternalNetworkId,
  mut tributary_event: mpsc::UnboundedReceiver>,
) {
  let mut tributaries = HashMap::new();
  loop {
    // Non-blocking drain of Tributary lifecycle events, keyed by session
    match tributary_event.try_recv() {
      Ok(event) => match event {
        TributaryEvent::NewTributary(tributary) => {
          let set = tributary.spec.set();
          assert_eq!(set.network, network);
          tributaries.insert(set.session, tributary);
        }
        TributaryEvent::TributaryRetired(set) => {
          tributaries.remove(&set.session);
        }
      },
      Err(mpsc::error::TryRecvError::Empty) => {}
      Err(mpsc::error::TryRecvError::Disconnected) => {
        panic!("handle_processor_messages tributary_event sender closed")
      }
    }

    // TODO: Check this ID is sane (last handled ID or expected next ID)
    // The timeout bounds how long we block, so the tributary_event drain above stays live
    let Ok(msg) = tokio::time::timeout(Duration::from_secs(1), processors.recv(network)).await
    else {
      continue;
    };
    log::trace!("entering handle_processor_message for {:?}", network);
    // Only ack the message if it was actually handled (returns true)
    if handle_processor_message(
      &mut db,
      &key,
      &serai,
      &p2p,
      &cosign_channel,
      &tributaries,
      network,
      &msg,
    )
    .await
    {
      processors.ack(msg).await;
    }
    log::trace!("exited handle_processor_message for {:?}", network);
  }
}

// Provides CosignSubstrateBlock transactions for pending cosigns, and provides queued Batch
// transactions once all prior published `Batch`s have been verified.
#[allow(clippy::too_many_arguments)]
async fn handle_cosigns_and_batch_publication(
  mut db: D,
  network: ExternalNetworkId,
  mut tributary_event: mpsc::UnboundedReceiver>,
) {
  let mut tributaries = HashMap::new();
  'outer: loop {
    // TODO: Create a better async flow for this
    tokio::time::sleep(core::time::Duration::from_millis(100)).await;

    // Non-blocking drain of Tributary lifecycle events, keyed by session
    match tributary_event.try_recv() {
      Ok(event) => match event {
        TributaryEvent::NewTributary(tributary) => {
          let set = tributary.spec.set();
          assert_eq!(set.network, network);
          tributaries.insert(set.session, tributary);
        }
        TributaryEvent::TributaryRetired(set) => {
          tributaries.remove(&set.session);
        }
      },
      Err(mpsc::error::TryRecvError::Empty) => {}
      Err(mpsc::error::TryRecvError::Disconnected) => {
        panic!("handle_processor_messages tributary_event sender closed")
      }
    }

    // Handle pending cosigns
    {
      let mut txn = db.txn();
      while let Some((session, block, hash)) = CosignTransactions::try_recv(&mut txn, network) {
        let Some(ActiveTributary { spec, tributary }) = tributaries.get(&session) else {
          // Breaking drops the txn, so this cosign is re-received on the next iteration
          log::warn!("didn't yet have tributary we're supposed to cosign with");
          break;
        };
        log::info!(
          "{network:?} {session:?} cosigning block #{block} (hash {}...)",
          hex::encode(&hash[..
8])
        );
        let tx = Transaction::CosignSubstrateBlock(hash);
        let res = tributary.provide_transaction(tx.clone()).await;
        // AlreadyProvided is tolerated; any other error is fatal
        if !(res.is_ok() || (res == Err(ProvidedError::AlreadyProvided))) {
          if res == Err(ProvidedError::LocalMismatchesOnChain) {
            // Spin, since this is a crit for this Tributary
            loop {
              log::error!(
                "{}. tributary: {}, provided: {:?}",
                "tributary added distinct CosignSubstrateBlock",
                hex::encode(spec.genesis()),
                &tx,
              );
              sleep(Duration::from_secs(60)).await;
            }
          }
          panic!("provided an invalid CosignSubstrateBlock: {res:?}");
        }
      }
      txn.commit();
    }

    // Verify any published `Batch`s
    {
      // Lock shared with the Batch-queueing path so handover state can't change underneath us
      let _hvq_lock = HANDOVER_VERIFY_QUEUE_LOCK.get_or_init(|| Mutex::new(())).lock().await;
      let mut txn = db.txn();
      let mut to_publish = vec![];
      let start_id =
        LastVerifiedBatchDb::get(&txn, network).map_or(0, |already_verified| already_verified + 1);
      if let Some(last_id) =
        substrate::verify_published_batches::(&mut txn, network, u32::MAX).await
      {
        // Check if any of these `Batch`s were a handover `Batch` or the `Batch` before a handover
        // `Batch`
        // If so, we need to publish queued provided `Batch` transactions
        for batch in start_id ..= last_id {
          let is_pre_handover = LookupHandoverBatchDb::get(&txn, network, batch + 1);
          if let Some(session) = is_pre_handover {
            let set = ExternalValidatorSet { network, session };
            let mut queued = QueuedBatchesDb::take(&mut txn, set);
            // is_handover_batch is only set for handover `Batch`s we're participating in, making
            // this safe
            if queued.is_empty() {
              panic!("knew the next Batch was a handover yet didn't queue it");
            }
            // Only publish the handover Batch
            to_publish.push((set.session, queued.remove(0)));
            // Re-queue the remaining batches
            for remaining in queued {
              QueuedBatchesDb::queue(&mut txn, set, &remaining);
            }
          }

          let is_handover = LookupHandoverBatchDb::get(&txn, network, batch);
          if let Some(session) = is_handover {
            for queued in QueuedBatchesDb::take(&mut txn, ExternalValidatorSet { network, session })
            {
              to_publish.push((session, queued));
            }
          }
        }
      }

      for (session, tx) in to_publish {
        let Some(ActiveTributary { spec, tributary }) = tributaries.get(&session) else {
          log::warn!("didn't yet have tributary we're supposed to provide a queued Batch for");
          // Safe since this will drop the txn updating the most recently queued batch
          continue 'outer;
        };
        log::debug!("providing Batch transaction {:?}", &tx);
        let res = tributary.provide_transaction(tx.clone()).await;
        if !(res.is_ok() || (res == Err(ProvidedError::AlreadyProvided))) {
          if res == Err(ProvidedError::LocalMismatchesOnChain) {
            // Spin, since this is a crit for this Tributary
            loop {
              log::error!(
                "{}. tributary: {}, provided: {:?}",
                "tributary added distinct Batch",
                hex::encode(spec.genesis()),
                &tx,
              );
              sleep(Duration::from_secs(60)).await;
            }
          }
          panic!("provided an invalid Batch: {res:?}");
        }
      }
      txn.commit();
    }
  }
}

// Fans processor messages and cosign/batch work out to per-network tasks, then forwards every
// Tributary lifecycle event to both tasks for the relevant network.
// NOTE(review): generic parameters appear stripped by text extraction (e.g.
// `Zeroizing<::F>`, `broadcast::Receiver>`) — restore from the original source before compiling.
pub async fn handle_processors(
  db: D,
  key: Zeroizing<::F>,
  serai: Arc,
  processors: Pro,
  p2p: P,
  cosign_channel: mpsc::UnboundedSender,
  mut tributary_event: broadcast::Receiver>,
) {
  // Two channels per network: one feeding handle_processor_messages, one feeding
  // handle_cosigns_and_batch_publication
  let mut channels = HashMap::new();
  for network in serai_client::primitives::EXTERNAL_NETWORKS {
    let (processor_send, processor_recv) = mpsc::unbounded_channel();
    tokio::spawn(handle_processor_messages(
      db.clone(),
      key.clone(),
      serai.clone(),
      processors.clone(),
      p2p.clone(),
      cosign_channel.clone(),
      network,
      processor_recv,
    ));
    let (cosign_send, cosign_recv) = mpsc::unbounded_channel();
    tokio::spawn(handle_cosigns_and_batch_publication(db.clone(), network, cosign_recv));
    channels.insert(network, (processor_send, cosign_send));
  }

  // Listen to new tributary events
  loop {
    match tributary_event.recv().await.unwrap() {
      TributaryEvent::NewTributary(tributary) => {
        let (c1, c2) = &channels[&tributary.spec.set().network];
        c1.send(TributaryEvent::NewTributary(tributary.clone())).unwrap();
        c2.send(TributaryEvent::NewTributary(tributary)).unwrap();
      }
      TributaryEvent::TributaryRetired(set) => {
        let (c1, c2) = &channels[&set.network];
        c1.send(TributaryEvent::TributaryRetired(set)).unwrap();
c2.send(TributaryEvent::TributaryRetired(set)).unwrap();
      }
    };
  }
}

// Top-level coordinator entry point: reloads persisted Tributaries, spawns the Substrate
// scanner, Tributary management, slash-report, heartbeat, cosign-evaluation, and P2P tasks,
// then blocks on processor-message handling.
// NOTE(review): generic parameters appear stripped by text extraction — restore from the
// original source before compiling.
pub async fn run(
  raw_db: D,
  key: Zeroizing<::F>,
  p2p: P,
  processors: Pro,
  serai: Arc,
) {
  let (new_tributary_spec_send, mut new_tributary_spec_recv) = mpsc::unbounded_channel();

  // Reload active tributaries from the database
  for spec in ActiveTributaryDb::active_tributaries(&raw_db).1 {
    new_tributary_spec_send.send(spec).unwrap();
  }

  let (perform_slash_report_send, mut perform_slash_report_recv) = mpsc::unbounded_channel();

  let (tributary_retired_send, mut tributary_retired_recv) = mpsc::unbounded_channel();

  // Handle new Substrate blocks
  tokio::spawn(crate::substrate::scan_task(
    raw_db.clone(),
    key.clone(),
    processors.clone(),
    serai.clone(),
    new_tributary_spec_send,
    perform_slash_report_send,
    tributary_retired_send,
  ));

  // Handle the Tributaries

  // This should be large enough for an entire rotation of all tributaries
  // If it's too small, the coordinator fails to boot, which is a decent sanity check
  let (tributary_event, mut tributary_event_listener_1) = broadcast::channel(32);
  let tributary_event_listener_2 = tributary_event.subscribe();
  let tributary_event_listener_3 = tributary_event.subscribe();
  let tributary_event_listener_4 = tributary_event.subscribe();
  let tributary_event_listener_5 = tributary_event.subscribe();

  // Emit TributaryEvent::TributaryRetired
  tokio::spawn({
    let tributary_event = tributary_event.clone();
    async move {
      loop {
        let retired = tributary_retired_recv.recv().await.unwrap();
        tributary_event.send(TributaryEvent::TributaryRetired(retired)).map_err(|_| ()).unwrap();
      }
    }
  });

  // Spawn a task to further add Tributaries as needed
  tokio::spawn({
    let raw_db = raw_db.clone();
    let key = key.clone();
    let processors = processors.clone();
    let p2p = p2p.clone();
    async move {
      loop {
        let spec = new_tributary_spec_recv.recv().await.unwrap();
        // Uses an inner task as Tributary::new may take several seconds
        tokio::spawn({
          let raw_db = raw_db.clone();
          let key = key.clone();
          let processors = processors.clone();
          let p2p = p2p.clone();
          let tributary_event = tributary_event.clone();
          async move {
            add_tributary(raw_db, key, &processors, p2p, &tributary_event, spec).await;
          }
        });
      }
    }
  });

  // When we reach synchrony on an event requiring signing, send our preprocess for it
  // TODO: Properly place this into the Tributary scanner, as it's a mess out here
  let recognized_id = {
    let raw_db = raw_db.clone();
    let key = key.clone();

    let specs = Arc::new(RwLock::new(HashMap::new()));
    let tributaries = Arc::new(RwLock::new(HashMap::new()));
    // Spawn a task to maintain a local view of the tributaries for whenever recognized_id is
    // called
    tokio::spawn({
      let specs = specs.clone();
      let tributaries = tributaries.clone();
      let mut set_to_genesis = HashMap::new();
      async move {
        loop {
          match tributary_event_listener_1.recv().await {
            Ok(TributaryEvent::NewTributary(tributary)) => {
              set_to_genesis.insert(tributary.spec.set(), tributary.spec.genesis());
              tributaries.write().await.insert(tributary.spec.genesis(), tributary.tributary);
              specs.write().await.insert(tributary.spec.set(), tributary.spec);
            }
            Ok(TributaryEvent::TributaryRetired(set)) => {
              if let Some(genesis) = set_to_genesis.remove(&set) {
                specs.write().await.remove(&set);
                tributaries.write().await.remove(&genesis);
              }
            }
            Err(broadcast::error::RecvError::Lagged(_)) => {
              panic!("recognized_id lagged to handle tributary_event")
            }
            Err(broadcast::error::RecvError::Closed) => panic!("tributary_event sender closed"),
          }
        }
      }
    });

    // Also spawn a task to handle slash reports, as this needs such a view of tributaries
    tokio::spawn({
      let mut raw_db = raw_db.clone();
      let key = key.clone();
      let tributaries = tributaries.clone();
      async move {
        'task_loop: loop {
          match perform_slash_report_recv.recv().await {
            Some(set) => {
              // Resolve the set to its genesis/validators, retrying while the view task catches
              // up; skip entirely if the Tributary already retired
              let (genesis, validators) = loop {
                let specs = specs.read().await;
                let Some(spec) = specs.get(&set) else {
                  // If we don't have this Tributary because it's retired, break and move on
                  if RetiredTributaryDb::get(&raw_db, set).is_some() {
                    continue 'task_loop;
                  }

                  // This may happen if the task above is simply slow
                  log::warn!("tributary we don't have yet is supposed to perform a slash report");
                  continue;
                };
                break (spec.genesis(), spec.validators());
              };

              // Build the slash-point vector for every validator other than ourselves
              let mut slashes = vec![];
              for (validator, _) in validators {
                if validator == (::generator() * key.deref()) {
                  continue;
                }
                let validator = validator.to_bytes();

                let fatally = tributary::FatallySlashed::get(&raw_db, genesis, validator).is_some();
                // TODO: Properly type this
                let points = if fatally {
                  u32::MAX
                } else {
                  tributary::SlashPoints::get(&raw_db, genesis, validator).unwrap_or(0)
                };
                slashes.push(points);
              }

              let mut tx = Transaction::SlashReport(slashes, Transaction::empty_signed());
              tx.sign(&mut OsRng, genesis, &key);

              let mut first = true;
              loop {
                if !first {
                  sleep(Duration::from_millis(100)).await;
                }
                first = false;

                let tributaries = tributaries.read().await;
                let Some(tributary) = tributaries.get(&genesis) else {
                  // If we don't have this Tributary because it's retired, break and move on
                  if RetiredTributaryDb::get(&raw_db, set).is_some() {
                    break;
                  }

                  // This may happen if the task above is simply slow
                  log::warn!("tributary we don't have yet is supposed to perform a slash report");
                  continue;
                };
                // This is safe to perform multiple times and solely needs atomicity with regards
                // to itself
                // TODO: Should this not take a txn accordingly? It's best practice to take a txn,
                // yet taking a txn fails to declare its achieved independence
                let mut txn = raw_db.txn();
                tributary::publish_signed_transaction(&mut txn, tributary, tx).await;
                txn.commit();
                break;
              }
            }
            None => panic!("perform slash report sender closed"),
          }
        }
      }
    });

    // The closure handed to the Tributary scanner: once an ID is recognized, publish our first
    // preprocess for it as a signed Tributary transaction
    move |set: ExternalValidatorSet, genesis, id_type, id: Vec| {
      log::debug!("recognized ID {:?} {}", id_type, hex::encode(&id));

      let mut raw_db = raw_db.clone();
      let key = key.clone();
      let tributaries = tributaries.clone();
      async move {
        // The transactions for these are fired before the preprocesses are actually
        // received/saved, creating a race between Tributary ack and the availability of all
        // Preprocesses
        // This waits until the necessary preprocess is available 0,
        let get_preprocess = |raw_db, id_type, id| async move {
          loop {
            let Some(preprocess) = FirstPreprocessDb::get(raw_db, set.network, id_type, id) else {
              log::warn!("waiting for preprocess for recognized ID");
              sleep(Duration::from_millis(100)).await;
              continue;
            };
            return preprocess;
          }
        };

        let mut tx = match id_type {
          RecognizedIdType::Batch => Transaction::SubstrateSign(SignData {
            data: get_preprocess(&raw_db, id_type, &id).await,
            plan: SubstrateSignableId::Batch(u32::from_le_bytes(id.try_into().unwrap())),
            label: Label::Preprocess,
            attempt: 0,
            signed: Transaction::empty_signed(),
          }),

          RecognizedIdType::Plan => Transaction::Sign(SignData {
            data: get_preprocess(&raw_db, id_type, &id).await,
            plan: id.try_into().unwrap(),
            label: Label::Preprocess,
            attempt: 0,
            signed: Transaction::empty_signed(),
          }),
        };

        tx.sign(&mut OsRng, genesis, &key);

        let mut first = true;
        loop {
          if !first {
            sleep(Duration::from_millis(100)).await;
          }
          first = false;

          let tributaries = tributaries.read().await;
          let Some(tributary) = tributaries.get(&genesis) else {
            // If we don't have this Tributary because it's retired, break and move on
            if RetiredTributaryDb::get(&raw_db, set).is_some() {
              break;
            }

            // This may happen if the task above is simply slow
            log::warn!("tributary we don't have yet came to consensus on an Batch");
            continue;
          };
          // This is safe to perform multiple times and solely needs atomicity with regards to
          // itself
          // TODO: Should this not take a txn accordingly? It's best practice to take a txn, yet
          // taking a txn fails to declare its achieved independence
          let mut txn = raw_db.txn();
          tributary::publish_signed_transaction(&mut txn, tributary, tx).await;
          txn.commit();

          break;
        }
      }
    }
  };

  // Handle new blocks for each Tributary
  {
    let raw_db = raw_db.clone();
    tokio::spawn(tributary::scanner::scan_tributaries_task(
      raw_db,
      key.clone(),
      recognized_id,
      processors.clone(),
      serai.clone(),
      tributary_event_listener_2,
    ));
  }

  // Spawn the heartbeat task, which will trigger syncing if there hasn't been a Tributary block
  // in a while (presumably because we're behind)
  tokio::spawn(p2p::heartbeat_tributaries_task(p2p.clone(), tributary_event_listener_3));

  // Create the Cosign evaluator
  let cosign_channel = CosignEvaluator::new(raw_db.clone(), p2p.clone(), serai.clone());

  // Handle P2P messages
  tokio::spawn(p2p::handle_p2p_task(
    p2p.clone(),
    cosign_channel.clone(),
    tributary_event_listener_4,
  ));

  // Handle all messages from processors
  handle_processors(
    raw_db,
    key,
    serai,
    processors,
    p2p,
    cosign_channel,
    tributary_event_listener_5,
  )
  .await;
}

// Process entry point: installs a process-exiting panic hook, initializes logging, opens the DB,
// loads the Serai key from the environment, connects to the Serai node, and calls run
#[tokio::main]
async fn main() {
  // Override the panic handler with one which will panic if any tokio task panics
  {
    let existing = std::panic::take_hook();
    std::panic::set_hook(Box::new(move |panic| {
      existing(panic);
      const MSG: &str = "exiting the process due to a task panicking";
      println!("{MSG}");
      log::error!("{MSG}");
      std::process::exit(1);
    }));
  }

  if std::env::var("RUST_LOG").is_err() {
    std::env::set_var("RUST_LOG", serai_env::var("RUST_LOG").unwrap_or_else(|| "info".to_string()));
  }
  env_logger::init();

  log::info!("starting coordinator service...");

  #[allow(unused_variables, unreachable_code)]
  let db = {
    #[cfg(all(feature = "parity-db", feature = "rocksdb"))]
    panic!("built with parity-db and rocksdb");
    #[cfg(all(feature = "parity-db", not(feature = "rocksdb")))]
    let db =
      serai_db::new_parity_db(&serai_env::var("DB_PATH").expect("path to DB wasn't specified"));
    #[cfg(feature = "rocksdb")]
    let db =
      serai_db::new_rocksdb(&serai_env::var("DB_PATH").expect("path to DB wasn't specified"));
    db
  };

  // Load the 32-byte Serai key from the environment, zeroizing every intermediate buffer
  let key = {
    let mut key_hex = serai_env::var("SERAI_KEY").expect("Serai key wasn't provided");
    let mut key_vec = hex::decode(&key_hex).map_err(|_| ()).expect("Serai key wasn't hex-encoded");
    key_hex.zeroize();
    if key_vec.len() != 32 {
      key_vec.zeroize();
      panic!("Serai key had an invalid length");
    }
    let mut key_bytes = [0; 32];
    key_bytes.copy_from_slice(&key_vec);
    key_vec.zeroize();
    let key = Zeroizing::new(::F::from_repr(key_bytes).unwrap());
    key_bytes.zeroize();
    key
  };
  let processors = Arc::new(MessageQueue::from_env(Service::Coordinator));

  // Retry connecting to the Serai node every five seconds until it succeeds
  let serai = (async {
    loop {
      let Ok(serai) = Serai::new(format!(
        "http://{}:9944",
        serai_env::var("SERAI_HOSTNAME").expect("Serai hostname wasn't provided")
      ))
      .await
      else {
        log::error!("couldn't connect to the Serai node");
        sleep(Duration::from_secs(5)).await;
        continue;
      };
      log::info!("made initial connection to Serai node");
      return Arc::new(serai);
    }
  })
  .await;

  let p2p = LibP2p::new(serai.clone());

  run(db, key, p2p, processors, serai).await
}

================================================ FILE: coordinator/src/p2p.rs ================================================

use core::{time::Duration, fmt};
use std::{
  sync::Arc,
  io::{self, Read},
  collections::{HashSet, HashMap},
  time::{SystemTime, Instant},
};

use async_trait::async_trait;

use rand_core::{RngCore, OsRng};

use scale::{Decode, Encode};
use borsh::{BorshSerialize, BorshDeserialize};

use serai_client::{
  primitives::ExternalNetworkId, validator_sets::primitives::ExternalValidatorSet, Serai,
};

use serai_db::Db;

use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, StreamExt};
use tokio::{
  sync::{Mutex, RwLock, mpsc, broadcast},
  time::sleep,
};

use libp2p::{
  core::multiaddr::{Protocol, Multiaddr},
  identity::Keypair,
  PeerId,
  tcp::Config as TcpConfig,
  noise, yamux,
  request_response::{
    Codec as RrCodecTrait, Message as RrMessage, Event as RrEvent, Config as RrConfig,
    Behaviour as RrBehavior, ProtocolSupport,
  },
  gossipsub::{
    IdentTopic, FastMessageId, MessageId, MessageAuthenticity, ValidationMode, ConfigBuilder,
    IdentityTransform, AllowAllSubscriptionFilter, Event as GsEvent, PublishError,
    Behaviour as GsBehavior,
  },
  swarm::{NetworkBehaviour, SwarmEvent},
  SwarmBuilder,
};

pub(crate) use tributary::{ReadWrite, P2p as TributaryP2p};

use crate::{Transaction, Block, Tributary, ActiveTributary, TributaryEvent};

// NOTE(review): generic type arguments in this file appear stripped by text extraction
// (e.g. `Vec`, `RrBehavior`) — restore from the original source before compiling.

// Block size limit + 1 KB of space for signatures/metadata
const MAX_LIBP2P_GOSSIP_MESSAGE_SIZE: usize = tributary::BLOCK_SIZE_LIMIT + 1024;

const MAX_LIBP2P_REQRES_MESSAGE_SIZE: usize =
  (tributary::BLOCK_SIZE_LIMIT * BLOCKS_PER_BATCH) + 1024;

const MAX_LIBP2P_MESSAGE_SIZE: usize = {
  // Manual `max` since `max` isn't a const fn
  if MAX_LIBP2P_GOSSIP_MESSAGE_SIZE > MAX_LIBP2P_REQRES_MESSAGE_SIZE {
    MAX_LIBP2P_GOSSIP_MESSAGE_SIZE
  } else {
    MAX_LIBP2P_REQRES_MESSAGE_SIZE
  }
};

const LIBP2P_TOPIC: &str = "serai-coordinator";

// Amount of blocks in a minute
const BLOCKS_PER_MINUTE: usize = (60 / (tributary::tendermint::TARGET_BLOCK_TIME / 1000)) as usize;

// Maximum amount of blocks to send in a batch
const BLOCKS_PER_BATCH: usize = BLOCKS_PER_MINUTE + 1;

// A cosign for a Substrate block, as broadcast over the gossip network
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, BorshSerialize, BorshDeserialize)]
pub struct CosignedBlock {
  pub network: ExternalNetworkId,
  pub block_number: u64,
  pub block: [u8; 32],
  pub signature: [u8; 64],
}

// Kinds of messages sent over the request/response protocol; Heartbeat/Block carry the genesis
// of the Tributary they relate to
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub enum ReqResMessageKind {
  KeepAlive,
  Heartbeat([u8; 32]),
  Block([u8; 32]),
}

impl ReqResMessageKind {
  // Decodes a kind from its one-byte tag (plus a 32-byte genesis where applicable); returns
  // None on an unknown tag or a short read
  pub fn read(reader: &mut R) -> Option {
    let mut kind = [0; 1];
    reader.read_exact(&mut kind).ok()?;
    match kind[0] {
      0 => Some(ReqResMessageKind::KeepAlive),
      1 => Some({
        let mut genesis = [0; 32];
        reader.read_exact(&mut genesis).ok()?;
        ReqResMessageKind::Heartbeat(genesis)
      }),
      2 => Some({
        let mut genesis = [0; 32];
        reader.read_exact(&mut genesis).ok()?;
        ReqResMessageKind::Block(genesis)
      }),
      _ => None,
    }
  }

  // Inverse of read: one-byte tag, then the genesis for Heartbeat/Block
  pub fn serialize(&self) -> Vec {
    match self {
      ReqResMessageKind::KeepAlive => vec![0],
      ReqResMessageKind::Heartbeat(genesis) => {
        let mut res = vec![1];
        res.extend(genesis);
        res
      }
      ReqResMessageKind::Block(genesis) => {
        let mut res = vec![2];
        res.extend(genesis);
        res
      }
    }
  }
}

// Kinds of messages sent over gossipsub; Tributary carries the relevant genesis
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub enum GossipMessageKind {
  Tributary([u8; 32]),
  CosignedBlock,
}

impl GossipMessageKind {
  // Decodes a kind from its one-byte tag; returns None on an unknown tag or a short read
  pub fn read(reader: &mut R) -> Option {
    let mut kind = [0; 1];
    reader.read_exact(&mut kind).ok()?;
    match kind[0] {
      0 => Some({
        let mut genesis = [0; 32];
        reader.read_exact(&mut genesis).ok()?;
        GossipMessageKind::Tributary(genesis)
      }),
      1 => Some(GossipMessageKind::CosignedBlock),
      _ => None,
    }
  }

  // Inverse of read
  pub fn serialize(&self) -> Vec {
    match self {
      GossipMessageKind::Tributary(genesis) => {
        let mut res = vec![0];
        res.extend(genesis);
        res
      }
      GossipMessageKind::CosignedBlock => {
        vec![1]
      }
    }
  }
}

// Union of the two transport-specific message kinds
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub enum P2pMessageKind {
  ReqRes(ReqResMessageKind),
  Gossip(GossipMessageKind),
}

impl P2pMessageKind {
  // The Tributary genesis this message relates to, if any (None for KeepAlive/CosignedBlock)
  fn genesis(&self) -> Option<[u8; 32]> {
    match self {
      P2pMessageKind::ReqRes(ReqResMessageKind::KeepAlive) |
      P2pMessageKind::Gossip(GossipMessageKind::CosignedBlock) => None,
      P2pMessageKind::ReqRes(
        ReqResMessageKind::Heartbeat(genesis) | ReqResMessageKind::Block(genesis),
      ) |
      P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => Some(*genesis),
    }
  }
}

impl From for P2pMessageKind {
  fn from(kind: ReqResMessageKind) -> P2pMessageKind {
    P2pMessageKind::ReqRes(kind)
  }
}

impl From for P2pMessageKind {
  fn from(kind: GossipMessageKind) -> P2pMessageKind {
    P2pMessageKind::Gossip(kind)
  }
}

// A received P2P message: who sent it, its kind, and the remaining payload bytes
#[derive(Clone, Debug)]
pub struct Message {
  pub sender: P::Id,
  pub kind: P2pMessageKind,
  pub msg: Vec,
}

#[derive(Clone, Debug,
Encode, Decode)] pub struct BlockCommit { pub block: Vec, pub commit: Vec, } #[derive(Clone, Debug, Encode, Decode)] pub struct HeartbeatBatch { pub blocks: Vec, pub timestamp: u64, } #[async_trait] pub trait P2p: Send + Sync + Clone + fmt::Debug + TributaryP2p { type Id: Send + Sync + Clone + Copy + fmt::Debug; async fn subscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]); async fn unsubscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]); async fn send_raw(&self, to: Self::Id, msg: Vec); async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec); async fn receive(&self) -> Message; async fn send(&self, to: Self::Id, kind: ReqResMessageKind, msg: Vec) { let mut actual_msg = kind.serialize(); actual_msg.extend(msg); self.send_raw(to, actual_msg).await; } async fn broadcast(&self, kind: impl Send + Into, msg: Vec) { let kind = kind.into(); let mut actual_msg = match kind { P2pMessageKind::ReqRes(kind) => kind.serialize(), P2pMessageKind::Gossip(kind) => kind.serialize(), }; actual_msg.extend(msg); /* log::trace!( "broadcasting p2p message (kind {})", match kind { P2pMessageKind::KeepAlive => "KeepAlive".to_string(), P2pMessageKind::Tributary(genesis) => format!("Tributary({})", hex::encode(genesis)), P2pMessageKind::Heartbeat(genesis) => format!("Heartbeat({})", hex::encode(genesis)), P2pMessageKind::Block(genesis) => format!("Block({})", hex::encode(genesis)), P2pMessageKind::CosignedBlock => "CosignedBlock".to_string(), } ); */ self.broadcast_raw(kind, actual_msg).await; } } #[derive(Default, Clone, Copy, PartialEq, Eq, Debug)] struct RrCodec; #[async_trait] impl RrCodecTrait for RrCodec { type Protocol = &'static str; type Request = Vec; type Response = Vec; async fn read_request( &mut self, _: &Self::Protocol, io: &mut R, ) -> io::Result> { let mut len = [0; 4]; io.read_exact(&mut len).await?; let len = usize::try_from(u32::from_le_bytes(len)).expect("not at least a 32-bit platform?"); if len > MAX_LIBP2P_REQRES_MESSAGE_SIZE { 
Err(io::Error::other("request length exceeded MAX_LIBP2P_REQRES_MESSAGE_SIZE"))?; } // This may be a non-trivial allocation easily causable // While we could chunk the read, meaning we only perform the allocation as bandwidth is used, // the max message size should be sufficiently sane let mut buf = vec![0; len]; io.read_exact(&mut buf).await?; Ok(buf) } async fn read_response( &mut self, proto: &Self::Protocol, io: &mut R, ) -> io::Result> { self.read_request(proto, io).await } async fn write_request( &mut self, _: &Self::Protocol, io: &mut W, req: Vec, ) -> io::Result<()> { io.write_all( &u32::try_from(req.len()) .map_err(|_| io::Error::other("request length exceeded 2**32"))? .to_le_bytes(), ) .await?; io.write_all(&req).await } async fn write_response( &mut self, proto: &Self::Protocol, io: &mut W, res: Vec, ) -> io::Result<()> { self.write_request(proto, io, res).await } } #[derive(NetworkBehaviour)] struct Behavior { reqres: RrBehavior, gossipsub: GsBehavior, } #[allow(clippy::type_complexity)] #[derive(Clone)] pub struct LibP2p { subscribe: Arc>>, send: Arc)>>>, broadcast: Arc)>>>, receive: Arc>>>, } impl fmt::Debug for LibP2p { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("LibP2p").finish_non_exhaustive() } } impl LibP2p { #[allow(clippy::new_without_default)] pub fn new(serai: Arc) -> Self { log::info!("creating a libp2p instance"); let throwaway_key_pair = Keypair::generate_ed25519(); let behavior = Behavior { reqres: { RrBehavior::new([("/coordinator", ProtocolSupport::Full)], RrConfig::default()) }, gossipsub: { let heartbeat_interval = tributary::tendermint::LATENCY_TIME / 2; let heartbeats_per_block = usize::try_from(tributary::tendermint::TARGET_BLOCK_TIME / heartbeat_interval).unwrap(); use blake2::{Digest, Blake2s256}; let config = ConfigBuilder::default() .heartbeat_interval(Duration::from_millis(heartbeat_interval.into())) .history_length(heartbeats_per_block * 2) .history_gossip(heartbeats_per_block) 
.max_transmit_size(MAX_LIBP2P_GOSSIP_MESSAGE_SIZE) // We send KeepAlive after 80s .idle_timeout(Duration::from_secs(85)) .validation_mode(ValidationMode::Strict) // Uses a content based message ID to avoid duplicates as much as possible .message_id_fn(|msg| { MessageId::new(&Blake2s256::digest([msg.topic.as_str().as_bytes(), &msg.data].concat())) }) // Re-defines for fast ID to prevent needing to convert into a Message to run // message_id_fn // This function is valid for both .fast_message_id_fn(|msg| { FastMessageId::new(&Blake2s256::digest( [msg.topic.as_str().as_bytes(), &msg.data].concat(), )) }) .build(); let mut gossipsub = GsBehavior::::new( MessageAuthenticity::Signed(throwaway_key_pair.clone()), config.unwrap(), ) .unwrap(); // Subscribe to the base topic let topic = IdentTopic::new(LIBP2P_TOPIC); gossipsub.subscribe(&topic).unwrap(); gossipsub }, }; // Uses noise for authentication, yamux for multiplexing // TODO: Do we want to add a custom authentication protocol to only accept connections from // fellow validators? Doing so would reduce the potential for spam // TODO: Relay client? 
let mut swarm = SwarmBuilder::with_existing_identity(throwaway_key_pair) .with_tokio() .with_tcp(TcpConfig::default().nodelay(true), noise::Config::new, || { let mut config = yamux::Config::default(); // 1 MiB default + max message size config.set_max_buffer_size((1024 * 1024) + MAX_LIBP2P_MESSAGE_SIZE); // 256 KiB default + max message size config .set_receive_window_size(((256 * 1024) + MAX_LIBP2P_MESSAGE_SIZE).try_into().unwrap()); config }) .unwrap() .with_behaviour(|_| behavior) .unwrap() .build(); const PORT: u16 = 30563; // 5132 ^ (('c' << 8) | 'o') swarm.listen_on(format!("/ip4/0.0.0.0/tcp/{PORT}").parse().unwrap()).unwrap(); let (send_send, mut send_recv) = mpsc::unbounded_channel(); let (broadcast_send, mut broadcast_recv) = mpsc::unbounded_channel(); let (receive_send, receive_recv) = mpsc::unbounded_channel(); let (subscribe_send, mut subscribe_recv) = mpsc::unbounded_channel(); fn topic_for_set(set: ExternalValidatorSet) -> IdentTopic { IdentTopic::new(format!("{LIBP2P_TOPIC}-{}", hex::encode(set.encode()))) } // TODO: If a network has less than TARGET_PEERS, this will cause retries ad infinitum const TARGET_PEERS: usize = 5; // The addrs we're currently dialing, and the networks associated with them let dialing_peers = Arc::new(RwLock::new(HashMap::new())); // The peers we're currently connected to, and the networks associated with them let connected_peers = Arc::new(RwLock::new(HashMap::>::new())); // Find and connect to peers let (connect_to_network_send, mut connect_to_network_recv) = tokio::sync::mpsc::unbounded_channel(); let (to_dial_send, mut to_dial_recv) = tokio::sync::mpsc::unbounded_channel(); tokio::spawn({ let dialing_peers = dialing_peers.clone(); let connected_peers = connected_peers.clone(); let connect_to_network_send = connect_to_network_send.clone(); async move { loop { let connect = |network: ExternalNetworkId, addr: Multiaddr| { let dialing_peers = dialing_peers.clone(); let connected_peers = connected_peers.clone(); let 
to_dial_send = to_dial_send.clone(); let connect_to_network_send = connect_to_network_send.clone(); async move { log::info!("found peer from substrate: {addr}"); let protocols = addr.iter().filter_map(|piece| match piece { // Drop PeerIds from the Substrate P2p network Protocol::P2p(_) => None, // Use our own TCP port Protocol::Tcp(_) => Some(Protocol::Tcp(PORT)), other => Some(other), }); let mut new_addr = Multiaddr::empty(); for protocol in protocols { new_addr.push(protocol); } let addr = new_addr; log::debug!("transformed found peer: {addr}"); let (is_fresh_dial, nets) = { let mut dialing_peers = dialing_peers.write().await; let is_fresh_dial = !dialing_peers.contains_key(&addr); if is_fresh_dial { dialing_peers.insert(addr.clone(), HashSet::new()); } // Associate this network with this peer dialing_peers.get_mut(&addr).unwrap().insert(network); let nets = dialing_peers.get(&addr).unwrap().clone(); (is_fresh_dial, nets) }; // Spawn a task to remove this peer from 'dialing' in sixty seconds, in case dialing // fails // This performs cleanup and bounds the size of the map to whatever growth occurs // within a temporal window tokio::spawn({ let dialing_peers = dialing_peers.clone(); let connected_peers = connected_peers.clone(); let connect_to_network_send = connect_to_network_send.clone(); let addr = addr.clone(); async move { tokio::time::sleep(core::time::Duration::from_secs(60)).await; let mut dialing_peers = dialing_peers.write().await; if let Some(expected_nets) = dialing_peers.remove(&addr) { log::debug!("removed addr from dialing upon timeout: {addr}"); // TODO: De-duplicate this below instance // If we failed to dial and haven't gotten enough actual connections, retry let connected_peers = connected_peers.read().await; for net in expected_nets { let mut remaining_peers = 0; for nets in connected_peers.values() { if nets.contains(&net) { remaining_peers += 1; } } // If we do not, start connecting to this network again if remaining_peers < TARGET_PEERS { 
connect_to_network_send.send(net).expect( "couldn't send net to connect to due to disconnects (receiver dropped?)", ); } } } } }); if is_fresh_dial { to_dial_send.send((addr, nets)).unwrap(); } } }; // TODO: We should also connect to random peers from random nets as needed for // cosigning // Drain the chainnel, de-duplicating any networks in it let mut connect_to_network_networks = HashSet::new(); while let Ok(network) = connect_to_network_recv.try_recv() { connect_to_network_networks.insert(network); } for network in connect_to_network_networks { if let Ok(mut nodes) = serai.p2p_validators(network.into()).await { // If there's an insufficient amount of nodes known, connect to all yet add it // back and break if nodes.len() < TARGET_PEERS { log::warn!( "insufficient amount of P2P nodes known for {:?}: {}", network, nodes.len() ); // Retry this later connect_to_network_send.send(network).unwrap(); for node in nodes { connect(network, node).await; } continue; } // Randomly select up to 150% of the TARGET_PEERS for _ in 0 .. ((3 * TARGET_PEERS) / 2) { if !nodes.is_empty() { let to_connect = nodes.swap_remove( usize::try_from(OsRng.next_u64() % u64::try_from(nodes.len()).unwrap()) .unwrap(), ); connect(network, to_connect).await; } } } } // Sleep 60 seconds before moving to the next iteration tokio::time::sleep(core::time::Duration::from_secs(60)).await; } } }); // Manage the actual swarm tokio::spawn({ let mut time_of_last_p2p_message = Instant::now(); async move { let connected_peers = connected_peers.clone(); let mut set_for_genesis = HashMap::new(); loop { let time_since_last = Instant::now().duration_since(time_of_last_p2p_message); tokio::select! { biased; // Subscribe to any new topics set = subscribe_recv.recv() => { let (subscribe, set, genesis): (_, ExternalValidatorSet, [u8; 32]) = set.expect("subscribe_recv closed. 
are we shutting down?"); let topic = topic_for_set(set); if subscribe { log::info!("subscribing to p2p messages for {set:?}"); connect_to_network_send.send(set.network).unwrap(); set_for_genesis.insert(genesis, set); swarm.behaviour_mut().gossipsub.subscribe(&topic).unwrap(); } else { log::info!("unsubscribing to p2p messages for {set:?}"); set_for_genesis.remove(&genesis); swarm.behaviour_mut().gossipsub.unsubscribe(&topic).unwrap(); } } msg = send_recv.recv() => { let (peer, msg): (PeerId, Vec) = msg.expect("send_recv closed. are we shutting down?"); swarm.behaviour_mut().reqres.send_request(&peer, msg); }, // Handle any queued outbound messages msg = broadcast_recv.recv() => { // Update the time of last message time_of_last_p2p_message = Instant::now(); let (kind, msg): (P2pMessageKind, Vec) = msg.expect("broadcast_recv closed. are we shutting down?"); if matches!(kind, P2pMessageKind::ReqRes(_)) { // Use request/response, yet send to all connected peers for peer_id in swarm.connected_peers().copied().collect::>() { swarm.behaviour_mut().reqres.send_request(&peer_id, msg.clone()); } } else { // Use gossipsub let set = kind.genesis().and_then(|genesis| set_for_genesis.get(&genesis).copied()); let topic = if let Some(set) = set { topic_for_set(set) } else { IdentTopic::new(LIBP2P_TOPIC) }; match swarm.behaviour_mut().gossipsub.publish(topic, msg.clone()) { Err(PublishError::SigningError(e)) => { panic!("signing error when broadcasting: {e}") }, Err(PublishError::InsufficientPeers) => { log::warn!("failed to send p2p message due to insufficient peers") } Err(PublishError::MessageTooLarge) => { panic!("tried to send a too large message: {}", hex::encode(msg)) } Err(PublishError::TransformFailed(e)) => panic!("IdentityTransform failed: {e}"), Err(PublishError::Duplicate) | Ok(_) => {} } } } // Handle new incoming messages event = swarm.next() => { match event { Some(SwarmEvent::Dialing { connection_id, .. 
}) => { log::debug!("dialing to peer in connection ID {}", &connection_id); } Some(SwarmEvent::ConnectionEstablished { peer_id, connection_id, endpoint, .. }) => { if &peer_id == swarm.local_peer_id() { log::warn!("established a libp2p connection to ourselves"); swarm.close_connection(connection_id); continue; } let addr = endpoint.get_remote_address(); let nets = { let mut dialing_peers = dialing_peers.write().await; if let Some(nets) = dialing_peers.remove(addr) { nets } else { log::debug!("connected to a peer who we didn't have within dialing"); HashSet::new() } }; { let mut connected_peers = connected_peers.write().await; connected_peers.insert(addr.clone(), nets); log::debug!( "connection established to peer {} in connection ID {}, connected peers: {}", &peer_id, &connection_id, connected_peers.len(), ); } } Some(SwarmEvent::ConnectionClosed { peer_id, endpoint, .. }) => { let mut connected_peers = connected_peers.write().await; let Some(nets) = connected_peers.remove(endpoint.get_remote_address()) else { log::debug!("closed connection to peer which wasn't in connected_peers"); continue; }; // Downgrade to a read lock let connected_peers = connected_peers.downgrade(); // For each net we lost a peer for, check if we still have sufficient peers // overall for net in nets { let mut remaining_peers = 0; for nets in connected_peers.values() { if nets.contains(&net) { remaining_peers += 1; } } // If we do not, start connecting to this network again if remaining_peers < TARGET_PEERS { connect_to_network_send .send(net) .expect( "couldn't send net to connect to due to disconnects (receiver dropped?)" ); } } log::debug!( "connection with peer {peer_id} closed, connected peers: {}", connected_peers.len(), ); } Some(SwarmEvent::Behaviour(BehaviorEvent::Reqres( RrEvent::Message { peer, message }, ))) => { let message = match message { RrMessage::Request { request, .. } => request, RrMessage::Response { response, .. 
} => response, }; let mut msg_ref = message.as_slice(); let Some(kind) = ReqResMessageKind::read(&mut msg_ref) else { continue }; let message = Message { sender: peer, kind: P2pMessageKind::ReqRes(kind), msg: msg_ref.to_vec(), }; receive_send.send(message).expect("receive_send closed. are we shutting down?"); } Some(SwarmEvent::Behaviour(BehaviorEvent::Gossipsub( GsEvent::Message { propagation_source, message, .. }, ))) => { let mut msg_ref = message.data.as_slice(); let Some(kind) = GossipMessageKind::read(&mut msg_ref) else { continue }; let message = Message { sender: propagation_source, kind: P2pMessageKind::Gossip(kind), msg: msg_ref.to_vec(), }; receive_send.send(message).expect("receive_send closed. are we shutting down?"); } _ => {} } } // Handle peers to dial addr_and_nets = to_dial_recv.recv() => { let (addr, nets) = addr_and_nets.expect("received address was None (sender dropped?)"); // If we've already dialed and connected to this address, don't further dial them // Just associate these networks with them if let Some(existing_nets) = connected_peers.write().await.get_mut(&addr) { for net in nets { existing_nets.insert(net); } continue; } if let Err(e) = swarm.dial(addr) { log::warn!("dialing peer failed: {e:?}"); } } // If it's been >80s since we've published a message, publish a KeepAlive since we're // still an active service // This is useful when we have no active tributaries and accordingly aren't sending // heartbeats // If we are sending heartbeats, we should've sent one after 60s of no finalized blocks // (where a finalized block only occurs due to network activity), meaning this won't be // run () = tokio::time::sleep(Duration::from_secs(80).saturating_sub(time_since_last)) => { time_of_last_p2p_message = Instant::now(); for peer_id in swarm.connected_peers().copied().collect::>() { swarm .behaviour_mut() .reqres .send_request(&peer_id, ReqResMessageKind::KeepAlive.serialize()); } } } } } }); LibP2p { subscribe: 
Arc::new(Mutex::new(subscribe_send)), send: Arc::new(Mutex::new(send_send)), broadcast: Arc::new(Mutex::new(broadcast_send)), receive: Arc::new(Mutex::new(receive_recv)), } } } #[async_trait] impl P2p for LibP2p { type Id = PeerId; async fn subscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]) { self .subscribe .lock() .await .send((true, set, genesis)) .expect("subscribe_send closed. are we shutting down?"); } async fn unsubscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]) { self .subscribe .lock() .await .send((false, set, genesis)) .expect("subscribe_send closed. are we shutting down?"); } async fn send_raw(&self, peer: Self::Id, msg: Vec) { self.send.lock().await.send((peer, msg)).expect("send_send closed. are we shutting down?"); } async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec) { self .broadcast .lock() .await .send((kind, msg)) .expect("broadcast_send closed. are we shutting down?"); } // TODO: We only have a single handle call this. Differentiate Send/Recv to remove this constant // lock acquisition? async fn receive(&self) -> Message { self.receive.lock().await.recv().await.expect("receive_recv closed. 
are we shutting down?") } } #[async_trait] impl TributaryP2p for LibP2p { async fn broadcast(&self, genesis: [u8; 32], msg: Vec) { ::broadcast(self, GossipMessageKind::Tributary(genesis), msg).await } } pub async fn heartbeat_tributaries_task( p2p: P, mut tributary_event: broadcast::Receiver>, ) { let ten_blocks_of_time = Duration::from_secs((10 * Tributary::::block_time()).into()); let mut readers = HashMap::new(); loop { loop { match tributary_event.try_recv() { Ok(TributaryEvent::NewTributary(ActiveTributary { spec, tributary })) => { readers.insert(spec.set(), tributary.reader()); } Ok(TributaryEvent::TributaryRetired(set)) => { readers.remove(&set); } Err(broadcast::error::TryRecvError::Empty) => break, Err(broadcast::error::TryRecvError::Lagged(_)) => { panic!("heartbeat_tributaries lagged to handle tributary_event") } Err(broadcast::error::TryRecvError::Closed) => panic!("tributary_event sender closed"), } } for tributary in readers.values() { let tip = tributary.tip(); let block_time = SystemTime::UNIX_EPOCH + Duration::from_secs(tributary.time_of_block(&tip).unwrap_or(0)); // Only trigger syncing if the block is more than a minute behind if SystemTime::now() > (block_time + Duration::from_secs(60)) { log::warn!("last known tributary block was over a minute ago"); let mut msg = tip.to_vec(); let time: u64 = SystemTime::now() .duration_since(SystemTime::UNIX_EPOCH) .expect("system clock is wrong") .as_secs(); msg.extend(time.to_le_bytes()); P2p::broadcast(&p2p, ReqResMessageKind::Heartbeat(tributary.genesis()), msg).await; } } // Only check once every 10 blocks of time sleep(ten_blocks_of_time).await; } } pub async fn handle_p2p_task( p2p: P, cosign_channel: mpsc::UnboundedSender, mut tributary_event: broadcast::Receiver>, ) { let channels = Arc::new(RwLock::new(HashMap::<_, mpsc::UnboundedSender>>::new())); tokio::spawn({ let p2p = p2p.clone(); let channels = channels.clone(); let mut set_to_genesis = HashMap::new(); async move { loop { match 
tributary_event.recv().await.unwrap() { TributaryEvent::NewTributary(tributary) => { let genesis = tributary.spec.genesis(); set_to_genesis.insert(tributary.spec.set(), genesis); let (send, mut recv) = mpsc::unbounded_channel(); channels.write().await.insert(genesis, send); // Subscribe to the topic for this tributary p2p.subscribe(tributary.spec.set(), genesis).await; let spec_set = tributary.spec.set(); // Per-Tributary P2P message handler tokio::spawn({ let p2p = p2p.clone(); async move { loop { let Some(msg) = recv.recv().await else { // Channel closure happens when the tributary retires break; }; match msg.kind { P2pMessageKind::ReqRes(ReqResMessageKind::KeepAlive) => {} // TODO: Slash on Heartbeat which justifies a response, since the node // obviously was offline and we must now use our bandwidth to compensate for // them? P2pMessageKind::ReqRes(ReqResMessageKind::Heartbeat(msg_genesis)) => { assert_eq!(msg_genesis, genesis); if msg.msg.len() != 40 { log::error!("validator sent invalid heartbeat"); continue; } // Only respond to recent heartbeats let msg_time = u64::from_le_bytes(msg.msg[32 .. 40].try_into().expect( "length-checked heartbeat message didn't have 8 bytes for the u64", )); if SystemTime::now() .duration_since(SystemTime::UNIX_EPOCH) .expect("system clock is wrong") .as_secs() .saturating_sub(msg_time) > 10 { continue; } log::debug!("received heartbeat with a recent timestamp"); let reader = tributary.tributary.reader(); let p2p = p2p.clone(); // Spawn a dedicated task as this may require loading large amounts of data // from disk and take a notable amount of time tokio::spawn(async move { let mut latest = msg.msg[.. 
32].try_into().unwrap(); let mut to_send = vec![]; while let Some(next) = reader.block_after(&latest) { to_send.push(next); latest = next; } if to_send.len() > 3 { // prepare the batch to sends let mut blocks = vec![]; for (i, next) in to_send.iter().enumerate() { if i >= BLOCKS_PER_BATCH { break; } blocks.push(BlockCommit { block: reader.block(next).unwrap().serialize(), commit: reader.commit(next).unwrap(), }); } let batch = HeartbeatBatch { blocks, timestamp: msg_time }; p2p .send(msg.sender, ReqResMessageKind::Block(genesis), batch.encode()) .await; } }); } P2pMessageKind::ReqRes(ReqResMessageKind::Block(msg_genesis)) => { assert_eq!(msg_genesis, genesis); // decode the batch let Ok(batch) = HeartbeatBatch::decode(&mut msg.msg.as_ref()) else { log::error!( "received HeartBeatBatch message with an invalidly serialized batch" ); continue; }; // sync blocks for bc in batch.blocks { // TODO: why do we use ReadWrite instead of Encode/Decode for blocks? // Should we use the same for batches so we can read both at the same time? 
let Ok(block) = Block::::read(&mut bc.block.as_slice()) else { log::error!("received block message with an invalidly serialized block"); continue; }; let res = tributary.tributary.sync_block(block, bc.commit).await; log::debug!( "received block from {:?}, sync_block returned {}", msg.sender, res ); } } P2pMessageKind::Gossip(GossipMessageKind::Tributary(msg_genesis)) => { assert_eq!(msg_genesis, genesis); log::trace!("handling message for tributary {:?}", spec_set); if tributary.tributary.handle_message(&msg.msg).await { P2p::broadcast(&p2p, msg.kind, msg.msg).await; } } P2pMessageKind::Gossip(GossipMessageKind::CosignedBlock) => unreachable!(), } } } }); } TributaryEvent::TributaryRetired(set) => { if let Some(genesis) = set_to_genesis.remove(&set) { p2p.unsubscribe(set, genesis).await; channels.write().await.remove(&genesis); } } } } } }); loop { let msg = p2p.receive().await; match msg.kind { P2pMessageKind::ReqRes(ReqResMessageKind::KeepAlive) => {} P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) | P2pMessageKind::ReqRes( ReqResMessageKind::Heartbeat(genesis) | ReqResMessageKind::Block(genesis), ) => { if let Some(channel) = channels.read().await.get(&genesis) { channel.send(msg).unwrap(); } } P2pMessageKind::Gossip(GossipMessageKind::CosignedBlock) => { let Ok(msg) = CosignedBlock::deserialize_reader(&mut msg.msg.as_slice()) else { log::error!("received CosignedBlock message with invalidly serialized contents"); continue; }; cosign_channel.send(msg).unwrap(); } } } } ================================================ FILE: coordinator/src/processors.rs ================================================ use std::sync::Arc; use serai_client::primitives::ExternalNetworkId; use processor_messages::{ProcessorMessage, CoordinatorMessage}; use message_queue::{Service, Metadata, client::MessageQueue}; #[derive(Clone, PartialEq, Eq, Debug)] pub struct Message { pub id: u64, pub network: ExternalNetworkId, pub msg: ProcessorMessage, } 
#[async_trait::async_trait] pub trait Processors: 'static + Send + Sync + Clone { async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into); async fn recv(&self, network: ExternalNetworkId) -> Message; async fn ack(&self, msg: Message); } #[async_trait::async_trait] impl Processors for Arc { async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into) { let msg: CoordinatorMessage = msg.into(); let metadata = Metadata { from: self.service, to: Service::Processor(network), intent: msg.intent() }; let msg = borsh::to_vec(&msg).unwrap(); self.queue(metadata, msg).await; } async fn recv(&self, network: ExternalNetworkId) -> Message { let msg = self.next(Service::Processor(network)).await; assert_eq!(msg.from, Service::Processor(network)); let id = msg.id; // Deserialize it into a ProcessorMessage let msg: ProcessorMessage = borsh::from_slice(&msg.msg).expect("message wasn't a borsh-encoded ProcessorMessage"); return Message { id, network, msg }; } async fn ack(&self, msg: Message) { MessageQueue::ack(self, Service::Processor(msg.network), msg.id).await } } ================================================ FILE: coordinator/src/substrate/cosign.rs ================================================ /* If: A) This block has events and it's been at least X blocks since the last cosign or B) This block doesn't have events but it's been X blocks since a skipped block which did have events or C) This block key gens (which changes who the cosigners are) cosign this block. This creates both a minimum and maximum delay of X blocks before a block's cosigning begins, barring key gens which are exceptional. The minimum delay is there to ensure we don't constantly spawn new protocols every 6 seconds, overwriting the old ones. The maximum delay is there to ensure any block needing cosigned is consigned within a reasonable amount of time. 
*/ use zeroize::Zeroizing; use dalek_ff_group::Ristretto; use ciphersuite::Ciphersuite; use borsh::{BorshSerialize, BorshDeserialize}; use serai_client::{ primitives::ExternalNetworkId, validator_sets::primitives::{ExternalValidatorSet, Session}, Serai, SeraiError, }; use serai_db::*; use crate::{Db, substrate::in_set, tributary::SeraiBlockNumber}; // 5 minutes, expressed in blocks // TODO: Pull a constant for block time const COSIGN_DISTANCE: u64 = 5 * 60 / 6; #[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] enum HasEvents { KeyGen, Yes, No, } create_db!( SubstrateCosignDb { ScanCosignFrom: () -> u64, IntendedCosign: () -> (u64, Option), BlockHasEventsCache: (block: u64) -> HasEvents, LatestCosignedBlock: () -> u64, } ); impl IntendedCosign { // Sets the intended to cosign block, clearing the prior value entirely. pub fn set_intended_cosign(txn: &mut impl DbTxn, intended: u64) { Self::set(txn, &(intended, None::)); } // Sets the cosign skipped since the last intended to cosign block. pub fn set_skipped_cosign(txn: &mut impl DbTxn, skipped: u64) { let (intended, prior_skipped) = Self::get(txn).unwrap(); assert!(prior_skipped.is_none()); Self::set(txn, &(intended, Some(skipped))); } } impl LatestCosignedBlock { pub fn latest_cosigned_block(getter: &impl Get) -> u64 { Self::get(getter).unwrap_or_default().max(1) } } db_channel! { SubstrateDbChannels { CosignTransactions: (network: ExternalNetworkId) -> (Session, u64, [u8; 32]), } } impl CosignTransactions { // Append a cosign transaction. pub fn append_cosign( txn: &mut impl DbTxn, set: ExternalValidatorSet, number: u64, hash: [u8; 32], ) { CosignTransactions::send(txn, set.network, &(set.session, number, hash)) } } async fn block_has_events( txn: &mut impl DbTxn, serai: &Serai, block: u64, ) -> Result { let cached = BlockHasEventsCache::get(txn, block); match cached { None => { let serai = serai.as_of( serai .finalized_block_by_number(block) .await? 
.expect("couldn't get block which should've been finalized") .hash(), ); if !serai.validator_sets().key_gen_events().await?.is_empty() { return Ok(HasEvents::KeyGen); } let has_no_events = serai.coins().burn_with_instruction_events().await?.is_empty() && serai.in_instructions().batch_events().await?.is_empty() && serai.validator_sets().new_set_events().await?.is_empty() && serai.validator_sets().set_retired_events().await?.is_empty(); let has_events = if has_no_events { HasEvents::No } else { HasEvents::Yes }; BlockHasEventsCache::set(txn, block, &has_events); Ok(has_events) } Some(code) => Ok(code), } } async fn potentially_cosign_block( txn: &mut impl DbTxn, serai: &Serai, block: u64, skipped_block: Option, window_end_exclusive: u64, ) -> Result { // The following code regarding marking cosigned if prior block is cosigned expects this block to // not be zero // While we could perform this check there, there's no reason not to optimize the entire function // as such if block == 0 { return Ok(false); } let block_has_events = block_has_events(txn, serai, block).await?; // If this block had no events and immediately follows a cosigned block, mark it as cosigned if (block_has_events == HasEvents::No) && (LatestCosignedBlock::latest_cosigned_block(txn) == (block - 1)) { log::debug!("automatically co-signing next block ({block}) since it has no events"); LatestCosignedBlock::set(txn, &block); } // If we skipped a block, we're supposed to sign it plus the COSIGN_DISTANCE if no other blocks // trigger a cosigning protocol covering it // This means there will be the maximum delay allowed from a block needing cosigning occurring // and a cosign for it triggering let maximally_latent_cosign_block = skipped_block.map(|skipped_block| skipped_block + COSIGN_DISTANCE); // If this block is within the window, if block < window_end_exclusive { // and set a key, cosign it if block_has_events == HasEvents::KeyGen { IntendedCosign::set_intended_cosign(txn, block); // Carry skipped if 
it isn't included by cosigning this block if let Some(skipped) = skipped_block { if skipped > block { IntendedCosign::set_skipped_cosign(txn, block); } } return Ok(true); } } else if (Some(block) == maximally_latent_cosign_block) || (block_has_events != HasEvents::No) { // Since this block was outside the window and had events/was maximally latent, cosign it IntendedCosign::set_intended_cosign(txn, block); return Ok(true); } Ok(false) } /* Advances the cosign protocol as should be done per the latest block. A block is considered cosigned if: A) It was cosigned B) It's the parent of a cosigned block C) It immediately follows a cosigned block and has no events requiring cosigning This only actually performs advancement within a limited bound (generally until it finds a block which should be cosigned). Accordingly, it is necessary to call multiple times even if `latest_number` doesn't change. */ async fn advance_cosign_protocol_inner( db: &mut impl Db, key: &Zeroizing<::F>, serai: &Serai, latest_number: u64, ) -> Result<(), SeraiError> { let mut txn = db.txn(); const INITIAL_INTENDED_COSIGN: u64 = 1; let (last_intended_to_cosign_block, mut skipped_block) = { let intended_cosign = IntendedCosign::get(&txn); // If we haven't prior intended to cosign a block, set the intended cosign to 1 if let Some(intended_cosign) = intended_cosign { intended_cosign } else { IntendedCosign::set_intended_cosign(&mut txn, INITIAL_INTENDED_COSIGN); IntendedCosign::get(&txn).unwrap() } }; // "windows" refers to the window of blocks where even if there's a block which should be // cosigned, it won't be due to proximity due to the prior cosign let mut window_end_exclusive = last_intended_to_cosign_block + COSIGN_DISTANCE; // If we've never triggered a cosign, don't skip any cosigns based on proximity if last_intended_to_cosign_block == INITIAL_INTENDED_COSIGN { window_end_exclusive = 1; } // The consensus rules for this are `last_intended_to_cosign_block + 1` let scan_start_block = 
last_intended_to_cosign_block + 1; // As a practical optimization, we don't re-scan old blocks since old blocks are independent to // new state let scan_start_block = scan_start_block.max(ScanCosignFrom::get(&txn).unwrap_or(1)); // Check all blocks within the window to see if they should be cosigned // If so, we're skipping them and need to flag them as skipped so that once the window closes, we // do cosign them // We only perform this check if we haven't already marked a block as skipped since the cosign // the skipped block will cause will cosign all other blocks within this window if skipped_block.is_none() { let window_end_inclusive = window_end_exclusive - 1; for b in scan_start_block ..= window_end_inclusive.min(latest_number) { if block_has_events(&mut txn, serai, b).await? == HasEvents::Yes { skipped_block = Some(b); log::debug!("skipping cosigning {b} due to proximity to prior cosign"); IntendedCosign::set_skipped_cosign(&mut txn, b); break; } } } // A block which should be cosigned let mut to_cosign = None; // A list of sets which are cosigning, along with a boolean of if we're in the set let mut cosigning = vec![]; for block in scan_start_block ..= latest_number { let actual_block = serai .finalized_block_by_number(block) .await? .expect("couldn't get block which should've been finalized"); // Save the block number for this block, as needed by the cosigner to perform cosigning SeraiBlockNumber::set(&mut txn, actual_block.hash(), &block); if potentially_cosign_block(&mut txn, serai, block, skipped_block, window_end_exclusive).await? 
{ to_cosign = Some((block, actual_block.hash())); // Get the keys as of the prior block // If this key sets new keys, the coordinator won't acknowledge so until we process this // block // We won't process this block until its co-signed // Using the keys of the prior block ensures this deadlock isn't reached let serai = serai.as_of(actual_block.header.parent_hash.into()); for network in serai_client::primitives::EXTERNAL_NETWORKS { // Get the latest session to have set keys let set_with_keys = { let Some(latest_session) = serai.validator_sets().session(network.into()).await? else { continue; }; let prior_session = Session(latest_session.0.saturating_sub(1)); if serai .validator_sets() .keys(ExternalValidatorSet { network, session: prior_session }) .await? .is_some() { ExternalValidatorSet { network, session: prior_session } } else { let set = ExternalValidatorSet { network, session: latest_session }; if serai.validator_sets().keys(set).await?.is_none() { continue; } set } }; log::debug!("{:?} will be cosigning {block}", set_with_keys.network); cosigning.push((set_with_keys, in_set(key, &serai, set_with_keys.into()).await?.unwrap())); } break; } // If this TX is committed, always start future scanning from the next block ScanCosignFrom::set(&mut txn, &(block + 1)); // Since we're scanning *from* the next block, tidy the cache BlockHasEventsCache::del(&mut txn, block); } if let Some((number, hash)) = to_cosign { // If this block doesn't have cosigners, yet does have events, automatically mark it as // cosigned if cosigning.is_empty() { log::debug!("{} had no cosigners available, marking as cosigned", number); LatestCosignedBlock::set(&mut txn, &number); } else { for (set, in_set) in cosigning { if in_set { log::debug!("cosigning {number} with {:?} {:?}", set.network, set.session); CosignTransactions::append_cosign(&mut txn, set, number, hash); } } } } txn.commit(); Ok(()) } pub async fn advance_cosign_protocol( db: &mut impl Db, key: &Zeroizing<::F>, serai: &Serai, 
latest_number: u64, ) -> Result<(), SeraiError> { loop { let scan_from = ScanCosignFrom::get(db).unwrap_or(1); // Only scan 1000 blocks at a time to limit a massive txn from forming let scan_to = latest_number.min(scan_from + 1000); advance_cosign_protocol_inner(db, key, serai, scan_to).await?; // If we didn't limit the scan_to, break if scan_to == latest_number { break; } } Ok(()) } ================================================ FILE: coordinator/src/substrate/db.rs ================================================ use serai_client::primitives::ExternalNetworkId; pub use serai_db::*; mod inner_db { use super::*; create_db!( SubstrateDb { NextBlock: () -> u64, HandledEvent: (block: [u8; 32]) -> u32, BatchInstructionsHashDb: (network: ExternalNetworkId, id: u32) -> [u8; 32] } ); } pub(crate) use inner_db::{NextBlock, BatchInstructionsHashDb}; pub struct HandledEvent; impl HandledEvent { fn next_to_handle_event(getter: &impl Get, block: [u8; 32]) -> u32 { inner_db::HandledEvent::get(getter, block).map_or(0, |last| last + 1) } pub fn is_unhandled(getter: &impl Get, block: [u8; 32], event_id: u32) -> bool { let next = Self::next_to_handle_event(getter, block); assert!(next >= event_id); next == event_id } pub fn handle_event(txn: &mut impl DbTxn, block: [u8; 32], index: u32) { assert!(Self::next_to_handle_event(txn, block) == index); inner_db::HandledEvent::set(txn, block, &index); } } ================================================ FILE: coordinator/src/substrate/mod.rs ================================================ use core::{ops::Deref, time::Duration}; use std::{ sync::Arc, collections::{HashSet, HashMap}, }; use zeroize::Zeroizing; use dalek_ff_group::Ristretto; use ciphersuite::{group::GroupEncoding, Ciphersuite}; use serai_client::{ coins::CoinsEvent, in_instructions::InInstructionsEvent, primitives::{BlockHash, ExternalNetworkId}, validator_sets::{ primitives::{ExternalValidatorSet, ValidatorSet}, ValidatorSetsEvent, }, Block, Serai, SeraiError, 
TemporalSerai, }; use serai_db::DbTxn; use processor_messages::SubstrateContext; use tokio::{sync::mpsc, time::sleep}; use crate::{ Db, processors::Processors, tributary::{TributarySpec, SeraiDkgCompleted}, }; mod db; pub use db::*; mod cosign; pub use cosign::*; async fn in_set( key: &Zeroizing<::F>, serai: &TemporalSerai<'_>, set: ValidatorSet, ) -> Result, SeraiError> { let Some(participants) = serai.validator_sets().participants(set.network).await? else { return Ok(None); }; let key = (Ristretto::generator() * key.deref()).to_bytes(); Ok(Some(participants.iter().any(|(participant, _)| participant.0 == key))) } async fn handle_new_set( txn: &mut D::Transaction<'_>, key: &Zeroizing<::F>, new_tributary_spec: &mpsc::UnboundedSender, serai: &Serai, block: &Block, set: ExternalValidatorSet, ) -> Result<(), SeraiError> { if in_set(key, &serai.as_of(block.hash()), set.into()) .await? .expect("NewSet for set which doesn't exist") { log::info!("present in set {:?}", set); let set_data = { let serai = serai.as_of(block.hash()); let serai = serai.validator_sets(); let set_participants = serai.participants(set.network.into()).await?.expect("NewSet for set which doesn't exist"); set_participants.into_iter().map(|(k, w)| (k, u16::try_from(w).unwrap())).collect::>() }; let time = if let Ok(time) = block.time() { time } else { assert_eq!(block.number(), 0); // Use the next block's time loop { let Ok(Some(res)) = serai.finalized_block_by_number(1).await else { sleep(Duration::from_secs(5)).await; continue; }; break res.time().unwrap(); } }; // The block time is in milliseconds yet the Tributary is in seconds let time = time / 1000; // Since this block is in the past, and Tendermint doesn't play nice with starting chains after // their start time (though it does eventually work), delay the start time by 120 seconds // This is meant to handle ~20 blocks of lack of finalization for this first block const SUBSTRATE_TO_TRIBUTARY_TIME_DELAY: u64 = 120; let time = time + 
SUBSTRATE_TO_TRIBUTARY_TIME_DELAY; let spec = TributarySpec::new(block.hash(), time, set, set_data); log::info!("creating new tributary for {:?}", spec.set()); // Save it to the database now, not on the channel receiver's side, so this is safe against // reboots // If this txn finishes, and we reboot, then this'll be reloaded from active Tributaries // If this txn doesn't finish, this will be re-fired // If we waited to save to the DB, this txn may be finished, preventing re-firing, yet the // prior fired event may have not been received yet crate::ActiveTributaryDb::add_participating_in_tributary(txn, &spec); new_tributary_spec.send(spec).unwrap(); } else { log::info!("not present in new set {:?}", set); } Ok(()) } async fn handle_batch_and_burns( txn: &mut impl DbTxn, processors: &Pro, serai: &Serai, block: &Block, ) -> Result<(), SeraiError> { // Track which networks had events with a Vec in ordr to preserve the insertion order // While that shouldn't be needed, ensuring order never hurts, and may enable design choices // with regards to Processor <-> Coordinator message passing let mut networks_with_event = vec![]; let mut network_had_event = |burns: &mut HashMap<_, _>, batches: &mut HashMap<_, _>, network| { // Don't insert this network multiple times // A Vec is still used in order to maintain the insertion order if !networks_with_event.contains(&network) { networks_with_event.push(network); burns.insert(network, vec![]); batches.insert(network, vec![]); } }; let mut batch_block = HashMap::new(); let mut batches = HashMap::>::new(); let mut burns = HashMap::new(); let serai = serai.as_of(block.hash()); for batch in serai.in_instructions().batch_events().await? 
{ if let InInstructionsEvent::Batch { network, id, block: network_block, instructions_hash } = batch { network_had_event(&mut burns, &mut batches, network); BatchInstructionsHashDb::set(txn, network, id, &instructions_hash); // Make sure this is the only Batch event for this network in this Block assert!(batch_block.insert(network, network_block).is_none()); // Add the batch included by this block batches.get_mut(&network).unwrap().push(id); } else { panic!("Batch event wasn't Batch: {batch:?}"); } } for burn in serai.coins().burn_with_instruction_events().await? { if let CoinsEvent::BurnWithInstruction { from: _, instruction } = burn { let network = instruction.balance.coin.network(); network_had_event(&mut burns, &mut batches, network); // network_had_event should register an entry in burns burns.get_mut(&network).unwrap().push(instruction); } else { panic!("Burn event wasn't Burn: {burn:?}"); } } assert_eq!(HashSet::<&_>::from_iter(networks_with_event.iter()).len(), networks_with_event.len()); for network in networks_with_event { let network_latest_finalized_block = if let Some(block) = batch_block.remove(&network) { block } else { // If it's had a batch or a burn, it must have had a block acknowledged serai .in_instructions() .latest_block_for_network(network) .await? 
.expect("network had a batch/burn yet never set a latest block") }; processors .send( network, processor_messages::substrate::CoordinatorMessage::SubstrateBlock { context: SubstrateContext { serai_time: block.time().unwrap() / 1000, network_latest_finalized_block, }, block: block.number(), burns: burns.remove(&network).unwrap(), batches: batches.remove(&network).unwrap(), }, ) .await; } Ok(()) } // Handle a specific Substrate block, returning an error when it fails to get data // (not blocking / holding) #[allow(clippy::too_many_arguments)] async fn handle_block( db: &mut D, key: &Zeroizing<::F>, new_tributary_spec: &mpsc::UnboundedSender, perform_slash_report: &mpsc::UnboundedSender, tributary_retired: &mpsc::UnboundedSender, processors: &Pro, serai: &Serai, block: Block, ) -> Result<(), SeraiError> { let hash = block.hash(); // Define an indexed event ID. let mut event_id = 0; // If a new validator set was activated, create tributary/inform processor to do a DKG for new_set in serai.as_of(hash).validator_sets().new_set_events().await? 
{ // Individually mark each event as handled so on reboot, we minimize duplicates // Additionally, if the Serai connection also fails 1/100 times, this means a block with 1000 // events will successfully be incrementally handled // (though the Serai connection should be stable, making this unnecessary) let ValidatorSetsEvent::NewSet { set } = new_set else { panic!("NewSet event wasn't NewSet: {new_set:?}"); }; // We only coordinate/process external networks let Ok(set) = ExternalValidatorSet::try_from(set) else { continue }; if HandledEvent::is_unhandled(db, hash, event_id) { log::info!("found fresh new set event {:?}", new_set); let mut txn = db.txn(); handle_new_set::(&mut txn, key, new_tributary_spec, serai, &block, set).await?; HandledEvent::handle_event(&mut txn, hash, event_id); txn.commit(); } event_id += 1; } // If a key pair was confirmed, inform the processor for key_gen in serai.as_of(hash).validator_sets().key_gen_events().await? { if HandledEvent::is_unhandled(db, hash, event_id) { log::info!("found fresh key gen event {:?}", key_gen); let ValidatorSetsEvent::KeyGen { set, key_pair } = key_gen else { panic!("KeyGen event wasn't KeyGen: {key_gen:?}"); }; let substrate_key = key_pair.0 .0; processors .send( set.network, processor_messages::substrate::CoordinatorMessage::ConfirmKeyPair { context: SubstrateContext { serai_time: block.time().unwrap() / 1000, network_latest_finalized_block: serai .as_of(block.hash()) .in_instructions() .latest_block_for_network(set.network) .await? 
// The processor treats this as a magic value which will cause it to find a network // block which has a time greater than or equal to the Serai time .unwrap_or(BlockHash([0; 32])), }, session: set.session, key_pair, }, ) .await; // TODO: If we were in the set, yet were removed, drop the tributary let mut txn = db.txn(); SeraiDkgCompleted::set(&mut txn, set, &substrate_key); HandledEvent::handle_event(&mut txn, hash, event_id); txn.commit(); } event_id += 1; } for accepted_handover in serai.as_of(hash).validator_sets().accepted_handover_events().await? { let ValidatorSetsEvent::AcceptedHandover { set } = accepted_handover else { panic!("AcceptedHandover event wasn't AcceptedHandover: {accepted_handover:?}"); }; let Ok(set) = ExternalValidatorSet::try_from(set) else { continue }; if HandledEvent::is_unhandled(db, hash, event_id) { log::info!("found fresh accepted handover event {:?}", accepted_handover); // TODO: This isn't atomic with the event handling // Send a oneshot receiver so we can await the response? perform_slash_report.send(set).unwrap(); let mut txn = db.txn(); HandledEvent::handle_event(&mut txn, hash, event_id); txn.commit(); } event_id += 1; } for retired_set in serai.as_of(hash).validator_sets().set_retired_events().await? 
{ let ValidatorSetsEvent::SetRetired { set } = retired_set else { panic!("SetRetired event wasn't SetRetired: {retired_set:?}"); }; let Ok(set) = ExternalValidatorSet::try_from(set) else { continue }; if HandledEvent::is_unhandled(db, hash, event_id) { log::info!("found fresh set retired event {:?}", retired_set); let mut txn = db.txn(); crate::ActiveTributaryDb::retire_tributary(&mut txn, set); tributary_retired.send(set).unwrap(); HandledEvent::handle_event(&mut txn, hash, event_id); txn.commit(); } event_id += 1; } // Finally, tell the processor of acknowledged blocks/burns // This uses a single event as unlike prior events which individually executed code, all // following events share data collection if HandledEvent::is_unhandled(db, hash, event_id) { let mut txn = db.txn(); handle_batch_and_burns(&mut txn, processors, serai, &block).await?; HandledEvent::handle_event(&mut txn, hash, event_id); txn.commit(); } Ok(()) } #[allow(clippy::too_many_arguments)] async fn handle_new_blocks( db: &mut D, key: &Zeroizing<::F>, new_tributary_spec: &mpsc::UnboundedSender, perform_slash_report: &mpsc::UnboundedSender, tributary_retired: &mpsc::UnboundedSender, processors: &Pro, serai: &Serai, next_block: &mut u64, ) -> Result<(), SeraiError> { // Check if there's been a new Substrate block let latest_number = serai.latest_finalized_block().await?.number(); // Advance the cosigning protocol advance_cosign_protocol(db, key, serai, latest_number).await?; // Reduce to the latest cosigned block let latest_number = latest_number.min(LatestCosignedBlock::latest_cosigned_block(db)); if latest_number < *next_block { return Ok(()); } for b in *next_block ..= latest_number { let block = serai .finalized_block_by_number(b) .await? 
.expect("couldn't get block before the latest finalized block"); log::info!("handling substrate block {b}"); handle_block( db, key, new_tributary_spec, perform_slash_report, tributary_retired, processors, serai, block, ) .await?; *next_block += 1; let mut txn = db.txn(); NextBlock::set(&mut txn, next_block); txn.commit(); log::info!("handled substrate block {b}"); } Ok(()) } pub async fn scan_task( mut db: D, key: Zeroizing<::F>, processors: Pro, serai: Arc, new_tributary_spec: mpsc::UnboundedSender, perform_slash_report: mpsc::UnboundedSender, tributary_retired: mpsc::UnboundedSender, ) { log::info!("scanning substrate"); let mut next_substrate_block = NextBlock::get(&db).unwrap_or_default(); /* let new_substrate_block_notifier = { let serai = &serai; move || async move { loop { match serai.newly_finalized_block().await { Ok(sub) => return sub, Err(e) => { log::error!("couldn't communicate with serai node: {e}"); sleep(Duration::from_secs(5)).await; } } } } }; */ // TODO: Restore the above subscription-based system // That would require moving serai-client from HTTP to websockets let new_substrate_block_notifier = { let serai = &serai; move |next_substrate_block| async move { loop { match serai.latest_finalized_block().await { Ok(latest) => { if latest.header.number >= next_substrate_block { return latest; } sleep(Duration::from_secs(3)).await; } Err(e) => { log::error!("couldn't communicate with serai node: {e}"); sleep(Duration::from_secs(5)).await; } } } } }; loop { // await the next block, yet if our notifier had an error, re-create it { let Ok(_) = tokio::time::timeout( Duration::from_secs(60), new_substrate_block_notifier(next_substrate_block), ) .await else { // Timed out, which may be because Serai isn't finalizing or may be some issue with the // notifier if serai.latest_finalized_block().await.map(|block| block.number()).ok() == Some(next_substrate_block.saturating_sub(1)) { log::info!("serai hasn't finalized a block in the last 60s..."); } continue; }; 
/* // next_block is a Option if next_block.and_then(Result::ok).is_none() { substrate_block_notifier = new_substrate_block_notifier(next_substrate_block); continue; } */ } match handle_new_blocks( &mut db, &key, &new_tributary_spec, &perform_slash_report, &tributary_retired, &processors, &serai, &mut next_substrate_block, ) .await { Ok(()) => {} Err(e) => { log::error!("couldn't communicate with serai node: {e}"); sleep(Duration::from_secs(5)).await; } } } } /// Gets the expected ID for the next Batch. /// /// Will log an error and apply a slight sleep on error, letting the caller simply immediately /// retry. pub(crate) async fn expected_next_batch( serai: &Serai, network: ExternalNetworkId, ) -> Result { async fn expected_next_batch_inner( serai: &Serai, network: ExternalNetworkId, ) -> Result { let serai = serai.as_of_latest_finalized_block().await?; let last = serai.in_instructions().last_batch_for_network(network).await?; Ok(if let Some(last) = last { last + 1 } else { 0 }) } match expected_next_batch_inner(serai, network).await { Ok(next) => Ok(next), Err(e) => { log::error!("couldn't get the expected next batch from substrate: {e:?}"); sleep(Duration::from_millis(100)).await; Err(e) } } } /// Verifies `Batch`s which have already been indexed from Substrate. /// /// Spins if a distinct `Batch` is detected on-chain. /// /// This has a slight malleability in that doesn't verify *who* published a `Batch` is as expected. /// This is deemed fine. 
pub(crate) async fn verify_published_batches(
  txn: &mut D::Transaction<'_>,
  network: ExternalNetworkId,
  optimistic_up_to: u32,
) -> Option {
  // TODO: Localize from MainDb to SubstrateDb
  // Resume from the Batch after the last one already verified, defaulting to ID 0
  let last = crate::LastVerifiedBatchDb::get(txn, network);
  for id in last.map_or(0, |last| last + 1) ..= optimistic_up_to {
    // Stop once we reach a Batch which hasn't been indexed on-chain yet
    let Some(on_chain) = BatchInstructionsHashDb::get(txn, network, id) else {
      break;
    };
    let off_chain = crate::ExpectedBatchDb::get(txn, network, id).unwrap();
    if on_chain != off_chain {
      // Halt operations on this network and spin, as this is a critical fault
      loop {
        log::error!(
          "{}! network: {:?} id: {} off-chain: {} on-chain: {}",
          "on-chain batch doesn't match off-chain",
          network,
          id,
          hex::encode(off_chain),
          hex::encode(on_chain),
        );
        sleep(Duration::from_secs(60)).await;
      }
    }
    // Mark this Batch as verified before moving to the next
    crate::LastVerifiedBatchDb::set(txn, network, &id);
  }
  // Return the ID of the highest verified Batch, if any exists
  crate::LastVerifiedBatchDb::get(txn, network)
}

================================================ FILE: coordinator/src/tests/mod.rs ================================================

use core::fmt::Debug;
use std::{
  sync::Arc,
  collections::{VecDeque, HashSet, HashMap},
};

use serai_client::{primitives::ExternalNetworkId, validator_sets::primitives::ExternalValidatorSet};

use processor_messages::CoordinatorMessage;

use async_trait::async_trait;
use tokio::sync::RwLock;

use crate::{
  processors::{Message, Processors},
  TributaryP2p, ReqResMessageKind, GossipMessageKind, P2pMessageKind, Message as P2pMessage, P2p,
};

pub mod tributary;

// In-memory Processors implementation for tests, queueing sent messages per-network so tests can
// assert over them
#[derive(Clone)]
pub struct MemProcessors(pub Arc>>>);
impl MemProcessors {
  #[allow(clippy::new_without_default)]
  pub fn new() -> MemProcessors {
    MemProcessors(Arc::new(RwLock::new(HashMap::new())))
  }
}

#[async_trait::async_trait]
impl Processors for MemProcessors {
  async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into) {
    let mut processors = self.0.write().await;
    // Lazily create this network's queue upon first use
    let processor = processors.entry(network).or_insert_with(VecDeque::new);
    processor.push_back(msg.into());
  }
  // Tests only inspect sent messages, hence these being unimplemented
  async fn recv(&self, _: ExternalNetworkId) -> Message {
    todo!()
  }
  async fn ack(&self, _: Message) {
    todo!()
  }
}

// In-memory P2p implementation passing messages over queues shared by all validators
// Field 0 is this validator's index, field 1 the shared state (messages already broadcast, and one
// inbound queue per validator)
#[allow(clippy::type_complexity)]
#[derive(Clone, Debug)]
pub struct LocalP2p(
  usize,
  pub Arc>, Vec)>>)>>,
);

impl LocalP2p {
  // Create one LocalP2p per validator, all sharing a single set of queues
  pub fn new(validators: usize) -> Vec {
    let shared = Arc::new(RwLock::new((HashSet::new(), vec![VecDeque::new(); validators])));
    let mut res = vec![];
    for i in 0 .. validators {
      res.push(LocalP2p(i, shared.clone()));
    }
    res
  }
}

#[async_trait]
impl P2p for LocalP2p {
  type Id = usize;

  // Subscriptions are no-ops as broadcast_raw delivers to every validator's queue regardless
  async fn subscribe(&self, _set: ExternalValidatorSet, _genesis: [u8; 32]) {}
  async fn unsubscribe(&self, _set: ExternalValidatorSet, _genesis: [u8; 32]) {}

  async fn send_raw(&self, to: Self::Id, msg: Vec) {
    let mut msg_ref = msg.as_slice();
    let kind = ReqResMessageKind::read(&mut msg_ref).unwrap();
    // Push the message, sans the kind prefix just read, directly onto the recipient's queue
    self.1.write().await.1[to].push_back((self.0, P2pMessageKind::ReqRes(kind), msg_ref.to_vec()));
  }

  async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec) {
    // Content-based deduplication
    let mut lock = self.1.write().await;
    {
      let already_sent = &mut lock.0;
      if already_sent.contains(&msg) {
        return;
      }
      already_sent.insert(msg.clone());
    }
    let queues = &mut lock.1;

    // Strip the serialized kind prefix from the message before queueing it
    let kind_len = (match kind {
      P2pMessageKind::ReqRes(kind) => kind.serialize(),
      P2pMessageKind::Gossip(kind) => kind.serialize(),
    })
    .len();
    let msg = msg[kind_len ..].to_vec();

    // Deliver to every validator's queue other than our own
    for (i, msg_queue) in queues.iter_mut().enumerate() {
      if i == self.0 {
        continue;
      }
      msg_queue.push_back((self.0, kind, msg.clone()));
    }
  }

  async fn receive(&self) -> P2pMessage {
    // This is a cursed way to implement an async read from a Vec
    loop {
      if let Some((sender, kind, msg)) = self.1.write().await.1[self.0].pop_front() {
        return P2pMessage { sender, kind, msg };
      }
      tokio::time::sleep(std::time::Duration::from_millis(100)).await;
    }
  }
}

#[async_trait]
impl TributaryP2p for LocalP2p {
  async fn broadcast(&self, genesis: [u8; 32], msg: Vec) {
    // Forward to the P2p broadcast, tagging the message as Tributary gossip for this genesis
    ::broadcast(
      self,
      P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)),
      msg,
    )
    .await
  }
}
================================================ FILE: coordinator/src/tests/tributary/chain.rs ================================================ use std::{ time::{Duration, SystemTime}, collections::HashSet, }; use zeroize::Zeroizing; use rand_core::{RngCore, CryptoRng, OsRng}; use futures_util::{task::Poll, poll}; use dalek_ff_group::Ristretto; use ciphersuite::{ group::{ff::Field, GroupEncoding}, Ciphersuite, }; use sp_application_crypto::sr25519; use borsh::BorshDeserialize; use serai_client::{ primitives::ExternalNetworkId, validator_sets::primitives::{ExternalValidatorSet, Session}, }; use tokio::time::sleep; use serai_db::MemDb; use tributary::Tributary; use crate::{ GossipMessageKind, P2pMessageKind, P2p, tributary::{Transaction, TributarySpec}, tests::LocalP2p, }; pub fn new_keys( rng: &mut R, ) -> Vec::F>> { let mut keys = vec![]; for _ in 0 .. 5 { keys.push(Zeroizing::new(::F::random(&mut *rng))); } keys } pub fn new_spec( rng: &mut R, keys: &[Zeroizing<::F>], ) -> TributarySpec { let mut serai_block = [0; 32]; rng.fill_bytes(&mut serai_block); let start_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs(); let set = ExternalValidatorSet { session: Session(0), network: ExternalNetworkId::Bitcoin }; let set_participants = keys .iter() .map(|key| { (sr25519::Public::from((::generator() * **key).to_bytes()), 1) }) .collect::>(); let res = TributarySpec::new(serai_block, start_time, set, set_participants); assert_eq!( TributarySpec::deserialize_reader(&mut borsh::to_vec(&res).unwrap().as_slice()).unwrap(), res, ); res } pub async fn new_tributaries( keys: &[Zeroizing<::F>], spec: &TributarySpec, ) -> Vec<(MemDb, LocalP2p, Tributary)> { let p2p = LocalP2p::new(keys.len()); let mut res = vec![]; for (i, key) in keys.iter().enumerate() { let db = MemDb::new(); res.push(( db.clone(), p2p[i].clone(), Tributary::<_, Transaction, _>::new( db, spec.genesis(), spec.start_time(), key.clone(), spec.validators(), p2p[i].clone(), ) .await 
.unwrap(), )); } res } pub async fn run_tributaries( mut tributaries: Vec<(LocalP2p, Tributary)>, ) { loop { for (p2p, tributary) in &mut tributaries { while let Poll::Ready(msg) = poll!(p2p.receive()) { match msg.kind { P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => { assert_eq!(genesis, tributary.genesis()); if tributary.handle_message(&msg.msg).await { p2p.broadcast(msg.kind, msg.msg).await; } } _ => panic!("unexpected p2p message found"), } } } sleep(Duration::from_millis(100)).await; } } pub async fn wait_for_tx_inclusion( tributary: &Tributary, mut last_checked: [u8; 32], hash: [u8; 32], ) -> [u8; 32] { let reader = tributary.reader(); loop { let tip = tributary.tip().await; if tip == last_checked { sleep(Duration::from_secs(1)).await; continue; } let mut queue = vec![reader.block(&tip).unwrap()]; let mut block = None; while { let parent = queue.last().unwrap().parent(); if parent == tributary.genesis() { false } else { block = Some(reader.block(&parent).unwrap()); block.as_ref().unwrap().hash() != last_checked } } { queue.push(block.take().unwrap()); } while let Some(block) = queue.pop() { for tx in &block.transactions { if tx.hash() == hash { return block.hash(); } } } last_checked = tip; } } #[tokio::test] async fn tributary_test() { let keys = new_keys(&mut OsRng); let spec = new_spec(&mut OsRng, &keys); let mut tributaries = new_tributaries(&keys, &spec) .await .into_iter() .map(|(_, p2p, tributary)| (p2p, tributary)) .collect::>(); let mut blocks = 0; let mut last_block = spec.genesis(); // Doesn't use run_tributaries as we want to wind these down at a certain point // run_tributaries will run them ad infinitum let timeout = SystemTime::now() + Duration::from_secs(65); while (blocks < 10) && (SystemTime::now().duration_since(timeout).is_err()) { for (p2p, tributary) in &mut tributaries { while let Poll::Ready(msg) = poll!(p2p.receive()) { match msg.kind { P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => { 
assert_eq!(genesis, tributary.genesis()); tributary.handle_message(&msg.msg).await; } _ => panic!("unexpected p2p message found"), } } } let tip = tributaries[0].1.tip().await; if tip != last_block { last_block = tip; blocks += 1; } sleep(Duration::from_millis(100)).await; } if blocks != 10 { panic!("tributary chain test hit timeout"); } // Handle all existing messages for (p2p, tributary) in &mut tributaries { while let Poll::Ready(msg) = poll!(p2p.receive()) { match msg.kind { P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => { assert_eq!(genesis, tributary.genesis()); tributary.handle_message(&msg.msg).await; } _ => panic!("unexpected p2p message found"), } } } // handle_message informed the Tendermint machine, yet it still has to process it // Sleep for a second accordingly // TODO: Is there a better way to handle this? sleep(Duration::from_secs(1)).await; // All tributaries should agree on the tip, within a block let mut tips = HashSet::new(); for (_, tributary) in &tributaries { tips.insert(tributary.tip().await); } assert!(tips.len() <= 2); if tips.len() == 2 { for tip in &tips { // Find a Tributary where this isn't the tip for (_, tributary) in &tributaries { let Some(after) = tributary.reader().block_after(tip) else { continue }; // Make sure the block after is the other tip assert!(tips.contains(&after)); return; } } } else { assert_eq!(tips.len(), 1); return; } panic!("tributary had different tip with a variance exceeding one block"); } ================================================ FILE: coordinator/src/tests/tributary/dkg.rs ================================================ use core::time::Duration; use std::collections::HashMap; use zeroize::Zeroizing; use rand_core::{RngCore, OsRng}; use dalek_ff_group::Ristretto; use ciphersuite::{group::GroupEncoding, Ciphersuite}; use frost::Participant; use sp_runtime::traits::Verify; use serai_client::{ primitives::{SeraiAddress, Signature}, validator_sets::primitives::{ExternalValidatorSet, 
KeyPair}, }; use tokio::time::sleep; use serai_db::{Get, DbTxn, Db, MemDb}; use processor_messages::{ key_gen::{self, KeyGenId}, CoordinatorMessage, }; use tributary::{TransactionTrait, Tributary}; use crate::{ tributary::{ Transaction, TributarySpec, scanner::{PublishSeraiTransaction, handle_new_blocks}, }, tests::{ MemProcessors, LocalP2p, tributary::{new_keys, new_spec, new_tributaries, run_tributaries, wait_for_tx_inclusion}, }, }; #[tokio::test] async fn dkg_test() { env_logger::init(); let keys = new_keys(&mut OsRng); let spec = new_spec(&mut OsRng, &keys); let full_tributaries = new_tributaries(&keys, &spec).await; let mut dbs = vec![]; let mut tributaries = vec![]; for (db, p2p, tributary) in full_tributaries { dbs.push(db); tributaries.push((p2p, tributary)); } // Run the tributaries in the background tokio::spawn(run_tributaries(tributaries.clone())); let mut txs = vec![]; // Create DKG commitments for each key for key in &keys { let attempt = 0; let mut commitments = vec![0; 256]; OsRng.fill_bytes(&mut commitments); let mut tx = Transaction::DkgCommitments { attempt, commitments: vec![commitments], signed: Transaction::empty_signed(), }; tx.sign(&mut OsRng, spec.genesis(), key); txs.push(tx); } let block_before_tx = tributaries[0].1.tip().await; // Publish all commitments but one for (i, tx) in txs.iter().enumerate().skip(1) { assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true)); } // Wait until these are included for tx in txs.iter().skip(1) { wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await; } let expected_commitments: HashMap<_, _> = txs .iter() .enumerate() .map(|(i, tx)| { if let Transaction::DkgCommitments { commitments, .. 
} = tx { (Participant::new((i + 1).try_into().unwrap()).unwrap(), commitments[0].clone()) } else { panic!("txs had non-commitments"); } }) .collect(); async fn new_processors( db: &mut MemDb, key: &Zeroizing<::F>, spec: &TributarySpec, tributary: &Tributary, ) -> MemProcessors { let processors = MemProcessors::new(); handle_new_blocks::<_, _, _, _, _, LocalP2p>( db, key, &|_, _, _, _| async { panic!("provided TX caused recognized_id to be called in new_processors") }, &processors, &(), &|_| async { panic!( "test tried to publish a new Tributary TX from handle_application_tx in new_processors" ) }, spec, &tributary.reader(), ) .await; processors } // Instantiate a scanner and verify it has nothing to report let processors = new_processors(&mut dbs[0], &keys[0], &spec, &tributaries[0].1).await; assert!(processors.0.read().await.is_empty()); // Publish the last commitment let block_before_tx = tributaries[0].1.tip().await; assert_eq!(tributaries[0].1.add_transaction(txs[0].clone()).await, Ok(true)); wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await; sleep(Duration::from_secs(Tributary::::block_time().into())).await; // Verify the scanner emits a KeyGen::Commitments message handle_new_blocks::<_, _, _, _, _, LocalP2p>( &mut dbs[0], &keys[0], &|_, _, _, _| async { panic!("provided TX caused recognized_id to be called after Commitments") }, &processors, &(), &|_| async { panic!( "test tried to publish a new Tributary TX from handle_application_tx after Commitments" ) }, &spec, &tributaries[0].1.reader(), ) .await; { let mut msgs = processors.0.write().await; assert_eq!(msgs.len(), 1); let msgs = msgs.get_mut(&spec.set().network).unwrap(); let mut expected_commitments = expected_commitments.clone(); expected_commitments.remove(&Participant::new((1).try_into().unwrap()).unwrap()); assert_eq!( msgs.pop_front().unwrap(), CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments { id: KeyGenId { session: spec.set().session, attempt: 0 }, 
commitments: expected_commitments }) ); assert!(msgs.is_empty()); } // Verify all keys exhibit this scanner behavior for (i, key) in keys.iter().enumerate().skip(1) { let processors = new_processors(&mut dbs[i], key, &spec, &tributaries[i].1).await; let mut msgs = processors.0.write().await; assert_eq!(msgs.len(), 1); let msgs = msgs.get_mut(&spec.set().network).unwrap(); let mut expected_commitments = expected_commitments.clone(); expected_commitments.remove(&Participant::new((i + 1).try_into().unwrap()).unwrap()); assert_eq!( msgs.pop_front().unwrap(), CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments { id: KeyGenId { session: spec.set().session, attempt: 0 }, commitments: expected_commitments }) ); assert!(msgs.is_empty()); } // Now do shares let mut txs = vec![]; for (k, key) in keys.iter().enumerate() { let attempt = 0; let mut shares = vec![vec![]]; for i in 0 .. keys.len() { if i != k { let mut share = vec![0; 256]; OsRng.fill_bytes(&mut share); shares.last_mut().unwrap().push(share); } } let mut txn = dbs[k].txn(); let mut tx = Transaction::DkgShares { attempt, shares, confirmation_nonces: crate::tributary::dkg_confirmation_nonces(key, &spec, &mut txn, 0), signed: Transaction::empty_signed(), }; txn.commit(); tx.sign(&mut OsRng, spec.genesis(), key); txs.push(tx); } let block_before_tx = tributaries[0].1.tip().await; for (i, tx) in txs.iter().enumerate().skip(1) { assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true)); } for tx in txs.iter().skip(1) { wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await; } // With just 4 sets of shares, nothing should happen yet handle_new_blocks::<_, _, _, _, _, LocalP2p>( &mut dbs[0], &keys[0], &|_, _, _, _| async { panic!("provided TX caused recognized_id to be called after some shares") }, &processors, &(), &|_| async { panic!( "test tried to publish a new Tributary TX from handle_application_tx after some shares" ) }, &spec, &tributaries[0].1.reader(), ) .await; 
assert_eq!(processors.0.read().await.len(), 1); assert!(processors.0.read().await[&spec.set().network].is_empty()); // Publish the final set of shares let block_before_tx = tributaries[0].1.tip().await; assert_eq!(tributaries[0].1.add_transaction(txs[0].clone()).await, Ok(true)); wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await; sleep(Duration::from_secs(Tributary::::block_time().into())).await; // Each scanner should emit a distinct shares message let shares_for = |i: usize| { CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Shares { id: KeyGenId { session: spec.set().session, attempt: 0 }, shares: vec![txs .iter() .enumerate() .filter_map(|(l, tx)| { if let Transaction::DkgShares { shares, .. } = tx { if i == l { None } else { let relative_i = i - (if i > l { 1 } else { 0 }); Some(( Participant::new((l + 1).try_into().unwrap()).unwrap(), shares[0][relative_i].clone(), )) } } else { panic!("txs had non-shares"); } }) .collect::>()], }) }; // Any scanner which has handled the prior blocks should only emit the new event for (i, key) in keys.iter().enumerate() { handle_new_blocks::<_, _, _, _, _, LocalP2p>( &mut dbs[i], key, &|_, _, _, _| async { panic!("provided TX caused recognized_id to be called after shares") }, &processors, &(), &|_| async { panic!("test tried to publish a new Tributary TX from handle_application_tx") }, &spec, &tributaries[i].1.reader(), ) .await; { let mut msgs = processors.0.write().await; assert_eq!(msgs.len(), 1); let msgs = msgs.get_mut(&spec.set().network).unwrap(); assert_eq!(msgs.pop_front().unwrap(), shares_for(i)); assert!(msgs.is_empty()); } } // Yet new scanners should emit all events for (i, key) in keys.iter().enumerate() { let processors = new_processors(&mut MemDb::new(), key, &spec, &tributaries[i].1).await; let mut msgs = processors.0.write().await; assert_eq!(msgs.len(), 1); let msgs = msgs.get_mut(&spec.set().network).unwrap(); let mut expected_commitments = expected_commitments.clone(); 
expected_commitments.remove(&Participant::new((i + 1).try_into().unwrap()).unwrap()); assert_eq!( msgs.pop_front().unwrap(), CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments { id: KeyGenId { session: spec.set().session, attempt: 0 }, commitments: expected_commitments }) ); assert_eq!(msgs.pop_front().unwrap(), shares_for(i)); assert!(msgs.is_empty()); } // Send DkgConfirmed let mut substrate_key = [0; 32]; OsRng.fill_bytes(&mut substrate_key); let mut network_key = vec![0; usize::try_from((OsRng.next_u64() % 32) + 32).unwrap()]; OsRng.fill_bytes(&mut network_key); let key_pair = KeyPair(serai_client::Public::from(substrate_key), network_key.try_into().unwrap()); let mut txs = vec![]; for (i, key) in keys.iter().enumerate() { let attempt = 0; let mut txn = dbs[i].txn(); let share = crate::tributary::generated_key_pair::(&mut txn, key, &spec, &key_pair, 0).unwrap(); txn.commit(); let mut tx = Transaction::DkgConfirmed { attempt, confirmation_share: share, signed: Transaction::empty_signed(), }; tx.sign(&mut OsRng, spec.genesis(), key); txs.push(tx); } let block_before_tx = tributaries[0].1.tip().await; for (i, tx) in txs.iter().enumerate() { assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true)); } for tx in &txs { wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await; } struct CheckPublishSetKeys { spec: TributarySpec, key_pair: KeyPair, } #[async_trait::async_trait] impl PublishSeraiTransaction for CheckPublishSetKeys { async fn publish_set_keys( &self, _db: &(impl Sync + Get), set: ExternalValidatorSet, removed: Vec, key_pair: KeyPair, signature: Signature, ) { assert_eq!(set, self.spec.set()); assert!(removed.is_empty()); assert_eq!(self.key_pair, key_pair); assert!(signature.verify( &*serai_client::validator_sets::primitives::set_keys_message(&set, &[], &key_pair), &serai_client::Public::from( dkg_musig::musig_key_vartime::( serai_client::validator_sets::primitives::musig_context(set.into()), 
&self.spec.validators().into_iter().map(|(validator, _)| validator).collect::>() ) .unwrap() .to_bytes() ), )); } } // The scanner should successfully try to publish a transaction with a validly signed signature handle_new_blocks::<_, _, _, _, _, LocalP2p>( &mut dbs[0], &keys[0], &|_, _, _, _| async { panic!("provided TX caused recognized_id to be called after DKG confirmation") }, &processors, &CheckPublishSetKeys { spec: spec.clone(), key_pair: key_pair.clone() }, &|_| async { panic!("test tried to publish a new Tributary TX from handle_application_tx") }, &spec, &tributaries[0].1.reader(), ) .await; { assert!(processors.0.read().await.get(&spec.set().network).unwrap().is_empty()); } } ================================================ FILE: coordinator/src/tests/tributary/handle_p2p.rs ================================================ use core::time::Duration; use std::sync::Arc; use rand_core::OsRng; use tokio::{ sync::{mpsc, broadcast}, time::sleep, }; use serai_db::MemDb; use tributary::Tributary; use crate::{ tributary::Transaction, ActiveTributary, TributaryEvent, p2p::handle_p2p_task, tests::{ LocalP2p, tributary::{new_keys, new_spec, new_tributaries}, }, }; #[tokio::test] async fn handle_p2p_test() { let keys = new_keys(&mut OsRng); let spec = new_spec(&mut OsRng, &keys); let mut tributaries = new_tributaries(&keys, &spec) .await .into_iter() .map(|(_, p2p, tributary)| (p2p, tributary)) .collect::>(); let mut tributary_senders = vec![]; let mut tributary_arcs = vec![]; for (p2p, tributary) in tributaries.drain(..) 
{ let tributary = Arc::new(tributary); tributary_arcs.push(tributary.clone()); let (new_tributary_send, new_tributary_recv) = broadcast::channel(5); let (cosign_send, _) = mpsc::unbounded_channel(); tokio::spawn(handle_p2p_task(p2p, cosign_send, new_tributary_recv)); new_tributary_send .send(TributaryEvent::NewTributary(ActiveTributary { spec: spec.clone(), tributary })) .map_err(|_| "failed to send ActiveTributary") .unwrap(); tributary_senders.push(new_tributary_send); } let tributaries = tributary_arcs; // After two blocks of time, we should have a new block // We don't wait one block of time as we may have missed the chance for this block sleep(Duration::from_secs((2 * Tributary::::block_time()).into())) .await; let tip = tributaries[0].tip().await; assert!(tip != spec.genesis()); // Sleep one second to make sure this block propagates sleep(Duration::from_secs(1)).await; // Make sure every tributary has it for tributary in &tributaries { assert!(tributary.reader().block(&tip).is_some()); } // Then after another block of time, we should have yet another new block sleep(Duration::from_secs(Tributary::::block_time().into())).await; let new_tip = tributaries[0].tip().await; assert!(new_tip != tip); sleep(Duration::from_secs(1)).await; for tributary in tributaries { assert!(tributary.reader().block(&new_tip).is_some()); } } ================================================ FILE: coordinator/src/tests/tributary/mod.rs ================================================ use core::fmt::Debug; use rand_core::{RngCore, OsRng}; use dalek_ff_group::Ristretto; use ciphersuite::{group::Group, Ciphersuite}; use scale::{Encode, Decode}; use serai_client::{ primitives::{SeraiAddress, Signature}, validator_sets::primitives::{ExternalValidatorSet, KeyPair, MAX_KEY_SHARES_PER_SET}, }; use processor_messages::coordinator::SubstrateSignableId; use tributary::{ReadWrite, tests::random_signed_with_nonce}; use crate::tributary::{Label, SignData, Transaction, 
scanner::PublishSeraiTransaction}; mod chain; pub use chain::*; mod tx; mod dkg; // TODO: Test the other transactions mod handle_p2p; mod sync; #[async_trait::async_trait] impl PublishSeraiTransaction for () { async fn publish_set_keys( &self, _db: &(impl Sync + serai_db::Get), _set: ExternalValidatorSet, _removed: Vec, _key_pair: KeyPair, _signature: Signature, ) { panic!("publish_set_keys was called in test") } } fn random_u32(rng: &mut R) -> u32 { u32::try_from(rng.next_u64() >> 32).unwrap() } fn random_vec(rng: &mut R, limit: usize) -> Vec { let len = usize::try_from(rng.next_u64() % u64::try_from(limit).unwrap()).unwrap(); let mut res = vec![0; len]; rng.fill_bytes(&mut res); res } fn random_sign_data( rng: &mut R, plan: Id, label: Label, ) -> SignData { SignData { plan, attempt: random_u32(&mut OsRng), label, data: { let mut res = vec![]; for _ in 0 ..= (rng.next_u64() % 255) { res.push(random_vec(&mut OsRng, 512)); } res }, signed: random_signed_with_nonce(&mut OsRng, label.nonce()), } } fn test_read_write(value: &RW) { assert_eq!(value, &RW::read::<&[u8]>(&mut value.serialize().as_ref()).unwrap()); } #[test] fn tx_size_limit() { use serai_client::validator_sets::primitives::MAX_KEY_LEN; use tributary::TRANSACTION_SIZE_LIMIT; let max_dkg_coefficients = (MAX_KEY_SHARES_PER_SET * 2).div_ceil(3) + 1; let max_key_shares_per_individual = MAX_KEY_SHARES_PER_SET - max_dkg_coefficients; // Handwave the DKG Commitments size as the size of the commitments to the coefficients and // 1024 bytes for all overhead let handwaved_dkg_commitments_size = (max_dkg_coefficients * MAX_KEY_LEN) + 1024; assert!( u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >= (handwaved_dkg_commitments_size * max_key_shares_per_individual) ); // Encryption key, PoP (2 elements), message let elements_per_share = 4; let handwaved_dkg_shares_size = (elements_per_share * MAX_KEY_LEN * MAX_KEY_SHARES_PER_SET) + 1024; assert!( u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >= (handwaved_dkg_shares_size 
* max_key_shares_per_individual) ); } #[test] fn serialize_sign_data() { fn test_read_write(value: &SignData) { let mut buf = vec![]; value.write(&mut buf).unwrap(); assert_eq!(value, &SignData::read(&mut buf.as_slice()).unwrap()) } let mut plan = [0; 3]; OsRng.fill_bytes(&mut plan); test_read_write(&random_sign_data::<_, _>( &mut OsRng, plan, if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share }, )); let mut plan = [0; 5]; OsRng.fill_bytes(&mut plan); test_read_write(&random_sign_data::<_, _>( &mut OsRng, plan, if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share }, )); let mut plan = [0; 8]; OsRng.fill_bytes(&mut plan); test_read_write(&random_sign_data::<_, _>( &mut OsRng, plan, if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share }, )); let mut plan = [0; 24]; OsRng.fill_bytes(&mut plan); test_read_write(&random_sign_data::<_, _>( &mut OsRng, plan, if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share }, )); } #[test] fn serialize_transaction() { test_read_write(&Transaction::RemoveParticipantDueToDkg { participant: ::G::random(&mut OsRng), signed: random_signed_with_nonce(&mut OsRng, 0), }); { let mut commitments = vec![random_vec(&mut OsRng, 512)]; for _ in 0 .. 
(OsRng.next_u64() % 100) { let mut temp = commitments[0].clone(); OsRng.fill_bytes(&mut temp); commitments.push(temp); } test_read_write(&Transaction::DkgCommitments { attempt: random_u32(&mut OsRng), commitments, signed: random_signed_with_nonce(&mut OsRng, 0), }); } { // This supports a variable share length, and variable amount of sent shares, yet share length // and sent shares is expected to be constant among recipients let share_len = usize::try_from((OsRng.next_u64() % 512) + 1).unwrap(); let amount_of_shares = usize::try_from((OsRng.next_u64() % 3) + 1).unwrap(); // Create a valid vec of shares let mut shares = vec![]; // Create up to 150 participants for _ in 0 ..= (OsRng.next_u64() % 150) { // Give each sender multiple shares let mut sender_shares = vec![]; for _ in 0 .. amount_of_shares { let mut share = vec![0; share_len]; OsRng.fill_bytes(&mut share); sender_shares.push(share); } shares.push(sender_shares); } test_read_write(&Transaction::DkgShares { attempt: random_u32(&mut OsRng), shares, confirmation_nonces: { let mut nonces = [0; 64]; OsRng.fill_bytes(&mut nonces); nonces }, signed: random_signed_with_nonce(&mut OsRng, 1), }); } for i in 0 .. 
2 { test_read_write(&Transaction::InvalidDkgShare { attempt: random_u32(&mut OsRng), accuser: frost::Participant::new( u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1), ) .unwrap(), faulty: frost::Participant::new( u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1), ) .unwrap(), blame: if i == 0 { None } else { Some(random_vec(&mut OsRng, 500)).filter(|blame| !blame.is_empty()) }, signed: random_signed_with_nonce(&mut OsRng, 2), }); } test_read_write(&Transaction::DkgConfirmed { attempt: random_u32(&mut OsRng), confirmation_share: { let mut share = [0; 32]; OsRng.fill_bytes(&mut share); share }, signed: random_signed_with_nonce(&mut OsRng, 2), }); { let mut block = [0; 32]; OsRng.fill_bytes(&mut block); test_read_write(&Transaction::CosignSubstrateBlock(block)); } { let mut block = [0; 32]; OsRng.fill_bytes(&mut block); let batch = u32::try_from(OsRng.next_u64() >> 32).unwrap(); test_read_write(&Transaction::Batch { block, batch }); } test_read_write(&Transaction::SubstrateBlock(OsRng.next_u64())); { let batch = u32::try_from(OsRng.next_u64() >> 32).unwrap(); test_read_write(&Transaction::SubstrateSign(random_sign_data( &mut OsRng, SubstrateSignableId::Batch(batch), Label::Preprocess, ))); } { let batch = u32::try_from(OsRng.next_u64() >> 32).unwrap(); test_read_write(&Transaction::SubstrateSign(random_sign_data( &mut OsRng, SubstrateSignableId::Batch(batch), Label::Share, ))); } { let mut plan = [0; 32]; OsRng.fill_bytes(&mut plan); test_read_write(&Transaction::Sign(random_sign_data(&mut OsRng, plan, Label::Preprocess))); } { let mut plan = [0; 32]; OsRng.fill_bytes(&mut plan); test_read_write(&Transaction::Sign(random_sign_data(&mut OsRng, plan, Label::Share))); } { let mut plan = [0; 32]; OsRng.fill_bytes(&mut plan); let mut tx_hash = vec![0; (OsRng.next_u64() % 64).try_into().unwrap()]; OsRng.fill_bytes(&mut tx_hash); test_read_write(&Transaction::SignCompleted { plan, tx_hash, first_signer: random_signed_with_nonce(&mut OsRng, 
2).signer, signature: random_signed_with_nonce(&mut OsRng, 2).signature, }); } test_read_write(&Transaction::SlashReport( { let amount = usize::try_from(OsRng.next_u64() % u64::from(MAX_KEY_SHARES_PER_SET - 1)).unwrap(); let mut points = vec![]; for _ in 0 .. amount { points.push((OsRng.next_u64() >> 32).try_into().unwrap()); } points }, random_signed_with_nonce(&mut OsRng, 0), )); } ================================================ FILE: coordinator/src/tests/tributary/sync.rs ================================================ use core::time::Duration; use std::{sync::Arc, collections::HashSet}; use rand_core::OsRng; use dalek_ff_group::Ristretto; use ciphersuite::{group::GroupEncoding, Ciphersuite}; use tokio::{ sync::{mpsc, broadcast}, time::sleep, }; use serai_db::MemDb; use tributary::Tributary; use crate::{ tributary::Transaction, ActiveTributary, TributaryEvent, p2p::{heartbeat_tributaries_task, handle_p2p_task}, tests::{ LocalP2p, tributary::{new_keys, new_spec, new_tributaries}, }, }; #[tokio::test] async fn sync_test() { let mut keys = new_keys(&mut OsRng); let spec = new_spec(&mut OsRng, &keys); // Ensure this can have a node fail assert!(spec.n(&[]) > spec.t()); let mut tributaries = new_tributaries(&keys, &spec) .await .into_iter() .map(|(_, p2p, tributary)| (p2p, tributary)) .collect::>(); // Keep a Tributary back, effectively having it offline let syncer_key = keys.pop().unwrap(); let (syncer_p2p, syncer_tributary) = tributaries.pop().unwrap(); // Have the rest form a P2P net let mut tributary_senders = vec![]; let mut tributary_arcs = vec![]; let mut p2p_threads = vec![]; for (p2p, tributary) in tributaries.drain(..) 
{ let tributary = Arc::new(tributary); tributary_arcs.push(tributary.clone()); let (new_tributary_send, new_tributary_recv) = broadcast::channel(5); let (cosign_send, _) = mpsc::unbounded_channel(); let thread = tokio::spawn(handle_p2p_task(p2p, cosign_send, new_tributary_recv)); new_tributary_send .send(TributaryEvent::NewTributary(ActiveTributary { spec: spec.clone(), tributary })) .map_err(|_| "failed to send ActiveTributary") .unwrap(); tributary_senders.push(new_tributary_send); p2p_threads.push(thread); } let tributaries = tributary_arcs; // After four blocks of time, we should have a new block // We don't wait one block of time as we may have missed the chance for the first block // We don't wait two blocks because we may have missed the chance, and then had a failure to // propose by our 'offline' validator, which would cause the Tendermint round time to increase, // requiring a longer delay let block_time = u64::from(Tributary::::block_time()); sleep(Duration::from_secs(4 * block_time)).await; let tip = tributaries[0].tip().await; assert!(tip != spec.genesis()); // Sleep one second to make sure this block propagates sleep(Duration::from_secs(1)).await; // Make sure every tributary has it for tributary in &tributaries { assert!(tributary.reader().block(&tip).is_some()); } // Now that we've confirmed the other tributaries formed a net without issue, drop the syncer's // pending P2P messages syncer_p2p.1.write().await.1.last_mut().unwrap().clear(); // Have it join the net let syncer_key = Ristretto::generator() * *syncer_key; let syncer_tributary = Arc::new(syncer_tributary); let (syncer_tributary_send, syncer_tributary_recv) = broadcast::channel(5); let (cosign_send, _) = mpsc::unbounded_channel(); tokio::spawn(handle_p2p_task(syncer_p2p.clone(), cosign_send, syncer_tributary_recv)); syncer_tributary_send .send(TributaryEvent::NewTributary(ActiveTributary { spec: spec.clone(), tributary: syncer_tributary.clone(), })) .map_err(|_| "failed to send 
ActiveTributary to syncer") .unwrap(); // It shouldn't automatically catch up. If it somehow was, our test would be broken // Sanity check this let tip = tributaries[0].tip().await; // Wait until a new block occurs sleep(Duration::from_secs(3 * block_time)).await; // Make sure a new block actually occurred assert!(tributaries[0].tip().await != tip); // Make sure the new block alone didn't trigger catching up assert_eq!(syncer_tributary.tip().await, spec.genesis()); // Start the heartbeat protocol let (syncer_heartbeat_tributary_send, syncer_heartbeat_tributary_recv) = broadcast::channel(5); tokio::spawn(heartbeat_tributaries_task(syncer_p2p, syncer_heartbeat_tributary_recv)); syncer_heartbeat_tributary_send .send(TributaryEvent::NewTributary(ActiveTributary { spec: spec.clone(), tributary: syncer_tributary.clone(), })) .map_err(|_| "failed to send ActiveTributary to heartbeat") .unwrap(); // The heartbeat is once every 10 blocks, with some limitations sleep(Duration::from_secs(20 * block_time)).await; assert!(syncer_tributary.tip().await != spec.genesis()); // Verify it synced to the tip let syncer_tip = { let tributary = &tributaries[0]; let tip = tributary.tip().await; let syncer_tip = syncer_tributary.tip().await; // Allow a one block tolerance in case of race conditions assert!( HashSet::from([tip, tributary.reader().block(&tip).unwrap().parent()]).contains(&syncer_tip) ); syncer_tip }; sleep(Duration::from_secs(block_time)).await; // Verify it's now keeping up assert!(syncer_tributary.tip().await != syncer_tip); // Verify it's now participating in consensus // Because only `t` validators are used in a commit, take n - t nodes offline // leaving only `t` nodes. Which should force it to participate in the consensus // of next blocks. 
let spares = usize::from(spec.n(&[]) - spec.t()); for thread in p2p_threads.iter().take(spares) { thread.abort(); } // wait for a block sleep(Duration::from_secs(block_time)).await; if syncer_tributary .reader() .parsed_commit(&syncer_tributary.tip().await) .unwrap() .validators .iter() .any(|signer| signer == &syncer_key.to_bytes()) { return; } panic!("synced tributary didn't start participating in consensus"); } ================================================ FILE: coordinator/src/tests/tributary/tx.rs ================================================ use core::time::Duration; use rand_core::{RngCore, OsRng}; use tokio::time::sleep; use serai_db::MemDb; use tributary::{ transaction::Transaction as TransactionTrait, Transaction as TributaryTransaction, Tributary, }; use crate::{ tributary::Transaction, tests::{ LocalP2p, tributary::{new_keys, new_spec, new_tributaries, run_tributaries, wait_for_tx_inclusion}, }, }; #[tokio::test] async fn tx_test() { let keys = new_keys(&mut OsRng); let spec = new_spec(&mut OsRng, &keys); let tributaries = new_tributaries(&keys, &spec) .await .into_iter() .map(|(_, p2p, tributary)| (p2p, tributary)) .collect::>(); // Run the tributaries in the background tokio::spawn(run_tributaries(tributaries.clone())); // Send a TX from a random Tributary let sender = usize::try_from(OsRng.next_u64() % u64::try_from(tributaries.len()).unwrap()).unwrap(); let key = keys[sender].clone(); let attempt = 0; let mut commitments = vec![0; 256]; OsRng.fill_bytes(&mut commitments); // Create the TX with a null signature so we can get its sig hash let block_before_tx = tributaries[sender].1.tip().await; let mut tx = Transaction::DkgCommitments { attempt, commitments: vec![commitments.clone()], signed: Transaction::empty_signed(), }; tx.sign(&mut OsRng, spec.genesis(), &key); assert_eq!(tributaries[sender].1.add_transaction(tx.clone()).await, Ok(true)); let included_in = wait_for_tx_inclusion(&tributaries[sender].1, block_before_tx, tx.hash()).await; // 
Also sleep for the block time to ensure the block is synced around before we run checks on it sleep(Duration::from_secs(Tributary::::block_time().into())).await; // All tributaries should have acknowledged this transaction in a block for (_, tributary) in tributaries { let block = tributary.reader().block(&included_in).unwrap(); assert_eq!(block.transactions, vec![TributaryTransaction::Application(tx.clone())]); } } ================================================ FILE: coordinator/src/tributary/db.rs ================================================ use std::collections::HashMap; use scale::Encode; use borsh::{BorshSerialize, BorshDeserialize}; use dalek_ff_group::Ristretto; use ciphersuite::{group::GroupEncoding, Ciphersuite}; use frost::Participant; use serai_client::validator_sets::primitives::{KeyPair, ExternalValidatorSet}; use processor_messages::coordinator::SubstrateSignableId; pub use serai_db::*; use tributary::ReadWrite; use crate::tributary::{Label, Transaction}; #[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)] pub enum Topic { Dkg, DkgConfirmation, SubstrateSign(SubstrateSignableId), Sign([u8; 32]), } // A struct to refer to a piece of data all validators will presumably provide a value for. 
#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode)] pub struct DataSpecification { pub topic: Topic, pub label: Label, pub attempt: u32, } pub enum DataSet { Participating(HashMap>), NotParticipating, } pub enum Accumulation { Ready(DataSet), NotReady, } // TODO: Move from genesis to set for indexing create_db!( Tributary { SeraiBlockNumber: (hash: [u8; 32]) -> u64, SeraiDkgCompleted: (spec: ExternalValidatorSet) -> [u8; 32], TributaryBlockNumber: (block: [u8; 32]) -> u32, LastHandledBlock: (genesis: [u8; 32]) -> [u8; 32], // TODO: Revisit the point of this FatalSlashes: (genesis: [u8; 32]) -> Vec<[u8; 32]>, RemovedAsOfDkgAttempt: (genesis: [u8; 32], attempt: u32) -> Vec<[u8; 32]>, OfflineDuringDkg: (genesis: [u8; 32]) -> Vec<[u8; 32]>, // TODO: Combine these two FatallySlashed: (genesis: [u8; 32], account: [u8; 32]) -> (), SlashPoints: (genesis: [u8; 32], account: [u8; 32]) -> u32, VotedToRemove: (genesis: [u8; 32], voter: [u8; 32], to_remove: [u8; 32]) -> (), VotesToRemove: (genesis: [u8; 32], to_remove: [u8; 32]) -> u16, AttemptDb: (genesis: [u8; 32], topic: &Topic) -> u32, ReattemptDb: (genesis: [u8; 32], block: u32) -> Vec, DataReceived: (genesis: [u8; 32], data_spec: &DataSpecification) -> u16, DataDb: (genesis: [u8; 32], data_spec: &DataSpecification, signer_bytes: &[u8; 32]) -> Vec, DkgShare: (genesis: [u8; 32], from: u16, to: u16) -> Vec, ConfirmationNonces: (genesis: [u8; 32], attempt: u32) -> HashMap>, DkgKeyPair: (genesis: [u8; 32], attempt: u32) -> KeyPair, KeyToDkgAttempt: (key: [u8; 32]) -> u32, DkgLocallyCompleted: (genesis: [u8; 32]) -> (), PlanIds: (genesis: &[u8], block: u64) -> Vec<[u8; 32]>, SignedTransactionDb: (order: &[u8], nonce: u32) -> Vec, SlashReports: (genesis: [u8; 32], signer: [u8; 32]) -> Vec, SlashReported: (genesis: [u8; 32]) -> u16, SlashReportCutOff: (genesis: [u8; 32]) -> u64, SlashReport: (set: ExternalValidatorSet) -> Vec<([u8; 32], u32)>, } ); impl FatalSlashes { pub fn get_as_keys(getter: &impl Get, genesis: [u8; 32]) -> 
Vec<::G> { FatalSlashes::get(getter, genesis) .unwrap_or(vec![]) .iter() .map(|key| ::G::from_bytes(key).unwrap()) .collect::>() } } impl FatallySlashed { pub fn set_fatally_slashed(txn: &mut impl DbTxn, genesis: [u8; 32], account: [u8; 32]) { Self::set(txn, genesis, account, &()); let mut existing = FatalSlashes::get(txn, genesis).unwrap_or_default(); // Don't append if we already have it, which can occur upon multiple faults if existing.iter().any(|existing| existing == &account) { return; } existing.push(account); FatalSlashes::set(txn, genesis, &existing); } } impl AttemptDb { pub fn recognize_topic(txn: &mut impl DbTxn, genesis: [u8; 32], topic: Topic) { Self::set(txn, genesis, &topic, &0u32); } pub fn start_next_attempt(txn: &mut impl DbTxn, genesis: [u8; 32], topic: Topic) -> u32 { let next = Self::attempt(txn, genesis, topic).expect("starting next attempt for unknown topic") + 1; Self::set(txn, genesis, &topic, &next); next } pub fn attempt(getter: &impl Get, genesis: [u8; 32], topic: Topic) -> Option { let attempt = Self::get(getter, genesis, &topic); // Don't require explicit recognition of the Dkg topic as it starts when the chain does // Don't require explicit recognition of the SlashReport topic as it isn't a DoS risk and it // should always happen (eventually) if attempt.is_none() && ((topic == Topic::Dkg) || (topic == Topic::DkgConfirmation) || (topic == Topic::SubstrateSign(SubstrateSignableId::SlashReport))) { return Some(0); } attempt } } impl ReattemptDb { pub fn schedule_reattempt( txn: &mut impl DbTxn, genesis: [u8; 32], current_block_number: u32, topic: Topic, ) { // 5 minutes #[cfg(not(feature = "longer-reattempts"))] const BASE_REATTEMPT_DELAY: u32 = (5 * 60 * 1000) / tributary::tendermint::TARGET_BLOCK_TIME; // 10 minutes, intended for latent environments like the GitHub CI #[cfg(feature = "longer-reattempts")] const BASE_REATTEMPT_DELAY: u32 = (10 * 60 * 1000) / tributary::tendermint::TARGET_BLOCK_TIME; // 5 minutes for attempts 0 ..= 2, 
10 minutes for attempts 3 ..= 5, 15 minutes for attempts > 5 // Assumes no event will take longer than 15 minutes, yet grows the time in case there are // network bandwidth issues let mut reattempt_delay = BASE_REATTEMPT_DELAY * ((AttemptDb::attempt(txn, genesis, topic) .expect("scheduling re-attempt for unknown topic") / 3) + 1) .min(3); // Allow more time for DKGs since they have an extra round and much more data if matches!(topic, Topic::Dkg) { reattempt_delay *= 4; } let upon_block = current_block_number + reattempt_delay; let mut reattempts = Self::get(txn, genesis, upon_block).unwrap_or(vec![]); reattempts.push(topic); Self::set(txn, genesis, upon_block, &reattempts); } pub fn take(txn: &mut impl DbTxn, genesis: [u8; 32], block_number: u32) -> Vec { let res = Self::get(txn, genesis, block_number).unwrap_or(vec![]); if !res.is_empty() { Self::del(txn, genesis, block_number); } res } } impl SignedTransactionDb { pub fn take_signed_transaction( txn: &mut impl DbTxn, order: &[u8], nonce: u32, ) -> Option { let res = SignedTransactionDb::get(txn, order, nonce) .map(|bytes| Transaction::read(&mut bytes.as_slice()).unwrap()); if res.is_some() { Self::del(txn, order, nonce); } res } } ================================================ FILE: coordinator/src/tributary/handle.rs ================================================ use core::ops::Deref; use std::collections::HashMap; use zeroize::Zeroizing; use rand_core::OsRng; use dalek_ff_group::Ristretto; use ciphersuite::{group::GroupEncoding, Ciphersuite}; use frost::dkg::Participant; use scale::{Encode, Decode}; use serai_client::validator_sets::primitives::KeyPair; use tributary::{Signed, TransactionKind, TransactionTrait}; use processor_messages::{ key_gen::{self, KeyGenId}, coordinator::{self, SubstrateSignableId, SubstrateSignId}, sign::{self, SignId}, }; use serai_db::*; use crate::{ processors::Processors, tributary::{ *, signing_protocol::DkgConfirmer, scanner::{ RecognizedIdType, RIDTrait, 
PublishSeraiTransaction, PTTTrait, TributaryBlockHandler, }, }, P2p, }; pub fn dkg_confirmation_nonces( key: &Zeroizing<::F>, spec: &TributarySpec, txn: &mut impl DbTxn, attempt: u32, ) -> [u8; 64] { DkgConfirmer::new(key, spec, txn, attempt) .expect("getting DKG confirmation nonces for unknown attempt") .preprocess() } pub fn generated_key_pair( txn: &mut D::Transaction<'_>, key: &Zeroizing<::F>, spec: &TributarySpec, key_pair: &KeyPair, attempt: u32, ) -> Result<[u8; 32], Participant> { DkgKeyPair::set(txn, spec.genesis(), attempt, key_pair); KeyToDkgAttempt::set(txn, key_pair.0 .0, &attempt); let preprocesses = ConfirmationNonces::get(txn, spec.genesis(), attempt).unwrap(); DkgConfirmer::new(key, spec, txn, attempt) .expect("claiming to have generated a key pair for an unrecognized attempt") .share(preprocesses, key_pair) } fn unflatten( spec: &TributarySpec, removed: &[::G], data: &mut HashMap>, ) { for (validator, _) in spec.validators() { let Some(range) = spec.i(removed, validator) else { continue }; let Some(all_segments) = data.remove(&range.start) else { continue; }; let mut data_vec = Vec::<_>::decode(&mut all_segments.as_slice()).unwrap(); for i in u16::from(range.start) .. 
u16::from(range.end) { let i = Participant::new(i).unwrap(); data.insert(i, data_vec.remove(0)); } } } impl< D: Db, T: DbTxn, Pro: Processors, PST: PublishSeraiTransaction, PTT: PTTTrait, RID: RIDTrait, P: P2p, > TributaryBlockHandler<'_, D, T, Pro, PST, PTT, RID, P> { fn accumulate( &mut self, removed: &[::G], data_spec: &DataSpecification, signer: ::G, data: &Vec, ) -> Accumulation { log::debug!("accumulating entry for {:?} attempt #{}", &data_spec.topic, &data_spec.attempt); let genesis = self.spec.genesis(); if DataDb::get(self.txn, genesis, data_spec, &signer.to_bytes()).is_some() { panic!("accumulating data for a participant multiple times"); } let signer_shares = { let Some(signer_i) = self.spec.i(removed, signer) else { log::warn!("accumulating data from {} who was removed", hex::encode(signer.to_bytes())); return Accumulation::NotReady; }; u16::from(signer_i.end) - u16::from(signer_i.start) }; let prior_received = DataReceived::get(self.txn, genesis, data_spec).unwrap_or_default(); let now_received = prior_received + signer_shares; DataReceived::set(self.txn, genesis, data_spec, &now_received); DataDb::set(self.txn, genesis, data_spec, &signer.to_bytes(), data); let received_range = (prior_received + 1) ..= now_received; // If 2/3rds of the network participated in this preprocess, queue it for an automatic // re-attempt // DkgConfirmation doesn't have a re-attempt as it's just an extension for Dkg if (data_spec.label == Label::Preprocess) && received_range.contains(&self.spec.t()) && (data_spec.topic != Topic::DkgConfirmation) { // Double check the attempt on this entry, as we don't want to schedule a re-attempt if this // is an old entry // This is an assert, not part of the if check, as old data shouldn't be here in the first // place assert_eq!(AttemptDb::attempt(self.txn, genesis, data_spec.topic), Some(data_spec.attempt)); ReattemptDb::schedule_reattempt(self.txn, genesis, self.block_number, data_spec.topic); } // If we have all the needed 
commitments/preprocesses/shares, tell the processor let needs_everyone = (data_spec.topic == Topic::Dkg) || (data_spec.topic == Topic::DkgConfirmation); let needed = if needs_everyone { self.spec.n(removed) } else { self.spec.t() }; if received_range.contains(&needed) { log::debug!( "accumulation for entry {:?} attempt #{} is ready", &data_spec.topic, &data_spec.attempt ); let mut data = HashMap::new(); for validator in self.spec.validators().iter().map(|validator| validator.0) { let Some(i) = self.spec.i(removed, validator) else { continue }; data.insert( i.start, if let Some(data) = DataDb::get(self.txn, genesis, data_spec, &validator.to_bytes()) { data } else { continue; }, ); } assert_eq!(data.len(), usize::from(needed)); // Remove our own piece of data, if we were involved if let Some(i) = self.spec.i(removed, Ristretto::generator() * self.our_key.deref()) { if data.remove(&i.start).is_some() { return Accumulation::Ready(DataSet::Participating(data)); } } return Accumulation::Ready(DataSet::NotParticipating); } Accumulation::NotReady } fn handle_data( &mut self, removed: &[::G], data_spec: &DataSpecification, bytes: &Vec, signed: &Signed, ) -> Accumulation { let genesis = self.spec.genesis(); let Some(curr_attempt) = AttemptDb::attempt(self.txn, genesis, data_spec.topic) else { // Premature publication of a valid ID/publication of an invalid ID self.fatal_slash(signed.signer.to_bytes(), "published data for ID without an attempt"); return Accumulation::NotReady; }; // If they've already published a TX for this attempt, slash // This shouldn't be reachable since nonces were made inserted by the coordinator, yet it's a // cheap check to leave in for safety if DataDb::get(self.txn, genesis, data_spec, &signed.signer.to_bytes()).is_some() { self.fatal_slash(signed.signer.to_bytes(), "published data multiple times"); return Accumulation::NotReady; } // If the attempt is lesser than the blockchain's, return if data_spec.attempt < curr_attempt { log::debug!( "dated 
attempt published onto tributary for topic {:?} (used attempt {}, current {})", data_spec.topic, data_spec.attempt, curr_attempt ); return Accumulation::NotReady; } // If the attempt is greater, this is a premature publication, full slash if data_spec.attempt > curr_attempt { self.fatal_slash( signed.signer.to_bytes(), "published data with an attempt which hasn't started", ); return Accumulation::NotReady; } // TODO: We can also full slash if shares before all commitments, or share before the // necessary preprocesses // TODO: If this is shares, we need to check they are part of the selected signing set // Accumulate this data self.accumulate(removed, data_spec, signed.signer, bytes) } fn check_sign_data_len( &mut self, removed: &[::G], signer: ::G, len: usize, ) -> Result<(), ()> { let Some(signer_i) = self.spec.i(removed, signer) else { // TODO: Ensure processor doesn't so participate/check how it handles removals for being // offline self.fatal_slash(signer.to_bytes(), "signer participated despite being removed"); Err(())? 
  // TODO: Don't call fatal_slash in here, return the party to fatal_slash to ensure no further
  // execution occurs
  /// Process a single application-layer transaction included on this Tributary.
  ///
  /// Each arm handles one Transaction variant: accumulating DKG messages, forwarding signing
  /// data to the processor, recognizing new signing topics, and tracking slash reports.
  /// Misbehavior observed here is punished via `fatal_slash`.
  pub(crate) async fn handle_application_tx(&mut self, tx: Transaction) {
    let genesis = self.spec.genesis();

    // Don't handle transactions from fatally slashed participants
    // This prevents removed participants from sabotaging the removal signing sessions and so on
    // TODO: Because fatally slashed participants can still publish onto the blockchain, they have
    // a notable DoS ability
    if let TransactionKind::Signed(_, signed) = tx.kind() {
      if FatallySlashed::get(self.txn, genesis, signed.signer.to_bytes()).is_some() {
        return;
      }
    }

    match tx {
      // A vote to remove a participant from the DKG
      Transaction::RemoveParticipantDueToDkg { participant, signed } => {
        if self.spec.i(&[], participant).is_none() {
          self.fatal_slash(
            participant.to_bytes(),
            "RemoveParticipantDueToDkg vote for non-validator",
          );
          return;
        }

        let participant = participant.to_bytes();
        let signer = signed.signer.to_bytes();

        assert!(
          VotedToRemove::get(self.txn, genesis, signer, participant).is_none(),
          "VotedToRemove multiple times despite a single nonce being allocated",
        );
        VotedToRemove::set(self.txn, genesis, signer, participant, &());

        // Votes are weighted by the voter's share count
        let prior_votes = VotesToRemove::get(self.txn, genesis, participant).unwrap_or(0);
        let signer_votes =
          self.spec.i(&[], signed.signer).expect("signer wasn't a validator for this network?");
        let new_votes = prior_votes + u16::from(signer_votes.end) - u16::from(signer_votes.start);
        VotesToRemove::set(self.txn, genesis, participant, &new_votes);
        // Slash the target exactly once, on the vote which crosses the threshold t
        if ((prior_votes + 1) ..= new_votes).contains(&self.spec.t()) {
          self.fatal_slash(participant, "RemoveParticipantDueToDkg vote")
        }
      }

      // DKG round 1: commitments from each participant
      Transaction::DkgCommitments { attempt, commitments, signed } => {
        let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
          self.fatal_slash(signed.signer.to_bytes(), "DkgCommitments with an unrecognized attempt");
          return;
        };
        let Ok(()) = self.check_sign_data_len(&removed, signed.signer, commitments.len()) else {
          return;
        };
        let data_spec = DataSpecification { topic: Topic::Dkg, label: Label::Preprocess, attempt };
        match self.handle_data(&removed, &data_spec, &commitments.encode(), &signed) {
          Accumulation::Ready(DataSet::Participating(mut commitments)) => {
            log::info!("got all DkgCommitments for {}", hex::encode(genesis));
            unflatten(self.spec, &removed, &mut commitments);
            self
              .processors
              .send(
                self.spec.set().network,
                key_gen::CoordinatorMessage::Commitments {
                  id: KeyGenId { session: self.spec.set().session, attempt },
                  commitments,
                },
              )
              .await;
          }
          Accumulation::Ready(DataSet::NotParticipating) => {
            assert!(
              removed.contains(&(Ristretto::generator() * self.our_key.deref())),
              "NotParticipating in a DkgCommitments we weren't removed for"
            );
          }
          Accumulation::NotReady => {}
        }
      }

      // DKG round 2: encrypted secret shares, alongside nonces for the confirmation signature
      Transaction::DkgShares { attempt, mut shares, confirmation_nonces, signed } => {
        let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
          self.fatal_slash(signed.signer.to_bytes(), "DkgShares with an unrecognized attempt");
          return;
        };
        let not_participating = removed.contains(&(Ristretto::generator() * self.our_key.deref()));

        let Ok(()) = self.check_sign_data_len(&removed, signed.signer, shares.len()) else {
          return;
        };

        let Some(sender_i) = self.spec.i(&removed, signed.signer) else {
          self.fatal_slash(
            signed.signer.to_bytes(),
            "DkgShares for a DKG they aren't participating in",
          );
          return;
        };
        let sender_is_len = u16::from(sender_i.end) - u16::from(sender_i.start);
        // Each of the sender's shares must carry one share per other participant (they omit
        // their own)
        for shares in &shares {
          if shares.len() != (usize::from(self.spec.n(&removed) - sender_is_len)) {
            self.fatal_slash(signed.signer.to_bytes(), "invalid amount of DKG shares");
            return;
          }
        }

        // Save each share as needed for blame
        for (from_offset, shares) in shares.iter().enumerate() {
          let from =
            Participant::new(u16::from(sender_i.start) + u16::try_from(from_offset).unwrap())
              .unwrap();

          for (to_offset, share) in shares.iter().enumerate() {
            // 0-indexed (the enumeration) to 1-indexed (Participant)
            let mut to = u16::try_from(to_offset).unwrap() + 1;
            // Adjust for the omission of the sender's own shares
            if to >= u16::from(sender_i.start) {
              to += u16::from(sender_i.end) - u16::from(sender_i.start);
            }
            let to = Participant::new(to).unwrap();

            DkgShare::set(self.txn, genesis, from.into(), to.into(), share);
          }
        }

        // Filter down to only our share's bytes for handle
        let our_shares = if let Some(our_i) =
          self.spec.i(&removed, Ristretto::generator() * self.our_key.deref())
        {
          if sender_i == our_i {
            vec![]
          } else {
            // 1-indexed to 0-indexed
            let mut our_i_pos = u16::from(our_i.start) - 1;
            // Handle the omission of the sender's own data
            if u16::from(our_i.start) > u16::from(sender_i.start) {
              our_i_pos -= sender_is_len;
            }
            let our_i_pos = usize::from(our_i_pos);
            shares
              .iter_mut()
              .map(|shares| {
                shares
                  .drain(
                    our_i_pos ..
                      (our_i_pos + usize::from(u16::from(our_i.end) - u16::from(our_i.start))),
                  )
                  .collect::<Vec<_>>()
              })
              .collect()
          }
        } else {
          assert!(
            not_participating,
            "we didn't have an i while handling DkgShares we weren't removed for"
          );
          // Since we're not participating, simply save vec![] for our shares
          vec![]
        };
        // Drop shares as it's presumably been mutated into invalidity
        drop(shares);

        let data_spec = DataSpecification { topic: Topic::Dkg, label: Label::Share, attempt };
        let encoded_data = (confirmation_nonces.to_vec(), our_shares.encode()).encode();
        match self.handle_data(&removed, &data_spec, &encoded_data, &signed) {
          Accumulation::Ready(DataSet::Participating(confirmation_nonces_and_shares)) => {
            log::info!("got all DkgShares for {}", hex::encode(genesis));

            // Split the accumulated (nonces, shares) pairs back into their two maps
            let mut confirmation_nonces = HashMap::new();
            let mut shares = HashMap::new();
            for (participant, confirmation_nonces_and_shares) in confirmation_nonces_and_shares {
              let (these_confirmation_nonces, these_shares) =
                <(Vec<u8>, Vec<u8>)>::decode(&mut confirmation_nonces_and_shares.as_slice())
                  .unwrap();
              confirmation_nonces.insert(participant, these_confirmation_nonces);
              shares.insert(participant, these_shares);
            }
            ConfirmationNonces::set(self.txn, genesis, attempt, &confirmation_nonces);

            // shares is a HashMap<Participant, Vec<Vec<Vec<u8>>>>, with the values representing:
            // - Each of the sender's shares
            // - Each of the our shares
            // - Each share
            // We need a Vec<HashMap<Participant, Vec<u8>>>, with the outer being each of ours
            let mut expanded_shares = vec![];
            for (sender_start_i, shares) in shares {
              let shares: Vec<Vec<Vec<u8>>> = Vec::<_>::decode(&mut shares.as_slice()).unwrap();
              for (sender_i_offset, our_shares) in shares.into_iter().enumerate() {
                for (our_share_i, our_share) in our_shares.into_iter().enumerate() {
                  if expanded_shares.len() <= our_share_i {
                    expanded_shares.push(HashMap::new());
                  }
                  expanded_shares[our_share_i].insert(
                    Participant::new(
                      u16::from(sender_start_i) + u16::try_from(sender_i_offset).unwrap(),
                    )
                    .unwrap(),
                    our_share,
                  );
                }
              }
            }

            self
              .processors
              .send(
                self.spec.set().network,
                key_gen::CoordinatorMessage::Shares {
                  id: KeyGenId { session: self.spec.set().session, attempt },
                  shares: expanded_shares,
                },
              )
              .await;
          }
          Accumulation::Ready(DataSet::NotParticipating) => {
            assert!(not_participating, "NotParticipating in a DkgShares we weren't removed for");
          }
          Accumulation::NotReady => {}
        }
      }

      // An accusation that a received DKG share was invalid; forwarded to the processor to verify
      Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => {
        let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
          self
            .fatal_slash(signed.signer.to_bytes(), "InvalidDkgShare with an unrecognized attempt");
          return;
        };
        let Some(range) = self.spec.i(&removed, signed.signer) else {
          self.fatal_slash(
            signed.signer.to_bytes(),
            "InvalidDkgShare for a DKG they aren't participating in",
          );
          return;
        };
        // The accuser index must be one of the signer's own indices
        if !range.contains(&accuser) {
          self.fatal_slash(
            signed.signer.to_bytes(),
            "accused with a Participant index which wasn't theirs",
          );
          return;
        }
        // One cannot accuse themselves
        if range.contains(&faulty) {
          self.fatal_slash(signed.signer.to_bytes(), "accused self of having an InvalidDkgShare");
          return;
        }
        let Some(share) = DkgShare::get(self.txn, genesis, accuser.into(), faulty.into()) else {
          self.fatal_slash(
            signed.signer.to_bytes(),
            "InvalidDkgShare had a non-existent faulty participant",
          );
          return;
        };
        self
          .processors
          .send(
            self.spec.set().network,
            key_gen::CoordinatorMessage::VerifyBlame {
              id: KeyGenId { session: self.spec.set().session, attempt },
              accuser,
              accused: faulty,
              share,
              blame,
            },
          )
          .await;
      }

      // A share of the signature confirming the DKG result onto Serai
      Transaction::DkgConfirmed { attempt, confirmation_share, signed } => {
        let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
          self.fatal_slash(signed.signer.to_bytes(), "DkgConfirmed with an unrecognized attempt");
          return;
        };

        let data_spec =
          DataSpecification { topic: Topic::DkgConfirmation, label: Label::Share, attempt };
        match self.handle_data(&removed, &data_spec, &confirmation_share.to_vec(), &signed) {
          Accumulation::Ready(DataSet::Participating(shares)) => {
            log::info!("got all DkgConfirmed for {}", hex::encode(genesis));

            let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
              panic!(
                "DkgConfirmed for everyone yet didn't have the removed parties for this attempt",
              );
            };

            let preprocesses = ConfirmationNonces::get(self.txn, genesis, attempt).unwrap();
            // TODO: This can technically happen under very very very specific timing as the txn
            // put happens before DkgConfirmed, yet the txn commit isn't guaranteed to
            let key_pair = DkgKeyPair::get(self.txn, genesis, attempt).expect(
              "in DkgConfirmed handling, which happens after everyone \
              (including us) fires DkgConfirmed, yet no confirming key pair",
            );

            let mut confirmer = DkgConfirmer::new(self.our_key, self.spec, self.txn, attempt)
              .expect("confirming DKG for unrecognized attempt");
            let sig = match confirmer.complete(preprocesses, &key_pair, shares) {
              Ok(sig) => sig,
              // If completion identified a faulty participant, vote to remove them and bail
              Err(p) => {
                let mut tx = Transaction::RemoveParticipantDueToDkg {
                  participant: self.spec.reverse_lookup_i(&removed, p).unwrap(),
                  signed: Transaction::empty_signed(),
                };
                tx.sign(&mut OsRng, genesis, self.our_key);
                self.publish_tributary_tx.publish_tributary_tx(tx).await;
                return;
              }
            };

            DkgLocallyCompleted::set(self.txn, genesis, &());

            self
              .publish_serai_tx
              .publish_set_keys(
                self.db,
                self.spec.set(),
                removed.into_iter().map(|key| key.to_bytes().into()).collect(),
                key_pair,
                sig.into(),
              )
              .await;
          }
          Accumulation::Ready(DataSet::NotParticipating) => {
            panic!("wasn't a participant in DKG confirmination shares")
          }
          Accumulation::NotReady => {}
        }
      }

      // Start cosigning a Substrate block
      Transaction::CosignSubstrateBlock(hash) => {
        AttemptDb::recognize_topic(
          self.txn,
          genesis,
          Topic::SubstrateSign(SubstrateSignableId::CosigningSubstrateBlock(hash)),
        );

        let block_number = SeraiBlockNumber::get(self.txn, hash)
          .expect("CosignSubstrateBlock yet didn't save Serai block number");
        let msg = coordinator::CoordinatorMessage::CosignSubstrateBlock {
          id: SubstrateSignId {
            session: self.spec.set().session,
            id: SubstrateSignableId::CosigningSubstrateBlock(hash),
            attempt: 0,
          },
          block_number,
        };
        self.processors.send(self.spec.set().network, msg).await;
      }

      Transaction::Batch { block: _, batch } => {
        // Because this Batch has achieved synchrony, its batch ID should be authorized
        AttemptDb::recognize_topic(
          self.txn,
          genesis,
          Topic::SubstrateSign(SubstrateSignableId::Batch(batch)),
        );
        self
          .recognized_id
          .recognized_id(
            self.spec.set(),
            genesis,
            RecognizedIdType::Batch,
            batch.to_le_bytes().to_vec(),
          )
          .await;
      }

      // A Substrate block was finalized; recognize every plan it caused
      Transaction::SubstrateBlock(block) => {
        let plan_ids = PlanIds::get(self.txn, &genesis, block).expect(
          "synced a tributary block finalizing a substrate block in a provided transaction \
          despite us not providing that transaction",
        );

        for id in plan_ids {
          AttemptDb::recognize_topic(self.txn, genesis, Topic::Sign(id));
          self
            .recognized_id
            .recognized_id(self.spec.set(), genesis, RecognizedIdType::Plan, id.to_vec())
            .await;
        }
      }

      // Preprocess/share for a Substrate-level signing protocol (batches, cosigns, slash reports)
      Transaction::SubstrateSign(data) => {
        // Provided transactions ensure synchrony on any signing protocol, and we won't start
        // signing with threshold keys before we've confirmed them on-chain
        let Some(removed) =
          crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis)
        else {
          self.fatal_slash(
            data.signed.signer.to_bytes(),
            "signing despite not having set keys on substrate",
          );
          return;
        };
        let signer = data.signed.signer;
        let Ok(()) = self.check_sign_data_len(&removed, signer, data.data.len()) else {
          return;
        };
        // Fixed lengths per label for this protocol
        let expected_len = match data.label {
          Label::Preprocess => 64,
          Label::Share => 32,
        };
        for data in &data.data {
          if data.len() != expected_len {
            self.fatal_slash(
              signer.to_bytes(),
              "unexpected length data for substrate signing protocol",
            );
            return;
          }
        }

        let data_spec = DataSpecification {
          topic: Topic::SubstrateSign(data.plan),
          label: data.label,
          attempt: data.attempt,
        };
        let Accumulation::Ready(DataSet::Participating(mut results)) =
          self.handle_data(&removed, &data_spec, &data.data.encode(), &data.signed)
        else {
          return;
        };
        unflatten(self.spec, &removed, &mut results);

        let id = SubstrateSignId {
          session: self.spec.set().session,
          id: data.plan,
          attempt: data.attempt,
        };
        let msg = match data.label {
          Label::Preprocess => coordinator::CoordinatorMessage::SubstratePreprocesses {
            id,
            preprocesses: results.into_iter().map(|(v, p)| (v, p.try_into().unwrap())).collect(),
          },
          Label::Share => coordinator::CoordinatorMessage::SubstrateShares {
            id,
            shares: results.into_iter().map(|(v, p)| (v, p.try_into().unwrap())).collect(),
          },
        };
        self.processors.send(self.spec.set().network, msg).await;
      }

      // Preprocess/share for an external-network signing protocol
      Transaction::Sign(data) => {
        let Some(removed) =
          crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis)
        else {
          self.fatal_slash(
            data.signed.signer.to_bytes(),
            "signing despite not having set keys on substrate",
          );
          return;
        };
        let Ok(()) = self.check_sign_data_len(&removed, data.signed.signer, data.data.len()) else {
          return;
        };

        let data_spec = DataSpecification {
          topic: Topic::Sign(data.plan),
          label: data.label,
          attempt: data.attempt,
        };
        if let Accumulation::Ready(DataSet::Participating(mut results)) =
          self.handle_data(&removed, &data_spec, &data.data.encode(), &data.signed)
        {
          unflatten(self.spec, &removed, &mut results);
          let id =
            SignId { session: self.spec.set().session, id: data.plan, attempt: data.attempt };
          self
            .processors
            .send(
              self.spec.set().network,
              match data.label {
                Label::Preprocess => {
                  sign::CoordinatorMessage::Preprocesses { id, preprocesses: results }
                }
                Label::Share => sign::CoordinatorMessage::Shares { id, shares: results },
              },
            )
            .await;
        }
      }

      // A claim that a signing protocol completed on the external network
      Transaction::SignCompleted { plan, tx_hash, first_signer, signature: _ } => {
        log::info!(
          "on-chain SignCompleted claims {} completes {}",
          hex::encode(&tx_hash),
          hex::encode(plan)
        );

        if AttemptDb::attempt(self.txn, genesis, Topic::Sign(plan)).is_none() {
          self.fatal_slash(first_signer.to_bytes(), "claimed an unrecognized plan was completed");
          return;
        };

        // TODO: Confirm this signer hasn't prior published a completion

        let msg = sign::CoordinatorMessage::Completed {
          session: self.spec.set().session,
          id: plan,
          tx: tx_hash,
        };
        self.processors.send(self.spec.set().network, msg).await;
      }

      // A validator's report of slash points for every other validator
      Transaction::SlashReport(points, signed) => {
        // Uses &[] as we only need the length which is independent to who else was removed
        let signer_range = self.spec.i(&[], signed.signer).unwrap();
        let signer_len = u16::from(signer_range.end) - u16::from(signer_range.start);
        // One point entry per other validator (the reporter omits themselves)
        if points.len() != (self.spec.validators().len() - 1) {
          self.fatal_slash(
            signed.signer.to_bytes(),
            "submitted a distinct amount of slash points to participants",
          );
          return;
        }

        if SlashReports::get(self.txn, genesis, signed.signer.to_bytes()).is_some() {
          self.fatal_slash(signed.signer.to_bytes(), "submitted multiple slash points");
          return;
        }
        SlashReports::set(self.txn, genesis, signed.signer.to_bytes(), &points);

        let prior_reported = SlashReported::get(self.txn, genesis).unwrap_or(0);
        let now_reported = prior_reported + signer_len;
        SlashReported::set(self.txn, genesis, &now_reported);

        // Once a threshold of shares have reported, schedule the cut-off for stragglers
        if (prior_reported < self.spec.t()) && (now_reported >= self.spec.t()) {
          SlashReportCutOff::set(
            self.txn,
            genesis,
            // 30 minutes into the future
            &(u64::from(self.block_number) +
              ((30 * 60 * 1000) / u64::from(tributary::tendermint::TARGET_BLOCK_TIME))),
          );
        }
      }
    }
  }
tx_hash, }; self.processors.send(self.spec.set().network, msg).await; } Transaction::SlashReport(points, signed) => { // Uses &[] as we only need the length which is independent to who else was removed let signer_range = self.spec.i(&[], signed.signer).unwrap(); let signer_len = u16::from(signer_range.end) - u16::from(signer_range.start); if points.len() != (self.spec.validators().len() - 1) { self.fatal_slash( signed.signer.to_bytes(), "submitted a distinct amount of slash points to participants", ); return; } if SlashReports::get(self.txn, genesis, signed.signer.to_bytes()).is_some() { self.fatal_slash(signed.signer.to_bytes(), "submitted multiple slash points"); return; } SlashReports::set(self.txn, genesis, signed.signer.to_bytes(), &points); let prior_reported = SlashReported::get(self.txn, genesis).unwrap_or(0); let now_reported = prior_reported + signer_len; SlashReported::set(self.txn, genesis, &now_reported); if (prior_reported < self.spec.t()) && (now_reported >= self.spec.t()) { SlashReportCutOff::set( self.txn, genesis, // 30 minutes into the future &(u64::from(self.block_number) + ((30 * 60 * 1000) / u64::from(tributary::tendermint::TARGET_BLOCK_TIME))), ); } } } } } ================================================ FILE: coordinator/src/tributary/mod.rs ================================================ use dalek_ff_group::Ristretto; use ciphersuite::{group::GroupEncoding, Ciphersuite}; use serai_client::validator_sets::primitives::ExternalValidatorSet; use tributary::{ ReadWrite, transaction::{TransactionError, TransactionKind, Transaction as TransactionTrait}, Tributary, }; mod db; pub use db::*; mod spec; pub use spec::TributarySpec; mod transaction; pub use transaction::{Label, SignData, Transaction}; mod signing_protocol; mod handle; pub use handle::*; pub mod scanner; pub fn removed_as_of_dkg_attempt( getter: &impl Get, genesis: [u8; 32], attempt: u32, ) -> Option::G>> { if attempt == 0 { Some(vec![]) } else { RemovedAsOfDkgAttempt::get(getter, 
genesis, attempt).map(|keys| { keys.iter().map(|key| ::G::from_bytes(key).unwrap()).collect() }) } } pub fn removed_as_of_set_keys( getter: &impl Get, set: ExternalValidatorSet, genesis: [u8; 32], ) -> Option::G>> { // SeraiDkgCompleted has the key placed on-chain. // This key can be uniquely mapped to an attempt so long as one participant was honest, which we // assume as a presumably honest participant. // Resolve from generated key to attempt to fatally slashed as of attempt. // This expect will trigger if this is prematurely called and Substrate has tracked the keys yet // we haven't locally synced and handled the Tributary // All callers of this, at the time of writing, ensure the Tributary has sufficiently synced // making the panic with context more desirable than the None let attempt = KeyToDkgAttempt::get(getter, SeraiDkgCompleted::get(getter, set)?) .expect("key completed on-chain didn't have an attempt related"); removed_as_of_dkg_attempt(getter, genesis, attempt) } pub async fn publish_signed_transaction( txn: &mut D::Transaction<'_>, tributary: &Tributary, tx: Transaction, ) { log::debug!("publishing transaction {}", hex::encode(tx.hash())); let (order, signer) = if let TransactionKind::Signed(order, signed) = tx.kind() { let signer = signed.signer; // Safe as we should deterministically create transactions, meaning if this is already on-disk, // it's what we're saving now SignedTransactionDb::set(txn, &order, signed.nonce, &tx.serialize()); (order, signer) } else { panic!("non-signed transaction passed to publish_signed_transaction"); }; // If we're trying to publish 5, when the last transaction published was 3, this will delay // publication until the point in time we publish 4 while let Some(tx) = SignedTransactionDb::take_signed_transaction( txn, &order, tributary .next_nonce(&signer, &order) .await .expect("we don't have a nonce, meaning we aren't a participant on this tributary"), ) { // We need to return a proper error here to enable that, due 
/// Queue a signed transaction for publication onto its Tributary, in nonce order.
///
/// The transaction is first persisted under its (order, nonce). Publication then proceeds from
/// the chain's next expected nonce, draining any previously-queued transactions whose turn has
/// come — so publishing nonce 5 while 4 is still pending simply stores 5 until 4 is published.
///
/// Panics if given a non-signed transaction, or if we aren't a participant on this tributary.
pub async fn publish_signed_transaction<D: Db, P: P2p>(
  txn: &mut D::Transaction<'_>,
  tributary: &Tributary<D, Transaction, P>,
  tx: Transaction,
) {
  log::debug!("publishing transaction {}", hex::encode(tx.hash()));

  let (order, signer) = if let TransactionKind::Signed(order, signed) = tx.kind() {
    let signer = signed.signer;

    // Safe as we should deterministically create transactions, meaning if this is already
    // on-disk, it's what we're saving now
    SignedTransactionDb::set(txn, &order, signed.nonce, &tx.serialize());

    (order, signer)
  } else {
    panic!("non-signed transaction passed to publish_signed_transaction");
  };

  // If we're trying to publish 5, when the last transaction published was 3, this will delay
  // publication until the point in time we publish 4
  while let Some(tx) = SignedTransactionDb::take_signed_transaction(
    txn,
    &order,
    tributary
      .next_nonce(&signer, &order)
      .await
      .expect("we don't have a nonce, meaning we aren't a participant on this tributary"),
  ) {
    // We need to return a proper error here to enable that, due to a race condition around
    // multiple publications
    match tributary.add_transaction(tx.clone()).await {
      Ok(_) => {}
      // Some asynchonicity if InvalidNonce, assumed safe to deterministic nonces
      Err(TransactionError::InvalidNonce) => {
        log::warn!("publishing TX {tx:?} returned InvalidNonce. was it already added?")
      }
      Err(e) => panic!("created an invalid transaction: {e:?}"),
    }
  }
}
/// Implements PublishSeraiTransaction for the Serai client, with retry-until-published
/// semantics and detection of another coordinator having already caused the desired effect.
mod impl_pst_for_serai {
  use super::*;

  use serai_client::SeraiValidatorSets;

  // Uses a macro because Rust can't resolve the lifetimes/generics around the check function
  // check is expected to return true if the effect has already occurred
  // The generated publish function will return true if *we* published the transaction
  macro_rules! common_pst {
    ($Meta: ty, $check: ident) => {
      async fn publish(
        serai: &Serai,
        db: &impl Get,
        set: ExternalValidatorSet,
        tx: serai_client::Transaction,
        meta: $Meta,
      ) -> bool {
        // Retry forever until the TX lands or is proven redundant
        loop {
          match serai.publish(&tx).await {
            Ok(_) => return true,
            // This is assumed to be some ephemeral error due to the assumed fault-free
            // creation
            // TODO2: Differentiate connection errors from invariants
            Err(e) => {
              // The following block is irrelevant, and can/likely will fail, if we're publishing
              // a TX for an old session
              // If we're on a newer session, move on
              if crate::RetiredTributaryDb::get(db, set).is_some() {
                log::warn!("trying to publish a TX relevant to set {set:?} which isn't the latest");
                return false;
              }

              if let Ok(serai) = serai.as_of_latest_finalized_block().await {
                let serai = serai.validator_sets();

                // Check if someone else published the TX in question
                if $check(serai, set, meta).await {
                  return false;
                }
              }

              log::error!("couldn't connect to Serai node to publish TX: {e:?}");
              tokio::time::sleep(core::time::Duration::from_secs(5)).await;
            }
          }
        }
      }
    };
  }

  #[async_trait::async_trait]
  impl PublishSeraiTransaction for Serai {
    async fn publish_set_keys(
      &self,
      db: &(impl Sync + Get),
      set: ExternalValidatorSet,
      removed: Vec<SeraiAddress>,
      key_pair: KeyPair,
      signature: Signature,
    ) {
      // TODO: BoundedVec as an arg to avoid this expect
      let tx = SeraiValidatorSets::set_keys(
        set.network,
        removed.try_into().expect("removing more than allowed"),
        key_pair,
        signature,
      );
      // The effect already occurred if keys are already set for this validator set
      async fn check(serai: SeraiValidatorSets<'_>, set: ExternalValidatorSet, (): ()) -> bool {
        if matches!(serai.keys(set).await, Ok(Some(_))) {
          log::info!("another coordinator set key pair for {:?}", set);
          return true;
        }
        false
      }
      common_pst!((), check);
      if publish(self, db, set, tx, ()).await {
        log::info!("published set keys for {set:?}");
      }
    }
  }
}
/// Abstraction over publishing a transaction onto this Tributary, enabling tests to substitute
/// their own publication path.
#[async_trait::async_trait]
pub trait PTTTrait {
  /// Publish the given transaction onto the Tributary.
  async fn publish_tributary_tx(&self, tx: Transaction);
}
// Blanket impl so any async closure taking a Transaction satisfies the trait
#[async_trait::async_trait]
impl<FPtt: Send + Future<Output = ()>, F: Sync + Fn(Transaction) -> FPtt> PTTTrait for F {
  async fn publish_tributary_tx(&self, tx: Transaction) {
    (self)(tx).await
  }
}

  /// Permanently mark a participant as misbehaving.
  ///
  /// Records the slash into the DB (under this Tributary's genesis), causing all of their future
  /// transactions to be ignored. `reason` is only logged, for the operator's benefit.
  pub fn fatal_slash(&mut self, slashing: [u8; 32], reason: &str) {
    let genesis = self.spec.genesis();

    log::warn!("fatally slashing {}. reason: {}", hex::encode(slashing), reason);
    FatallySlashed::set_fatally_slashed(self.txn, genesis, slashing);

    // TODO: disconnect the node from network/ban from further participation in all Tributaries
  }
variable here let still_present_shares = { // Start with the original n value let mut present_shares = self.spec.n(&[]); // Remove everyone fatally slashed for removed in ¤t_fatal_slashes { let original_i_for_removed = self.spec.i(&[], *removed).expect("removed party was never present"); let removed_shares = u16::from(original_i_for_removed.end) - u16::from(original_i_for_removed.start); present_shares -= removed_shares; } // Spin if the present shares don't satisfy the required threshold if present_shares < self.spec.t() { loop { log::error!( "fatally slashed so many participants for {:?} we no longer meet the threshold", self.spec.set() ); tokio::time::sleep(core::time::Duration::from_secs(60)).await; } } present_shares }; for topic in ReattemptDb::take(self.txn, genesis, self.block_number) { let attempt = AttemptDb::start_next_attempt(self.txn, genesis, topic); log::info!("re-attempting {topic:?} with attempt {attempt}"); // Slash people who failed to participate as expected in the prior attempt { let prior_attempt = attempt - 1; let (removed, expected_participants) = match topic { Topic::Dkg => { // Every validator who wasn't removed is expected to have participated let removed = crate::tributary::removed_as_of_dkg_attempt(self.txn, genesis, prior_attempt) .expect("prior attempt didn't have its removed saved to disk"); let removed_set = removed.iter().copied().collect::>(); ( removed, self .spec .validators() .into_iter() .filter_map(|(validator, _)| { Some(validator).filter(|validator| !removed_set.contains(validator)) }) .collect(), ) } Topic::DkgConfirmation => { panic!("TODO: re-attempting DkgConfirmation when we should be re-attempting the Dkg") } Topic::SubstrateSign(_) | Topic::Sign(_) => { let removed = crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis) .expect("SubstrateSign/Sign yet have yet to set keys"); // TODO: If 67% sent preprocesses, this should be them. 
Else, this should be vec![] let expected_participants = vec![]; (removed, expected_participants) } }; let (expected_topic, expected_label) = match topic { Topic::Dkg => { let n = self.spec.n(&removed); // If we got all the DKG shares, we should be on DKG confirmation let share_spec = DataSpecification { topic: Topic::Dkg, label: Label::Share, attempt: prior_attempt }; if DataReceived::get(self.txn, genesis, &share_spec).unwrap_or(0) == n { // Label::Share since there is no Label::Preprocess for DkgConfirmation since the // preprocess is part of Topic::Dkg Label::Share (Topic::DkgConfirmation, Label::Share) } else { let preprocess_spec = DataSpecification { topic: Topic::Dkg, label: Label::Preprocess, attempt: prior_attempt, }; // If we got all the DKG preprocesses, DKG shares if DataReceived::get(self.txn, genesis, &preprocess_spec).unwrap_or(0) == n { // Label::Share since there is no Label::Preprocess for DkgConfirmation since the // preprocess is part of Topic::Dkg Label::Share (Topic::Dkg, Label::Share) } else { (Topic::Dkg, Label::Preprocess) } } } Topic::DkgConfirmation => unreachable!(), // If we got enough participants to move forward, then we expect shares from them all Topic::SubstrateSign(_) | Topic::Sign(_) => (topic, Label::Share), }; let mut did_not_participate = vec![]; for expected_participant in expected_participants { if DataDb::get( self.txn, genesis, &DataSpecification { topic: expected_topic, label: expected_label, attempt: prior_attempt, }, &expected_participant.to_bytes(), ) .is_none() { did_not_participate.push(expected_participant); } } // If a supermajority didn't participate as expected, the protocol was likely aborted due // to detection of a completion or some larger networking error // Accordingly, clear did_not_participate // TODO // If during the DKG, explicitly mark these people as having been offline // TODO: If they were offline sufficiently long ago, don't strike them off if topic == Topic::Dkg { let mut existing = 
OfflineDuringDkg::get(self.txn, genesis).unwrap_or(vec![]); for did_not_participate in did_not_participate { existing.push(did_not_participate.to_bytes()); } OfflineDuringDkg::set(self.txn, genesis, &existing); } // Slash everyone who didn't participate as expected // This may be overzealous as if a minority detects a completion, they'll abort yet the // supermajority will cause the above allowance to not trigger, causing an honest minority // to be slashed // At the end of the protocol, the accumulated slashes are reduced by the amount obtained // by the worst-performing member of the supermajority, and this is expected to // sufficiently compensate for slashes which occur under normal operation // TODO } /* All of these have the same common flow: 1) Check if this re-attempt is actually needed 2) If so, dispatch whatever events as needed This is because we *always* re-attempt any protocol which had participation. That doesn't mean we *should* re-attempt this protocol. The alternatives were: 1) Note on-chain we completed a protocol, halting re-attempts upon 34%. 2) Vote on-chain to re-attempt a protocol. This schema doesn't have any additional messages upon the success case (whereas alternative #1 does) and doesn't have overhead (as alternative #2 does, sending votes and then preprocesses. This only sends preprocesses). 
*/ match topic { Topic::Dkg => { let mut removed = current_fatal_slashes.clone(); let t = self.spec.t(); { let mut present_shares = still_present_shares; // Load the parties marked as offline across the various attempts let mut offline = OfflineDuringDkg::get(self.txn, genesis) .unwrap_or(vec![]) .iter() .map(|key| ::G::from_bytes(key).unwrap()) .collect::>(); // Pop from the list to prioritize the removal of those recently offline while let Some(offline) = offline.pop() { // Make sure they weren't removed already (such as due to being fatally slashed) // This also may trigger if they were offline across multiple attempts if removed.contains(&offline) { continue; } // If we can remove them and still meet the threshold, do so let original_i_for_offline = self.spec.i(&[], offline).expect("offline was never present?"); let offline_shares = u16::from(original_i_for_offline.end) - u16::from(original_i_for_offline.start); if (present_shares - offline_shares) >= t { present_shares -= offline_shares; removed.push(offline); } // If we've removed as many people as we can, break if present_shares == t { break; } } } RemovedAsOfDkgAttempt::set( self.txn, genesis, attempt, &removed.iter().map(::G::to_bytes).collect(), ); if DkgLocallyCompleted::get(self.txn, genesis).is_none() { let Some(our_i) = self.spec.i(&removed, Ristretto::generator() * self.our_key.deref()) else { continue; }; // Since it wasn't completed, instruct the processor to start the next attempt let id = processor_messages::key_gen::KeyGenId { session: self.spec.set().session, attempt }; let params = frost::ThresholdParams::new(t, self.spec.n(&removed), our_i.start).unwrap(); let shares = u16::from(our_i.end) - u16::from(our_i.start); self .processors .send( self.spec.set().network, processor_messages::key_gen::CoordinatorMessage::GenerateKey { id, params, shares }, ) .await; } } Topic::DkgConfirmation => unreachable!(), Topic::SubstrateSign(inner_id) => { let id = 
processor_messages::coordinator::SubstrateSignId { session: self.spec.set().session, id: inner_id, attempt, }; match inner_id { SubstrateSignableId::CosigningSubstrateBlock(block) => { let block_number = SeraiBlockNumber::get(self.txn, block) .expect("couldn't get the block number for prior attempted cosign"); // Check if the cosigner has a signature from our set for this block/a newer one let latest_cosign = crate::cosign_evaluator::LatestCosign::get(self.txn, self.spec.set().network) .map_or(0, |cosign| cosign.block_number); if latest_cosign < block_number { // Instruct the processor to start the next attempt self .processors .send( self.spec.set().network, processor_messages::coordinator::CoordinatorMessage::CosignSubstrateBlock { id, block_number, }, ) .await; } } SubstrateSignableId::Batch(batch) => { // If the Batch hasn't appeared on-chain... if BatchInstructionsHashDb::get(self.txn, self.spec.set().network, batch).is_none() { // Instruct the processor to start the next attempt // The processor won't continue if it's already signed a Batch // Prior checking if the Batch is on-chain just may reduce the non-participating // 33% from publishing their re-attempt messages self .processors .send( self.spec.set().network, processor_messages::coordinator::CoordinatorMessage::BatchReattempt { id }, ) .await; } } SubstrateSignableId::SlashReport => { // If this Tributary hasn't been retired... 
// (published SlashReport/took too long to do so) if crate::RetiredTributaryDb::get(self.txn, self.spec.set()).is_none() { let report = SlashReport::get(self.txn, self.spec.set()) .expect("re-attempting signing a SlashReport we don't have?"); self .processors .send( self.spec.set().network, processor_messages::coordinator::CoordinatorMessage::SignSlashReport { id, report, }, ) .await; } } } } Topic::Sign(id) => { // Instruct the processor to start the next attempt // If it has already noted a completion, it won't send a preprocess and will simply drop // the re-attempt message self .processors .send( self.spec.set().network, processor_messages::sign::CoordinatorMessage::Reattempt { id: processor_messages::sign::SignId { session: self.spec.set().session, id, attempt, }, }, ) .await; } } } if Some(u64::from(self.block_number)) == SlashReportCutOff::get(self.txn, genesis) { // Grab every slash report let mut all_reports = vec![]; for (i, (validator, _)) in self.spec.validators().into_iter().enumerate() { let Some(mut report) = SlashReports::get(self.txn, genesis, validator.to_bytes()) else { continue; }; // Assign them 0 points for themselves report.insert(i, 0); // Uses &[] as we only need the length which is independent to who else was removed let signer_i = self.spec.i(&[], validator).unwrap(); let signer_len = u16::from(signer_i.end) - u16::from(signer_i.start); // Push `n` copies, one for each of their shares for _ in 0 .. signer_len { all_reports.push(report.clone()); } } // For each participant, grab their median let mut medians = vec![]; for p in 0 .. 
self.spec.validators().len() { let mut median_calc = vec![]; for report in &all_reports { median_calc.push(report[p]); } median_calc.sort_unstable(); medians.push(median_calc[median_calc.len() / 2]); } // Grab the points of the last party within the best-performing threshold // This is done by first expanding the point values by the amount of shares let mut sorted_medians = vec![]; for (i, (_, shares)) in self.spec.validators().into_iter().enumerate() { for _ in 0 .. shares { sorted_medians.push(medians[i]); } } // Then performing the sort sorted_medians.sort_unstable(); let worst_points_by_party_within_threshold = sorted_medians[usize::from(self.spec.t()) - 1]; // Reduce everyone's points by this value for median in &mut medians { *median = median.saturating_sub(worst_points_by_party_within_threshold); } // The threshold now has the proper incentive to report this as they no longer suffer // negative effects // // Additionally, if all validators had degraded performance, they don't all get penalized for // what's likely outside their control (as it occurred universally) // Mark everyone fatally slashed with u32::MAX for (i, (validator, _)) in self.spec.validators().into_iter().enumerate() { if FatallySlashed::get(self.txn, genesis, validator.to_bytes()).is_some() { medians[i] = u32::MAX; } } let mut report = vec![]; for (i, (validator, _)) in self.spec.validators().into_iter().enumerate() { if medians[i] != 0 { report.push((validator.to_bytes(), medians[i])); } } // This does lock in the report, meaning further slash point accumulations won't be reported // They still have value to be locally tracked due to local decisions made based off // accumulated slash reports SlashReport::set(self.txn, self.spec.set(), &report); // Start a signing protocol for this self .processors .send( self.spec.set().network, processor_messages::coordinator::CoordinatorMessage::SignSlashReport { id: SubstrateSignId { session: self.spec.set().session, id: 
SubstrateSignableId::SlashReport,
              attempt: 0,
            },
            report,
          },
        )
        .await;
    }
  }
}

#[allow(clippy::too_many_arguments)]
/// Scan and handle every new block on a Tributary, resuming from the last handled block.
///
/// For each block, this first verifies all Provided transactions are locally available (returning
/// early, without advancing, if any are missing), then processes the block via
/// `TributaryBlockHandler` inside a fresh DB transaction which is committed per-block.
pub(crate) async fn handle_new_blocks<
  D: Db,
  Pro: Processors,
  PST: PublishSeraiTransaction,
  PTT: PTTTrait,
  RID: RIDTrait,
  P: P2p,
>(
  db: &mut D,
  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
  recognized_id: &RID,
  processors: &Pro,
  publish_serai_tx: &PST,
  publish_tributary_tx: &PTT,
  spec: &TributarySpec,
  tributary: &TributaryReader<D, Transaction>,
) {
  let genesis = tributary.genesis();
  let mut last_block = LastHandledBlock::get(db, genesis).unwrap_or(genesis);
  let mut block_number = TributaryBlockNumber::get(db, last_block).unwrap_or(0);
  while let Some(next) = tributary.block_after(&last_block) {
    let block = tributary.block(&next).unwrap();
    block_number += 1;

    // Make sure we have all of the provided transactions for this block
    for tx in &block.transactions {
      // Provided TXs will appear first in the Block, so we can break after we hit a non-Provided
      let TransactionKind::Provided(order) = tx.kind() else {
        break;
      };

      // make sure we have all the provided txs in this block locally
      if !tributary.locally_provided_txs_in_block(&block.hash(), order) {
        return;
      }
    }

    // Clone the DB for the handler's TXN; `db` itself stays borrowed by the handler
    let mut db_clone = db.clone();
    let mut txn = db_clone.txn();
    TributaryBlockNumber::set(&mut txn, next, &block_number);
    (TributaryBlockHandler {
      db,
      txn: &mut txn,
      spec,
      our_key: key,
      recognized_id,
      processors,
      publish_serai_tx,
      publish_tributary_tx,
      block,
      block_number,
      // NOTE(review): generic argument reconstructed; the extraction stripped `<P>` here
      _p2p: PhantomData::<P>,
    })
    .handle()
    .await;
    last_block = next;
    LastHandledBlock::set(&mut txn, genesis, &next);
    txn.commit();
  }
}

/// Listen for new Tributaries and spawn a dedicated scanner task per active Tributary.
///
/// Each scanner loops until its set is retired, handling new blocks either when notified or on a
/// block-time interval fallback.
pub(crate) async fn scan_tributaries_task<
  D: Db,
  Pro: Processors,
  P: P2p,
  RID: 'static + Send + Sync + Clone + RIDTrait,
>(
  raw_db: D,
  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
  recognized_id: RID,
  processors: Pro,
  serai: Arc<Serai>,
  mut tributary_event: broadcast::Receiver<crate::TributaryEvent<D, P>>,
) {
  log::info!("scanning tributaries");

  loop {
    match tributary_event.recv().await {
      Ok(crate::TributaryEvent::NewTributary(crate::ActiveTributary { spec, tributary })) => {
        // For each Tributary, spawn a dedicated scanner task
        tokio::spawn({
          let raw_db = raw_db.clone();
          let key = key.clone();
          let recognized_id = recognized_id.clone();
          let processors = processors.clone();
          let serai = serai.clone();
          async move {
            let spec = &spec;
            let reader = tributary.reader();
            let mut tributary_db = raw_db.clone();
            loop {
              // Check if the set was retired, and if so, don't further operate
              if crate::db::RetiredTributaryDb::get(&raw_db, spec.set()).is_some() {
                break;
              }

              // Obtain the next block notification now to prevent obtaining it immediately after
              // the next block occurs
              let next_block_notification = tributary.next_block_notification().await;

              handle_new_blocks::<_, _, _, _, _, P>(
                &mut tributary_db,
                &key,
                &recognized_id,
                &processors,
                &*serai,
                &|tx: Transaction| {
                  let tributary = tributary.clone();
                  async move {
                    match tributary.add_transaction(tx.clone()).await {
                      Ok(_) => {}
                      // Can happen as this occurs on a distinct DB TXN
                      Err(TransactionError::InvalidNonce) => {
                        log::warn!(
                          "publishing TX {tx:?} returned InvalidNonce. was it already added?"
                        )
                      }
                      Err(e) => panic!("created an invalid transaction: {e:?}"),
                    }
                  }
                },
                spec,
                &reader,
              )
              .await;

              // Run either when the notification fires, or every interval of block_time
              let _ = tokio::time::timeout(
                Duration::from_secs(tributary::Tributary::<D, Transaction, P>::block_time().into()),
                next_block_notification,
              )
              .await;
            }
          }
        });
      }
      // The above loop simply checks the DB every few seconds, voiding the need for this event
      Ok(crate::TributaryEvent::TributaryRetired(_)) => {}
      Err(broadcast::error::RecvError::Lagged(_)) => {
        panic!("scan_tributaries lagged to handle tributary_event")
      }
      Err(broadcast::error::RecvError::Closed) => panic!("tributary_event sender closed"),
    }
  }
}

================================================ FILE: coordinator/src/tributary/signing_protocol.rs ================================================

/*
  A MuSig-based signing protocol executed with the validators' keys. This is used for confirming
  the results of a DKG on-chain, an operation requiring all validators which aren't specified as
  removed while still satisfying a supermajority.

  Since we're using the validator's keys, as needed for their being the root of trust, the
  coordinator must perform the signing. This is distinct from all other group-signing operations,
  as they're all done by the processor.

  The MuSig-aggregation achieves on-chain efficiency and enables a more secure design pattern.
  While we could individually track votes, that'd require logic to prevent voting multiple times
  and tracking the accumulated votes. MuSig-aggregation simply requires checking the list is
  sorted and the list's weight exceeds the threshold.

  Instead of maintaining state in memory, a combination of the DB and re-execution are used. This
  is deemed acceptable re: performance as:

  1) This is only done prior to a DKG being confirmed on Substrate and is assumed infrequent.
  2) This is an O(n) algorithm.
  3) The size of the validator set is bounded by MAX_KEY_SHARES_PER_SET.

  Accordingly, this should be tolerable.
As for safety, it is explicitly unsafe to reuse nonces across signing sessions. This raises concerns regarding our re-execution which is dependent on fixed nonces. Safety is derived from the nonces being context-bound under a BFT protocol. The flow is as follows: 1) Decide the nonce. 2) Publish the nonces' commitments, receiving everyone else's *and potentially the message to be signed*. 3) Sign and publish the signature share. In order for nonce re-use to occur, the received nonce commitments (or the message to be signed) would have to be distinct and sign would have to be called again. Before we act on any received messages, they're ordered and finalized by a BFT algorithm. The only way to operate on distinct received messages would be if: 1) A logical flaw exists, letting new messages overwrite prior messages 2) A reorganization occurred from chain A to chain B, and with it, different messages Reorganizations are not supported, as BFT is assumed by the presence of a BFT algorithm. While a significant amount of processes may be byzantine, leading to BFT being broken, that still will not trigger a reorganization. The only way to move to a distinct chain, with distinct messages, would be by rebuilding the local process (this time following chain B). Upon any complete rebuild, we'd re-decide nonces, achieving safety. This does set a bound preventing partial rebuilds which is accepted. Additionally, to ensure a rebuilt service isn't flagged as malicious, we have to check the commitments generated from the decided nonces are in fact its commitments on-chain (TODO). TODO: We also need to review how we're handling Processor preprocesses and likely implement the same on-chain-preprocess-matches-presumed-preprocess check before publishing shares.
*/ use core::ops::Deref; use std::collections::HashMap; use zeroize::{Zeroize, Zeroizing}; use rand_core::OsRng; use blake2::{Digest, Blake2s256}; use dalek_ff_group::Ristretto; use ciphersuite::{ group::{ff::PrimeField, GroupEncoding}, Ciphersuite, }; use dkg_musig::musig; use frost::{FrostError, dkg::Participant, ThresholdKeys, sign::*}; use frost_schnorrkel::Schnorrkel; use scale::Encode; use serai_client::{ Public, validator_sets::primitives::{KeyPair, musig_context, set_keys_message}, }; use serai_db::*; use crate::tributary::TributarySpec; create_db!( SigningProtocolDb { CachedPreprocesses: (context: &impl Encode) -> [u8; 32] } ); struct SigningProtocol<'a, T: DbTxn, C: Encode> { pub(crate) key: &'a Zeroizing<::F>, pub(crate) spec: &'a TributarySpec, pub(crate) txn: &'a mut T, pub(crate) context: C, } impl SigningProtocol<'_, T, C> { fn preprocess_internal( &mut self, participants: &[::G], ) -> (AlgorithmSignMachine, [u8; 64]) { // Encrypt the cached preprocess as recovery of it will enable recovering the private key // While the DB isn't expected to be arbitrarily readable, it isn't a proper secret store and // shouldn't be trusted as one let mut encryption_key = { let mut encryption_key_preimage = Zeroizing::new(b"Cached Preprocess Encryption Key".to_vec()); encryption_key_preimage.extend(self.context.encode()); let repr = Zeroizing::new(self.key.to_repr()); encryption_key_preimage.extend(repr.deref()); Blake2s256::digest(&encryption_key_preimage) }; let encryption_key_slice: &mut [u8] = encryption_key.as_mut(); let algorithm = Schnorrkel::new(b"substrate"); let keys: ThresholdKeys = musig(musig_context(self.spec.set().into()), self.key.clone(), participants) .expect("signing for a set we aren't in/validator present multiple times") .into(); if CachedPreprocesses::get(self.txn, &self.context).is_none() { let (machine, _) = AlgorithmMachine::new(algorithm.clone(), keys.clone()).preprocess(&mut OsRng); let mut cache = machine.cache(); 
assert_eq!(cache.0.len(), 32); #[allow(clippy::needless_range_loop)] for b in 0 .. 32 { cache.0[b] ^= encryption_key_slice[b]; } CachedPreprocesses::set(self.txn, &self.context, &cache.0); } let cached = CachedPreprocesses::get(self.txn, &self.context).unwrap(); let mut cached: Zeroizing<[u8; 32]> = Zeroizing::new(cached); #[allow(clippy::needless_range_loop)] for b in 0 .. 32 { cached[b] ^= encryption_key_slice[b]; } encryption_key_slice.zeroize(); let (machine, preprocess) = AlgorithmSignMachine::from_cache(algorithm, keys, CachedPreprocess(cached)); (machine, preprocess.serialize().try_into().unwrap()) } fn share_internal( &mut self, participants: &[::G], mut serialized_preprocesses: HashMap>, msg: &[u8], ) -> Result<(AlgorithmSignatureMachine, [u8; 32]), Participant> { let machine = self.preprocess_internal(participants).0; let mut participants = serialized_preprocesses.keys().copied().collect::>(); participants.sort(); let mut preprocesses = HashMap::new(); for participant in participants { preprocesses.insert( participant, machine .read_preprocess(&mut serialized_preprocesses.remove(&participant).unwrap().as_slice()) .map_err(|_| participant)?, ); } let (machine, share) = machine.sign(preprocesses, msg).map_err(|e| match e { FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"), FrostError::InvalidParticipant(_, _) | FrostError::InvalidSigningSet(_) | FrostError::InvalidParticipantQuantity(_, _) | FrostError::DuplicatedParticipant(_) | FrostError::MissingParticipant(_) => unreachable!("{e:?}"), FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p, })?; Ok((machine, share.serialize().try_into().unwrap())) } fn complete_internal( machine: AlgorithmSignatureMachine, shares: HashMap>, ) -> Result<[u8; 64], Participant> { let shares = shares .into_iter() .map(|(p, share)| { machine.read_share(&mut share.as_slice()).map(|share| (p, share)).map_err(|_| p) }) .collect::, _>>()?; let signature = 
machine.complete(shares).map_err(|e| match e { FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"), FrostError::InvalidParticipant(_, _) | FrostError::InvalidSigningSet(_) | FrostError::InvalidParticipantQuantity(_, _) | FrostError::DuplicatedParticipant(_) | FrostError::MissingParticipant(_) => unreachable!("{e:?}"), FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p, })?; Ok(signature.to_bytes()) } } // Get the keys of the participants, noted by their threshold is, and return a new map indexed by // the MuSig is. fn threshold_i_map_to_keys_and_musig_i_map( spec: &TributarySpec, removed: &[::G], our_key: &Zeroizing<::F>, mut map: HashMap>, ) -> (Vec<::G>, HashMap>) { // Insert our own index so calculations aren't offset let our_threshold_i = spec .i(removed, ::generator() * our_key.deref()) .expect("MuSig t-of-n signing a for a protocol we were removed from") .start; assert!(map.insert(our_threshold_i, vec![]).is_none()); let spec_validators = spec.validators(); let key_from_threshold_i = |threshold_i| { for (key, _) in &spec_validators { if threshold_i == spec.i(removed, *key).expect("MuSig t-of-n participant was removed").start { return *key; } } panic!("requested info for threshold i which doesn't exist") }; let mut sorted = vec![]; let mut threshold_is = map.keys().copied().collect::>(); threshold_is.sort(); for threshold_i in threshold_is { sorted.push((key_from_threshold_i(threshold_i), map.remove(&threshold_i).unwrap())); } // Now that signers are sorted, with their shares, create a map with the is needed for MuSig let mut participants = vec![]; let mut map = HashMap::new(); for (raw_i, (key, share)) in sorted.into_iter().enumerate() { let musig_i = u16::try_from(raw_i).unwrap() + 1; participants.push(key); map.insert(Participant::new(musig_i).unwrap(), share); } map.remove(&our_threshold_i).unwrap(); (participants, map) } type DkgConfirmerSigningProtocol<'a, T> = SigningProtocol<'a, T, (&'static [u8; 12], u32)>; 
/// Confirms the result of a DKG on-chain via a MuSig signature from the (non-removed)
/// validators' keys. Attempt numbers are shared with the DKG protocol itself.
pub(crate) struct DkgConfirmer<'a, T: DbTxn> {
  key: &'a Zeroizing<<Ristretto as Ciphersuite>::F>,
  spec: &'a TributarySpec,
  removed: Vec<<Ristretto as Ciphersuite>::G>,
  txn: &'a mut T,
  attempt: u32,
}

// NOTE(review): generic parameters reconstructed; the extraction stripped `<...>` spans here
impl<T: DbTxn> DkgConfirmer<'_, T> {
  /// Construct a confirmer for the given attempt, or None if the removed-participants set for
  /// that attempt hasn't been decided yet.
  pub(crate) fn new<'a>(
    key: &'a Zeroizing<<Ristretto as Ciphersuite>::F>,
    spec: &'a TributarySpec,
    txn: &'a mut T,
    attempt: u32,
  ) -> Option<DkgConfirmer<'a, T>> {
    // This relies on how confirmations are inlined into the DKG protocol and they accordingly
    // share attempts
    let removed = crate::tributary::removed_as_of_dkg_attempt(txn, spec.genesis(), attempt)?;
    Some(DkgConfirmer { key, spec, removed, txn, attempt })
  }

  // The context binds the cached nonce to this specific confirmation attempt
  fn signing_protocol(&mut self) -> DkgConfirmerSigningProtocol<'_, T> {
    let context = (b"DkgConfirmer", self.attempt);
    SigningProtocol { key: self.key, spec: self.spec, txn: self.txn, context }
  }

  fn preprocess_internal(&mut self) -> (AlgorithmSignMachine<Ristretto, Schnorrkel>, [u8; 64]) {
    // All validators (incl. removed) participate in the underlying MuSig key derivation
    let participants = self.spec.validators().iter().map(|val| val.0).collect::<Vec<_>>();
    self.signing_protocol().preprocess_internal(&participants)
  }
  // Get the preprocess for this confirmation.
  pub(crate) fn preprocess(&mut self) -> [u8; 64] {
    self.preprocess_internal().1
  }

  fn share_internal(
    &mut self,
    preprocesses: HashMap<Participant, Vec<u8>>,
    key_pair: &KeyPair,
  ) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> {
    let participants = self.spec.validators().iter().map(|val| val.0).collect::<Vec<_>>();
    // Re-index the received preprocesses from threshold is to MuSig is
    let preprocesses =
      threshold_i_map_to_keys_and_musig_i_map(self.spec, &self.removed, self.key, preprocesses).1;
    let msg = set_keys_message(
      &self.spec.set(),
      &self.removed.iter().map(|key| Public::from(key.to_bytes())).collect::<Vec<_>>(),
      key_pair,
    );
    self.signing_protocol().share_internal(&participants, preprocesses, &msg)
  }
  // Get the share for this confirmation, if the preprocesses are valid.
pub(crate) fn share(
    &mut self,
    preprocesses: HashMap<Participant, Vec<u8>>,
    key_pair: &KeyPair,
  ) -> Result<[u8; 32], Participant> {
    self.share_internal(preprocesses, key_pair).map(|(_, share)| share)
  }

  /// Complete the confirmation, yielding the aggregated 64-byte MuSig signature, or the
  /// `Participant` to blame for an invalid preprocess/share.
  pub(crate) fn complete(
    &mut self,
    preprocesses: HashMap<Participant, Vec<u8>>,
    key_pair: &KeyPair,
    shares: HashMap<Participant, Vec<u8>>,
  ) -> Result<[u8; 64], Participant> {
    // Re-index the received shares from threshold is to MuSig is
    let shares =
      threshold_i_map_to_keys_and_musig_i_map(self.spec, &self.removed, self.key, shares).1;

    // Re-execute the share step to rebuild the signature machine (see file header re: safety)
    let machine = self
      .share_internal(preprocesses, key_pair)
      .expect("trying to complete a machine which failed to preprocess")
      .0;

    DkgConfirmerSigningProtocol::<'_, T>::complete_internal(machine, shares)
  }
}

================================================ FILE: coordinator/src/tributary/spec.rs ================================================

use core::{ops::Range, fmt::Debug};
use std::{io, collections::HashMap};

use transcript::{Transcript, RecommendedTranscript};

use dalek_ff_group::Ristretto;
use ciphersuite::{group::GroupEncoding, Ciphersuite};
use frost::Participant;

use scale::Encode;
use borsh::{BorshSerialize, BorshDeserialize};

use serai_client::{primitives::PublicKey, validator_sets::primitives::ExternalValidatorSet};

/// Borsh-serialize the validator list as a u16 length followed by (compressed point, weight)
/// pairs. NOTE(review): the `<W: io::Write>` bound was reconstructed (stripped by extraction).
fn borsh_serialize_validators<W: io::Write>(
  validators: &Vec<(<Ristretto as Ciphersuite>::G, u16)>,
  writer: &mut W,
) -> Result<(), io::Error> {
  let len = u16::try_from(validators.len()).unwrap();
  BorshSerialize::serialize(&len, writer)?;
  for validator in validators {
    BorshSerialize::serialize(&validator.0.to_bytes(), writer)?;
    BorshSerialize::serialize(&validator.1, writer)?;
  }
  Ok(())
}

/// Inverse of `borsh_serialize_validators`, validating each compressed point.
fn borsh_deserialize_validators<R: io::Read>(
  reader: &mut R,
) -> Result<Vec<(<Ristretto as Ciphersuite>::G, u16)>, io::Error> {
  let len: u16 = BorshDeserialize::deserialize_reader(reader)?;
  let mut res = vec![];
  for _ in 0 ..
len {
    let compressed: [u8; 32] = BorshDeserialize::deserialize_reader(reader)?;
    // Reject invalid/non-canonical point encodings
    let point = Option::from(<Ristretto as Ciphersuite>::G::from_bytes(&compressed))
      .ok_or_else(|| io::Error::other("invalid point for validator"))?;
    let weight: u16 = BorshDeserialize::deserialize_reader(reader)?;
    res.push((point, weight));
  }
  Ok(res)
}

/// The specification of a Tributary: the Serai block it was declared at, its start time, the
/// validator set it serves, and the weighted validators within it.
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
pub struct TributarySpec {
  serai_block: [u8; 32],
  start_time: u64,
  set: ExternalValidatorSet,
  #[borsh(
    serialize_with = "borsh_serialize_validators",
    deserialize_with = "borsh_deserialize_validators"
  )]
  validators: Vec<(<Ristretto as Ciphersuite>::G, u16)>,
}

impl TributarySpec {
  pub fn new(
    serai_block: [u8; 32],
    start_time: u64,
    set: ExternalValidatorSet,
    set_participants: Vec<(PublicKey, u16)>,
  ) -> TributarySpec {
    let mut validators = vec![];
    for (participant, shares) in set_participants {
      // Keys come from Substrate and are expected to always be valid points
      let participant = <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut participant.0.as_ref())
        .expect("invalid key registered as participant");
      validators.push((participant, shares));
    }

    Self { serai_block, start_time, set, validators }
  }

  pub fn set(&self) -> ExternalValidatorSet {
    self.set
  }

  pub fn genesis(&self) -> [u8; 32] {
    // Calculate the genesis for this Tributary
    let mut genesis = RecommendedTranscript::new(b"Serai Tributary Genesis");
    // This locks it to a specific Serai chain
    genesis.append_message(b"serai_block", self.serai_block);
    genesis.append_message(b"session", self.set.session.0.to_le_bytes());
    genesis.append_message(b"network", self.set.network.encode());
    let genesis = genesis.challenge(b"genesis");
    let genesis_ref: &[u8] = genesis.as_ref();
    genesis_ref[.. 32].try_into().unwrap()
  }

  pub fn start_time(&self) -> u64 {
    self.start_time
  }

  /// Total key shares, excluding the specified removed validators.
  pub fn n(&self, removed_validators: &[<Ristretto as Ciphersuite>::G]) -> u16 {
    self
      .validators
      .iter()
      .map(|(validator, weight)| if removed_validators.contains(validator) { 0 } else { *weight })
      .sum()
  }

  pub fn t(&self) -> u16 {
    // t doesn't change with regards to the amount of removed validators
    ((2 * self.n(&[])) / 3) + 1
  }

  /// The range of threshold indexes assigned to `key`, shifted down to account for removed
  /// validators. None if `key` isn't in the set or was itself removed.
  pub fn i(
    &self,
    removed_validators: &[<Ristretto as Ciphersuite>::G],
    key: <Ristretto as Ciphersuite>::G,
  ) -> Option<Range<Participant>> {
    let mut all_is = HashMap::new();
    let mut i = 1;
    for (validator, weight) in &self.validators {
      all_is.insert(
        *validator,
        Range { start: Participant::new(i).unwrap(), end: Participant::new(i + weight).unwrap() },
      );
      i += weight;
    }

    let original_i = all_is.get(&key)?.clone();
    let mut result_i = original_i.clone();
    for removed_validator in removed_validators {
      let removed_i = all_is
        .get(removed_validator)
        .expect("removed validator wasn't present in set to begin with");
      // If the queried key was removed, return None
      if &original_i == removed_i {
        return None;
      }

      // If the removed was before the queried, shift the queried down accordingly
      if removed_i.start < original_i.start {
        let removed_shares = u16::from(removed_i.end) - u16::from(removed_i.start);
        result_i.start = Participant::new(u16::from(original_i.start) - removed_shares).unwrap();
        result_i.end = Participant::new(u16::from(original_i.end) - removed_shares).unwrap();
      }
    }
    Some(result_i)
  }

  /// Find the validator whose (post-removal) index range contains `i`.
  pub fn reverse_lookup_i(
    &self,
    removed_validators: &[<Ristretto as Ciphersuite>::G],
    i: Participant,
  ) -> Option<<Ristretto as Ciphersuite>::G> {
    for (validator, _) in &self.validators {
      if self.i(removed_validators, *validator).map_or(false, |range| range.contains(&i)) {
        return Some(*validator);
      }
    }
    None
  }

  pub fn validators(&self) -> Vec<(<Ristretto as Ciphersuite>::G, u64)> {
    self.validators.iter().map(|(validator, weight)| (*validator, u64::from(*weight))).collect()
  }
}

================================================ FILE: coordinator/src/tributary/transaction.rs ================================================

use core::{ops::Deref, fmt::Debug};
use
std::io;

use zeroize::Zeroizing;
use rand_core::{RngCore, CryptoRng};

use blake2::{Digest, Blake2s256};
use transcript::{Transcript, RecommendedTranscript};

use dalek_ff_group::Ristretto;
use ciphersuite::{
  group::{ff::Field, GroupEncoding},
  Ciphersuite,
};
use schnorr::SchnorrSignature;
use frost::Participant;

use scale::{Encode, Decode};
use processor_messages::coordinator::SubstrateSignableId;

use tributary::{
  TRANSACTION_SIZE_LIMIT, ReadWrite,
  transaction::{Signed, TransactionError, TransactionKind, Transaction as TransactionTrait},
};

/// Which phase of a signing protocol a transaction's data belongs to.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode)]
pub enum Label {
  Preprocess,
  Share,
}

impl Label {
  // TODO: Should nonces be u8 thanks to our use of topics?
  pub fn nonce(&self) -> u32 {
    match self {
      Label::Preprocess => 0,
      Label::Share => 1,
    }
  }
}

// NOTE(review): the `<Id: ...>` generic bounds in this section were reconstructed; the
// extraction stripped `<...>` spans. Verify against the original source.

/// Signing-protocol data (preprocesses or shares) for a plan, one entry per local key share.
#[derive(Clone, PartialEq, Eq)]
pub struct SignData<Id: Clone + PartialEq + Eq + Debug + Encode + Decode> {
  pub plan: Id,
  pub attempt: u32,
  pub label: Label,

  pub data: Vec<Vec<u8>>,

  pub signed: Signed,
}

impl<Id: Clone + PartialEq + Eq + Debug + Encode + Decode> Debug for SignData<Id> {
  fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
    fmt
      .debug_struct("SignData")
      .field("id", &hex::encode(self.plan.encode()))
      .field("attempt", &self.attempt)
      .field("label", &self.label)
      .field("signer", &hex::encode(self.signed.signer.to_bytes()))
      .finish_non_exhaustive()
  }
}

impl<Id: Clone + PartialEq + Eq + Debug + Encode + Decode> SignData<Id> {
  /// Deserialize a SignData: SCALE-encoded plan, u32 LE attempt, one-byte label, then a
  /// non-zero count of u16-length-prefixed data pieces, then the signature (nonce derived from
  /// the label).
  pub(crate) fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let plan = Id::decode(&mut scale::IoReader(&mut *reader))
      .map_err(|_| io::Error::other("invalid plan in SignData"))?;

    let mut attempt = [0; 4];
    reader.read_exact(&mut attempt)?;
    let attempt = u32::from_le_bytes(attempt);

    let mut label = [0; 1];
    reader.read_exact(&mut label)?;
    let label = match label[0] {
      0 => Label::Preprocess,
      1 => Label::Share,
      _ => Err(io::Error::other("invalid label in SignData"))?,
    };

    let data = {
      let mut data_pieces = [0];
      reader.read_exact(&mut data_pieces)?;
      if data_pieces[0] == 0 {
        Err(io::Error::other("zero pieces of data in SignData"))?;
      }
      let mut all_data = vec![];
      for _ in 0 ..
data_pieces[0] {
        let mut data_len = [0; 2];
        reader.read_exact(&mut data_len)?;
        let mut data = vec![0; usize::from(u16::from_le_bytes(data_len))];
        reader.read_exact(&mut data)?;
        all_data.push(data);
      }
      all_data
    };

    let signed = Signed::read_without_nonce(reader, label.nonce())?;

    Ok(SignData { plan, attempt, label, data, signed })
  }

  /// Serialize this SignData; inverse of `read`. Each data piece is capped at u16::MAX bytes.
  pub(crate) fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    writer.write_all(&self.plan.encode())?;
    writer.write_all(&self.attempt.to_le_bytes())?;

    writer.write_all(&[match self.label {
      Label::Preprocess => 0,
      Label::Share => 1,
    }])?;

    writer.write_all(&[u8::try_from(self.data.len()).unwrap()])?;
    for data in &self.data {
      if data.len() > u16::MAX.into() {
        // Currently, the largest individual preprocess is a Monero transaction
        // It provides 4 commitments per input (128 bytes), a 64-byte proof for them, along with a
        // key image and proof (96 bytes)
        // Even with all of that, we could support 227 inputs in a single TX
        // Monero is limited to ~120 inputs per TX
        //
        // Bitcoin has a much higher input count of 520, yet it only uses 64 bytes per preprocess
        Err(io::Error::other("signing data exceeded 65535 bytes"))?;
      }
      writer.write_all(&u16::try_from(data.len()).unwrap().to_le_bytes())?;
      writer.write_all(data)?;
    }

    self.signed.write_without_nonce(writer)
  }
}

/// Every transaction which may appear on a Tributary.
// NOTE(review): stripped generic arguments (`Vec<Vec<u8>>`, `Option<Vec<u8>>`, ...) were
// reconstructed; verify against the original source.
#[derive(Clone, PartialEq, Eq)]
pub enum Transaction {
  RemoveParticipantDueToDkg {
    participant: <Ristretto as Ciphersuite>::G,
    signed: Signed,
  },

  DkgCommitments {
    attempt: u32,
    commitments: Vec<Vec<u8>>,
    signed: Signed,
  },
  DkgShares {
    attempt: u32,
    // Sending Participant, Receiving Participant, Share
    shares: Vec<Vec<Vec<u8>>>,
    confirmation_nonces: [u8; 64],
    signed: Signed,
  },
  InvalidDkgShare {
    attempt: u32,
    accuser: Participant,
    faulty: Participant,
    blame: Option<Vec<u8>>,
    signed: Signed,
  },
  DkgConfirmed {
    attempt: u32,
    confirmation_share: [u8; 32],
    signed: Signed,
  },

  // Co-sign a Substrate block.
CosignSubstrateBlock([u8; 32]),

  // When we have synchrony on a batch, we can allow signing it
  // TODO (never?): This is less efficient compared to an ExternalBlock provided transaction,
  // which would be binding over the block hash and automatically achieve synchrony on all
  // relevant batches. ExternalBlock was removed for this due to complexity around the pipeline
  // with the current processor, yet it would still be an improvement.
  Batch {
    block: [u8; 32],
    batch: u32,
  },
  // When a Serai block is finalized, with the contained batches, we can allow the associated plan
  // IDs
  SubstrateBlock(u64),

  SubstrateSign(SignData<SubstrateSignableId>),
  Sign(SignData<[u8; 32]>),
  // This is defined as an Unsigned transaction in order to de-duplicate SignCompleted amongst
  // reporters (who should all report the same thing)
  // We do still track the signer in order to prevent a single signer from publishing arbitrarily
  // many TXs without penalty
  // Here, they're denoted as the first_signer, as only the signer of the first TX to be included
  // with this pairing will be remembered on-chain
  SignCompleted {
    plan: [u8; 32],
    tx_hash: Vec<u8>,
    first_signer: <Ristretto as Ciphersuite>::G,
    signature: SchnorrSignature<Ristretto>,
  },

  // NOTE(review): element type reconstructed from the Debug impl's "points" field; verify
  // against the original source
  SlashReport(Vec<u32>, Signed),
}

impl Debug for Transaction {
  fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
    match self {
      Transaction::RemoveParticipantDueToDkg { participant, signed } => fmt
        .debug_struct("Transaction::RemoveParticipantDueToDkg")
        .field("participant", &hex::encode(participant.to_bytes()))
        .field("signer", &hex::encode(signed.signer.to_bytes()))
        .finish_non_exhaustive(),
      Transaction::DkgCommitments { attempt, commitments: _, signed } => fmt
        .debug_struct("Transaction::DkgCommitments")
        .field("attempt", attempt)
        .field("signer", &hex::encode(signed.signer.to_bytes()))
        .finish_non_exhaustive(),
      Transaction::DkgShares { attempt, signed, .. } => fmt
        .debug_struct("Transaction::DkgShares")
        .field("attempt", attempt)
        .field("signer", &hex::encode(signed.signer.to_bytes()))
        .finish_non_exhaustive(),
      Transaction::InvalidDkgShare { attempt, accuser, faulty, .. } => fmt
        .debug_struct("Transaction::InvalidDkgShare")
        .field("attempt", attempt)
        .field("accuser", accuser)
        .field("faulty", faulty)
        .finish_non_exhaustive(),
      Transaction::DkgConfirmed { attempt, confirmation_share: _, signed } => fmt
        .debug_struct("Transaction::DkgConfirmed")
        .field("attempt", attempt)
        .field("signer", &hex::encode(signed.signer.to_bytes()))
        .finish_non_exhaustive(),
      Transaction::CosignSubstrateBlock(block) => fmt
        .debug_struct("Transaction::CosignSubstrateBlock")
        .field("block", &hex::encode(block))
        .finish(),
      Transaction::Batch { block, batch } => fmt
        .debug_struct("Transaction::Batch")
        .field("block", &hex::encode(block))
        .field("batch", &batch)
        .finish(),
      Transaction::SubstrateBlock(block) => {
        fmt.debug_struct("Transaction::SubstrateBlock").field("block", block).finish()
      }
      Transaction::SubstrateSign(sign_data) => {
        fmt.debug_struct("Transaction::SubstrateSign").field("sign_data", sign_data).finish()
      }
      Transaction::Sign(sign_data) => {
        fmt.debug_struct("Transaction::Sign").field("sign_data", sign_data).finish()
      }
      Transaction::SignCompleted { plan, tx_hash, ..
} => fmt
        .debug_struct("Transaction::SignCompleted")
        .field("plan", &hex::encode(plan))
        .field("tx_hash", &hex::encode(tx_hash))
        .finish_non_exhaustive(),
      Transaction::SlashReport(points, signed) => fmt
        // Fixed: this arm previously printed "Transaction::SignCompleted" (copy-paste error)
        .debug_struct("Transaction::SlashReport")
        .field("points", points)
        .field("signed", signed)
        .finish(),
    }
  }
}

impl ReadWrite for Transaction {
  /// Deserialize a Transaction from a one-byte kind tag followed by the variant's fields.
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut kind = [0];
    reader.read_exact(&mut kind)?;

    match kind[0] {
      0 => Ok(Transaction::RemoveParticipantDueToDkg {
        participant: Ristretto::read_G(reader)?,
        signed: Signed::read_without_nonce(reader, 0)?,
      }),

      1 => {
        let mut attempt = [0; 4];
        reader.read_exact(&mut attempt)?;
        let attempt = u32::from_le_bytes(attempt);

        let commitments = {
          let mut commitments_len = [0; 1];
          reader.read_exact(&mut commitments_len)?;
          let commitments_len = usize::from(commitments_len[0]);
          if commitments_len == 0 {
            Err(io::Error::other("zero commitments in DkgCommitments"))?;
          }

          let mut each_commitments_len = [0; 2];
          reader.read_exact(&mut each_commitments_len)?;
          let each_commitments_len = usize::from(u16::from_le_bytes(each_commitments_len));
          // Bound the total allocation before reading
          if (commitments_len * each_commitments_len) > TRANSACTION_SIZE_LIMIT {
            Err(io::Error::other(
              "commitments present in transaction exceeded transaction size limit",
            ))?;
          }
          let mut commitments = vec![vec![]; commitments_len];
          for commitments in &mut commitments {
            *commitments = vec![0; each_commitments_len];
            reader.read_exact(commitments)?;
          }
          commitments
        };

        let signed = Signed::read_without_nonce(reader, 0)?;

        Ok(Transaction::DkgCommitments { attempt, commitments, signed })
      }

      2 => {
        let mut attempt = [0; 4];
        reader.read_exact(&mut attempt)?;
        let attempt = u32::from_le_bytes(attempt);

        let shares = {
          let mut share_quantity = [0; 1];
          reader.read_exact(&mut share_quantity)?;

          let mut key_share_quantity = [0; 1];
          reader.read_exact(&mut key_share_quantity)?;

          let mut share_len = [0; 2];
          reader.read_exact(&mut share_len)?;
          let share_len = usize::from(u16::from_le_bytes(share_len));
// Shares form a 2D grid: share_quantity outer entries, each holding
          // key_share_quantity shares of exactly share_len bytes.
          // NOTE(review): unlike DkgCommitments, there is no explicit
          // share_quantity * key_share_quantity * share_len bound here — presumably the
          // caller bounds the reader by TRANSACTION_SIZE_LIMIT; confirm.
          let mut all_shares = vec![];
          for _ in 0 .. share_quantity[0] {
            let mut shares = vec![];
            for _ in 0 .. key_share_quantity[0] {
              let mut share = vec![0; share_len];
              reader.read_exact(&mut share)?;
              shares.push(share);
            }
            all_shares.push(shares);
          }
          all_shares
        };

        let mut confirmation_nonces = [0; 64];
        reader.read_exact(&mut confirmation_nonces)?;

        let signed = Signed::read_without_nonce(reader, 1)?;

        Ok(Transaction::DkgShares { attempt, shares, confirmation_nonces, signed })
      }
      3 => {
        // InvalidDkgShare: attempt, accuser/faulty participant indices (u16 LE, must be
        // valid non-zero Participant values), then a length-prefixed blame blob.
        let mut attempt = [0; 4];
        reader.read_exact(&mut attempt)?;
        let attempt = u32::from_le_bytes(attempt);

        let mut accuser = [0; 2];
        reader.read_exact(&mut accuser)?;
        let accuser = Participant::new(u16::from_le_bytes(accuser))
          .ok_or_else(|| io::Error::other("invalid participant in InvalidDkgShare"))?;

        let mut faulty = [0; 2];
        reader.read_exact(&mut faulty)?;
        let faulty = Participant::new(u16::from_le_bytes(faulty))
          .ok_or_else(|| io::Error::other("invalid participant in InvalidDkgShare"))?;

        let mut blame_len = [0; 2];
        reader.read_exact(&mut blame_len)?;
        let mut blame = vec![0; u16::from_le_bytes(blame_len).into()];
        reader.read_exact(&mut blame)?;

        // This shares a nonce with DkgConfirmed as only one is expected
        let signed = Signed::read_without_nonce(reader, 2)?;

        Ok(Transaction::InvalidDkgShare {
          attempt,
          accuser,
          faulty,
          // A zero-length blame is canonicalized to None on deserialization
          blame: Some(blame).filter(|blame| !blame.is_empty()),
          signed,
        })
      }
      4 => {
        // DkgConfirmed: attempt (u32 LE), a 32-byte confirmation share, and the signature
        // (nonce 2, shared with InvalidDkgShare — see comment above).
        let mut attempt = [0; 4];
        reader.read_exact(&mut attempt)?;
        let attempt = u32::from_le_bytes(attempt);

        let mut confirmation_share = [0; 32];
        reader.read_exact(&mut confirmation_share)?;

        let signed = Signed::read_without_nonce(reader, 2)?;

        Ok(Transaction::DkgConfirmed { attempt, confirmation_share, signed })
      }
      5 => {
        // CosignSubstrateBlock: a bare 32-byte block hash (Provided, so unsigned).
        let mut block = [0; 32];
        reader.read_exact(&mut block)?;
        Ok(Transaction::CosignSubstrateBlock(block))
      }
      6 => {
        // Batch: 32-byte block hash plus a u32 LE batch ID.
        let mut block = [0; 32];
        reader.read_exact(&mut block)?;
        let mut batch = [0; 4];
        reader.read_exact(&mut batch)?;
        Ok(Transaction::Batch { block, batch: u32::from_le_bytes(batch) })
      }
      7 =>
{ let mut block = [0; 8]; reader.read_exact(&mut block)?; Ok(Transaction::SubstrateBlock(u64::from_le_bytes(block))) } 8 => SignData::read(reader).map(Transaction::SubstrateSign), 9 => SignData::read(reader).map(Transaction::Sign), 10 => { let mut plan = [0; 32]; reader.read_exact(&mut plan)?; let mut tx_hash_len = [0]; reader.read_exact(&mut tx_hash_len)?; let mut tx_hash = vec![0; usize::from(tx_hash_len[0])]; reader.read_exact(&mut tx_hash)?; let first_signer = Ristretto::read_G(reader)?; let signature = SchnorrSignature::::read(reader)?; Ok(Transaction::SignCompleted { plan, tx_hash, first_signer, signature }) } 11 => { let mut len = [0]; reader.read_exact(&mut len)?; let len = len[0]; // If the set has as many validators as MAX_KEY_SHARES_PER_SET, then the amount of distinct // validators (the amount of validators reported on) will be at most // `MAX_KEY_SHARES_PER_SET - 1` if u32::from(len) > (serai_client::validator_sets::primitives::MAX_KEY_SHARES_PER_SET - 1) { Err(io::Error::other("more points reported than allowed validator"))?; } let mut points = vec![0u32; len.into()]; for points in &mut points { let mut these_points = [0; 4]; reader.read_exact(&mut these_points)?; *points = u32::from_le_bytes(these_points); } Ok(Transaction::SlashReport(points, Signed::read_without_nonce(reader, 0)?)) } _ => Err(io::Error::other("invalid transaction type")), } } fn write(&self, writer: &mut W) -> io::Result<()> { match self { Transaction::RemoveParticipantDueToDkg { participant, signed } => { writer.write_all(&[0])?; writer.write_all(&participant.to_bytes())?; signed.write_without_nonce(writer) } Transaction::DkgCommitments { attempt, commitments, signed } => { writer.write_all(&[1])?; writer.write_all(&attempt.to_le_bytes())?; if commitments.is_empty() { Err(io::Error::other("zero commitments in DkgCommitments"))? 
} writer.write_all(&[u8::try_from(commitments.len()).unwrap()])?; for commitments_i in commitments { if commitments_i.len() != commitments[0].len() { Err(io::Error::other("commitments of differing sizes in DkgCommitments"))? } } writer.write_all(&u16::try_from(commitments[0].len()).unwrap().to_le_bytes())?; for commitments in commitments { writer.write_all(commitments)?; } signed.write_without_nonce(writer) } Transaction::DkgShares { attempt, shares, confirmation_nonces, signed } => { writer.write_all(&[2])?; writer.write_all(&attempt.to_le_bytes())?; // `shares` is a Vec which is supposed to map to a HashMap>. Since we // bound participants to 150, this conversion is safe if a valid in-memory transaction. writer.write_all(&[u8::try_from(shares.len()).unwrap()])?; // This assumes at least one share is being sent to another party writer.write_all(&[u8::try_from(shares[0].len()).unwrap()])?; let share_len = shares[0][0].len(); // For BLS12-381 G2, this would be: // - A 32-byte share // - A 96-byte ephemeral key // - A 128-byte signature // Hence why this has to be u16 writer.write_all(&u16::try_from(share_len).unwrap().to_le_bytes())?; for these_shares in shares { assert_eq!(these_shares.len(), shares[0].len(), "amount of sent shares was variable"); for share in these_shares { assert_eq!(share.len(), share_len, "sent shares were of variable length"); writer.write_all(share)?; } } writer.write_all(confirmation_nonces)?; signed.write_without_nonce(writer) } Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => { writer.write_all(&[3])?; writer.write_all(&attempt.to_le_bytes())?; writer.write_all(&u16::from(*accuser).to_le_bytes())?; writer.write_all(&u16::from(*faulty).to_le_bytes())?; // Flattens Some(vec![]) to None on the expectation no actual blame will be 0-length assert!(blame.as_ref().map_or(1, Vec::len) != 0); let blame_len = u16::try_from(blame.as_ref().unwrap_or(&vec![]).len()).expect("blame exceeded 64 KB"); 
writer.write_all(&blame_len.to_le_bytes())?; writer.write_all(blame.as_ref().unwrap_or(&vec![]))?; signed.write_without_nonce(writer) } Transaction::DkgConfirmed { attempt, confirmation_share, signed } => { writer.write_all(&[4])?; writer.write_all(&attempt.to_le_bytes())?; writer.write_all(confirmation_share)?; signed.write_without_nonce(writer) } Transaction::CosignSubstrateBlock(block) => { writer.write_all(&[5])?; writer.write_all(block) } Transaction::Batch { block, batch } => { writer.write_all(&[6])?; writer.write_all(block)?; writer.write_all(&batch.to_le_bytes()) } Transaction::SubstrateBlock(block) => { writer.write_all(&[7])?; writer.write_all(&block.to_le_bytes()) } Transaction::SubstrateSign(data) => { writer.write_all(&[8])?; data.write(writer) } Transaction::Sign(data) => { writer.write_all(&[9])?; data.write(writer) } Transaction::SignCompleted { plan, tx_hash, first_signer, signature } => { writer.write_all(&[10])?; writer.write_all(plan)?; writer .write_all(&[u8::try_from(tx_hash.len()).expect("tx hash length exceed 255 bytes")])?; writer.write_all(tx_hash)?; writer.write_all(&first_signer.to_bytes())?; signature.write(writer) } Transaction::SlashReport(points, signed) => { writer.write_all(&[11])?; writer.write_all(&[u8::try_from(points.len()).unwrap()])?; for points in points { writer.write_all(&points.to_le_bytes())?; } signed.write_without_nonce(writer) } } } } impl TransactionTrait for Transaction { fn kind(&self) -> TransactionKind<'_> { match self { Transaction::RemoveParticipantDueToDkg { participant, signed } => { TransactionKind::Signed((b"remove", participant.to_bytes()).encode(), signed) } Transaction::DkgCommitments { attempt, commitments: _, signed } | Transaction::DkgShares { attempt, signed, .. } | Transaction::InvalidDkgShare { attempt, signed, .. } | Transaction::DkgConfirmed { attempt, signed, .. 
} => { TransactionKind::Signed((b"dkg", attempt).encode(), signed) } Transaction::CosignSubstrateBlock(_) => TransactionKind::Provided("cosign"), Transaction::Batch { .. } => TransactionKind::Provided("batch"), Transaction::SubstrateBlock(_) => TransactionKind::Provided("serai"), Transaction::SubstrateSign(data) => { TransactionKind::Signed((b"substrate", data.plan, data.attempt).encode(), &data.signed) } Transaction::Sign(data) => { TransactionKind::Signed((b"sign", data.plan, data.attempt).encode(), &data.signed) } Transaction::SignCompleted { .. } => TransactionKind::Unsigned, Transaction::SlashReport(_, signed) => { TransactionKind::Signed(b"slash_report".to_vec(), signed) } } } fn hash(&self) -> [u8; 32] { let mut tx = self.serialize(); if let TransactionKind::Signed(_, signed) = self.kind() { // Make sure the part we're cutting off is the signature assert_eq!(tx.drain((tx.len() - 64) ..).collect::>(), signed.signature.serialize()); } Blake2s256::digest([b"Coordinator Tributary Transaction".as_slice(), &tx].concat()).into() } fn verify(&self) -> Result<(), TransactionError> { // TODO: Check SubstrateSign's lengths here if let Transaction::SignCompleted { first_signer, signature, .. } = self { if !signature.verify(*first_signer, self.sign_completed_challenge()) { Err(TransactionError::InvalidContent)?; } } Ok(()) } } impl Transaction { // Used to initially construct transactions so we can then get sig hashes and perform signing pub fn empty_signed() -> Signed { Signed { signer: Ristretto::generator(), nonce: 0, signature: SchnorrSignature:: { R: Ristretto::generator(), s: ::F::ZERO, }, } } // Sign a transaction pub fn sign( &mut self, rng: &mut R, genesis: [u8; 32], key: &Zeroizing<::F>, ) { fn signed(tx: &mut Transaction) -> (u32, &mut Signed) { #[allow(clippy::match_same_arms)] // Doesn't make semantic sense here let nonce = match tx { Transaction::RemoveParticipantDueToDkg { .. } => 0, Transaction::DkgCommitments { .. } => 0, Transaction::DkgShares { .. 
} => 1, Transaction::InvalidDkgShare { .. } | Transaction::DkgConfirmed { .. } => 2, Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"), Transaction::Batch { .. } => panic!("signing Batch"), Transaction::SubstrateBlock(_) => panic!("signing SubstrateBlock"), Transaction::SubstrateSign(data) => data.label.nonce(), Transaction::Sign(data) => data.label.nonce(), Transaction::SignCompleted { .. } => panic!("signing SignCompleted"), Transaction::SlashReport(_, _) => 0, }; ( nonce, #[allow(clippy::match_same_arms)] match tx { Transaction::RemoveParticipantDueToDkg { ref mut signed, .. } | Transaction::DkgCommitments { ref mut signed, .. } | Transaction::DkgShares { ref mut signed, .. } | Transaction::InvalidDkgShare { ref mut signed, .. } | Transaction::DkgConfirmed { ref mut signed, .. } => signed, Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"), Transaction::Batch { .. } => panic!("signing Batch"), Transaction::SubstrateBlock(_) => panic!("signing SubstrateBlock"), Transaction::SubstrateSign(ref mut data) => &mut data.signed, Transaction::Sign(ref mut data) => &mut data.signed, Transaction::SignCompleted { .. 
} => panic!("signing SignCompleted"), Transaction::SlashReport(_, ref mut signed) => signed, }, ) } let (nonce, signed_ref) = signed(self); signed_ref.signer = Ristretto::generator() * key.deref(); signed_ref.nonce = nonce; let sig_nonce = Zeroizing::new(::F::random(rng)); signed(self).1.signature.R = ::generator() * sig_nonce.deref(); let sig_hash = self.sig_hash(genesis); signed(self).1.signature = SchnorrSignature::::sign(key, sig_nonce, sig_hash); } pub fn sign_completed_challenge(&self) -> ::F { if let Transaction::SignCompleted { plan, tx_hash, first_signer, signature } = self { let mut transcript = RecommendedTranscript::new(b"Coordinator Tributary Transaction SignCompleted"); transcript.append_message(b"plan", plan); transcript.append_message(b"tx_hash", tx_hash); transcript.append_message(b"signer", first_signer.to_bytes()); transcript.append_message(b"nonce", signature.R.to_bytes()); Ristretto::hash_to_F(b"SignCompleted signature", &transcript.challenge(b"challenge")) } else { panic!("sign_completed_challenge called on transaction which wasn't SignCompleted") } } } ================================================ FILE: coordinator/tributary/Cargo.toml ================================================ [package] name = "tributary-chain" version = "0.1.0" description = "A micro-blockchain to provide consensus and ordering to P2P communication" license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/tributary" authors = ["Luke Parker "] edition = "2021" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] async-trait = { version = "0.1", default-features = false } thiserror = { version = "1", default-features = false } subtle = { version = "^2", default-features = false, features = ["std"] } zeroize = { version = "^1.5", default-features = false, features = ["std"] } rand = { version = "0.8", default-features = false, features = ["std"] } 
rand_chacha = { version = "0.3", default-features = false, features = ["std"] } blake2 = { version = "0.10", default-features = false, features = ["std"] } transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["std", "recommended"] } dalek-ff-group = { path = "../../crypto/dalek-ff-group" } ciphersuite = { package = "ciphersuite", path = "../../crypto/ciphersuite", default-features = false, features = ["std"] } schnorr = { package = "schnorr-signatures", path = "../../crypto/schnorr", default-features = false, features = ["std"] } hex = { version = "0.4", default-features = false, features = ["std"] } log = { version = "0.4", default-features = false, features = ["std"] } serai-db = { path = "../../common/db" } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] } futures-util = { version = "0.3", default-features = false, features = ["std", "sink", "channel"] } futures-channel = { version = "0.3", default-features = false, features = ["std", "sink"] } tendermint = { package = "tendermint-machine", path = "./tendermint" } tokio = { version = "1", default-features = false, features = ["sync", "time", "rt"] } [dev-dependencies] tokio = { version = "1", features = ["macros"] } [features] tests = [] ================================================ FILE: coordinator/tributary/LICENSE ================================================ AGPL-3.0-only license Copyright (c) 2023 Luke Parker This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License Version 3 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. 
You should have received a copy of the GNU Affero General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.
#[error("block had a provided transaction not yet locally provided: {0:?}")] NonLocalProvided([u8; 32]), /// The provided transaction was distinct from the locally provided transaction. #[error("block had a distinct provided transaction")] DistinctProvided, /// An included transaction was invalid. #[error("included transaction had an error")] TransactionError(TransactionError), } #[derive(Clone, PartialEq, Eq, Debug)] pub struct BlockHeader { pub parent: [u8; 32], pub transactions: [u8; 32], } impl ReadWrite for BlockHeader { fn read(reader: &mut R) -> io::Result { let mut header = BlockHeader { parent: [0; 32], transactions: [0; 32] }; reader.read_exact(&mut header.parent)?; reader.read_exact(&mut header.transactions)?; Ok(header) } fn write(&self, writer: &mut W) -> io::Result<()> { writer.write_all(&self.parent)?; writer.write_all(&self.transactions) } } impl BlockHeader { pub fn hash(&self) -> [u8; 32] { Blake2s256::digest([b"tributary_block".as_ref(), &self.serialize()].concat()).into() } } #[derive(Clone, PartialEq, Eq, Debug)] pub struct Block { pub header: BlockHeader, pub transactions: Vec>, } impl ReadWrite for Block { fn read(reader: &mut R) -> io::Result { let header = BlockHeader::read(reader)?; let mut txs = [0; 4]; reader.read_exact(&mut txs)?; let txs = u32::from_le_bytes(txs); let mut transactions = Vec::with_capacity(usize::try_from(txs).unwrap()); for _ in 0 .. txs { transactions.push(Transaction::read(reader)?); } Ok(Block { header, transactions }) } fn write(&self, writer: &mut W) -> io::Result<()> { self.header.write(writer)?; writer.write_all(&u32::try_from(self.transactions.len()).unwrap().to_le_bytes())?; for tx in &self.transactions { tx.write(writer)?; } Ok(()) } } impl Block { /// Create a new block. /// /// mempool is expected to only have valid, non-conflicting transactions, sorted by nonce. 
pub(crate) fn new(parent: [u8; 32], provided: Vec, mempool: Vec>) -> Self {
    // Provided transactions come first: blocks must be ordered
    // Provided -> Unsigned -> Signed (enforced by `verify` below).
    let mut txs = vec![];
    for tx in provided {
      txs.push(Transaction::Application(tx))
    }

    // Partition the mempool; it's documented to never contain Provided transactions.
    let mut signed = vec![];
    let mut unsigned = vec![];
    for tx in mempool {
      match tx.kind() {
        TransactionKind::Signed(_, _) => signed.push(tx),
        TransactionKind::Unsigned => unsigned.push(tx),
        TransactionKind::Provided(_) => panic!("provided transaction entered mempool"),
      }
    }

    // unsigned first
    txs.extend(unsigned);
    // then signed
    txs.extend(signed);

    // Check TXs are sorted by nonce.
    let nonce = |tx: &Transaction| {
      if let TransactionKind::Signed(_, Signed { nonce, .. }) = tx.kind() {
        *nonce
      } else {
        0
      }
    };
    let mut last = 0;
    for tx in &txs {
      let nonce = nonce(tx);
      if nonce < last {
        panic!("TXs in mempool weren't ordered by nonce");
      }
      last = nonce;
    }

    let mut res = Block { header: BlockHeader { parent, transactions: [0; 32] }, transactions: txs };
    // Trim transactions from the end until the serialized block fits the size limit.
    // NOTE(review): this can also pop Provided transactions if they alone exceed the
    // limit — presumably their total size is bounded elsewhere; confirm.
    while res.serialize().len() > BLOCK_SIZE_LIMIT {
      assert!(res.transactions.pop().is_some());
    }
    // The header commits to the block's transactions via a Merkle tree of their hashes.
    let hashes = res.transactions.iter().map(Transaction::hash).collect::>();
    res.header.transactions = merkle(&hashes);
    res
  }

  /// Hash of the parent block this block builds upon.
  pub fn parent(&self) -> [u8; 32] {
    self.header.parent
  }

  /// This block's hash (the hash of its header).
  pub fn hash(&self) -> [u8; 32] {
    self.header.hash()
  }

  #[allow(clippy::too_many_arguments)]
  pub(crate) fn verify(
    &self,
    genesis: [u8; 32],
    last_block: [u8; 32],
    mut locally_provided: HashMap<&'static str, VecDeque>,
    get_and_increment_nonce: &mut G,
    schema: &N::SignatureScheme,
    commit: impl Fn(u64) -> Option>,
    provided_or_unsigned_in_chain: impl Fn([u8; 32]) -> bool,
    allow_non_local_provided: bool,
  ) -> Result<(), BlockError> {
    // Order maps each transaction class to a u8 so the Provided -> Unsigned -> Signed
    // ordering can be enforced with a simple numeric comparison.
    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    enum Order {
      Provided,
      Unsigned,
      Signed,
    }
    impl From for u8 {
      fn from(order: Order) -> u8 {
        match order {
          Order::Provided => 0,
          Order::Unsigned => 1,
          Order::Signed => 2,
        }
      }
    }

    // Reject blocks over the size limit or not building on the current tip.
    if self.serialize().len() > BLOCK_SIZE_LIMIT {
      Err(BlockError::TooLargeBlock)?;
    }
    if self.header.parent != last_block {
      Err(BlockError::InvalidParent)?;
    }

    let mut
last_tx_order = Order::Provided; let mut included_in_block = HashSet::new(); let mut txs = Vec::with_capacity(self.transactions.len()); for tx in &self.transactions { let tx_hash = tx.hash(); txs.push(tx_hash); let current_tx_order = match tx.kind() { TransactionKind::Provided(order) => { if provided_or_unsigned_in_chain(tx_hash) { Err(BlockError::ProvidedAlreadyIncluded)?; } if let Some(local) = locally_provided.get_mut(order).and_then(VecDeque::pop_front) { // Since this was a provided TX, it must be an application TX let Transaction::Application(tx) = tx else { Err(BlockError::NonLocalProvided(txs.pop().unwrap()))? }; if tx != &local { Err(BlockError::DistinctProvided)?; } } else if !allow_non_local_provided { Err(BlockError::NonLocalProvided(txs.pop().unwrap()))? }; Order::Provided } TransactionKind::Unsigned => { // check we don't already have the tx in the chain if provided_or_unsigned_in_chain(tx_hash) || included_in_block.contains(&tx_hash) { Err(BlockError::UnsignedAlreadyIncluded)?; } included_in_block.insert(tx_hash); Order::Unsigned } TransactionKind::Signed(..) 
=> Order::Signed, }; // enforce Provided => Unsigned => Signed order if u8::from(current_tx_order) < u8::from(last_tx_order) { Err(BlockError::WrongTransactionOrder)?; } last_tx_order = current_tx_order; match tx { Transaction::Tendermint(tx) => match verify_tendermint_tx::(tx, schema, &commit) { Ok(()) => {} Err(e) => Err(BlockError::TransactionError(e))?, }, Transaction::Application(tx) => { match verify_transaction(tx, genesis, get_and_increment_nonce) { Ok(()) => {} Err(e) => Err(BlockError::TransactionError(e))?, } } } } if merkle(&txs) != self.header.transactions { Err(BlockError::InvalidTransactions)?; } Ok(()) } } ================================================ FILE: coordinator/tributary/src/blockchain.rs ================================================ use std::collections::{VecDeque, HashSet}; use dalek_ff_group::Ristretto; use ciphersuite::{group::GroupEncoding, Ciphersuite}; use serai_db::{Get, DbTxn, Db}; use scale::Decode; use tendermint::ext::{Network, Commit}; use crate::{ ReadWrite, ProvidedError, ProvidedTransactions, BlockError, Block, Mempool, Transaction, transaction::{Signed, TransactionKind, TransactionError, Transaction as TransactionTrait}, }; #[derive(Debug)] pub(crate) struct Blockchain { db: Option, genesis: [u8; 32], block_number: u64, tip: [u8; 32], participants: HashSet<::G>, provided: ProvidedTransactions, mempool: Mempool, pub(crate) next_block_notifications: VecDeque>, } impl Blockchain { fn tip_key(genesis: [u8; 32]) -> Vec { D::key(b"tributary_blockchain", b"tip", genesis) } fn block_number_key(&self) -> Vec { D::key(b"tributary_blockchain", b"block_number", self.genesis) } fn block_key(genesis: &[u8], hash: &[u8; 32]) -> Vec { D::key(b"tributary_blockchain", b"block", [genesis, hash].concat()) } fn block_hash_key(genesis: &[u8], block_number: u64) -> Vec { D::key(b"tributary_blockchain", b"block_hash", [genesis, &block_number.to_le_bytes()].concat()) } fn commit_key(genesis: &[u8], hash: &[u8; 32]) -> Vec { 
D::key(b"tributary_blockchain", b"commit", [genesis, hash].concat()) } fn block_after_key(genesis: &[u8], hash: &[u8; 32]) -> Vec { D::key(b"tributary_blockchain", b"block_after", [genesis, hash].concat()) } fn unsigned_included_key(genesis: &[u8], hash: &[u8; 32]) -> Vec { D::key(b"tributary_blockchain", b"unsigned_included", [genesis, hash].concat()) } fn provided_included_key(genesis: &[u8], hash: &[u8; 32]) -> Vec { D::key(b"tributary_blockchain", b"provided_included", [genesis, hash].concat()) } fn next_nonce_key( genesis: &[u8; 32], signer: &::G, order: &[u8], ) -> Vec { D::key( b"tributary_blockchain", b"next_nonce", [genesis.as_ref(), signer.to_bytes().as_ref(), order].concat(), ) } pub(crate) fn new( db: D, genesis: [u8; 32], participants: &[::G], ) -> Self { let mut res = Self { db: Some(db.clone()), genesis, participants: participants.iter().copied().collect(), block_number: 0, tip: genesis, provided: ProvidedTransactions::new(db.clone(), genesis), mempool: Mempool::new(db, genesis), next_block_notifications: VecDeque::new(), }; if let Some((block_number, tip)) = { let db = res.db.as_ref().unwrap(); db.get(res.block_number_key()).map(|number| (number, db.get(Self::tip_key(genesis)).unwrap())) } { res.block_number = u64::from_le_bytes(block_number.try_into().unwrap()); res.tip.copy_from_slice(&tip); } res } pub(crate) fn tip(&self) -> [u8; 32] { self.tip } pub(crate) fn block_number(&self) -> u64 { self.block_number } pub(crate) fn block_from_db(db: &D, genesis: [u8; 32], block: &[u8; 32]) -> Option> { db.get(Self::block_key(&genesis, block)) .map(|bytes| Block::::read::<&[u8]>(&mut bytes.as_ref()).unwrap()) } pub(crate) fn commit_from_db(db: &D, genesis: [u8; 32], block: &[u8; 32]) -> Option> { db.get(Self::commit_key(&genesis, block)) } pub(crate) fn block_hash_from_db(db: &D, genesis: [u8; 32], block: u64) -> Option<[u8; 32]> { db.get(Self::block_hash_key(&genesis, block)).map(|h| h.try_into().unwrap()) } pub(crate) fn commit(&self, block: &[u8; 32]) 
-> Option> { Self::commit_from_db(self.db.as_ref().unwrap(), self.genesis, block) } pub(crate) fn block_hash(&self, block: u64) -> Option<[u8; 32]> { Self::block_hash_from_db(self.db.as_ref().unwrap(), self.genesis, block) } pub(crate) fn commit_by_block_number(&self, block: u64) -> Option> { self.commit(&self.block_hash(block)?) } pub(crate) fn block_after(db: &D, genesis: [u8; 32], block: &[u8; 32]) -> Option<[u8; 32]> { db.get(Self::block_after_key(&genesis, block)).map(|bytes| bytes.try_into().unwrap()) } pub(crate) fn locally_provided_txs_in_block( db: &D, genesis: &[u8; 32], block: &[u8; 32], order: &str, ) -> bool { let local_key = ProvidedTransactions::::locally_provided_quantity_key(genesis, order); let local = db.get(local_key).map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap())); let block_key = ProvidedTransactions::::block_provided_quantity_key(genesis, block, order); let block = db.get(block_key).map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap())); local >= block } pub(crate) fn tip_from_db(db: &D, genesis: [u8; 32]) -> [u8; 32] { db.get(Self::tip_key(genesis)).map_or(genesis, |bytes| bytes.try_into().unwrap()) } pub(crate) fn add_transaction( &mut self, internal: bool, tx: Transaction, schema: &N::SignatureScheme, ) -> Result { let db = self.db.as_ref().unwrap(); let genesis = self.genesis; let commit = |block: u64| -> Option> { let hash = Self::block_hash_from_db(db, genesis, block)?; // we must have a commit per valid hash let commit = Self::commit_from_db(db, genesis, &hash).unwrap(); // commit has to be valid if it is coming from our db Some(Commit::::decode(&mut commit.as_ref()).unwrap()) }; let unsigned_in_chain = |hash: [u8; 32]| db.get(Self::unsigned_included_key(&self.genesis, &hash)).is_some(); self.mempool.add::( |signer, order| { if self.participants.contains(&signer) { Some( db.get(Self::next_nonce_key(&self.genesis, &signer, &order)) .map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap())), ) } else { 
None } }, internal, tx, schema, unsigned_in_chain, commit, ) } pub(crate) fn provide_transaction(&mut self, tx: T) -> Result<(), ProvidedError> { self.provided.provide(tx) } pub(crate) fn next_nonce( &self, signer: &::G, order: &[u8], ) -> Option { if let Some(next_nonce) = self.mempool.next_nonce_in_mempool(signer, order.to_vec()) { return Some(next_nonce); } if self.participants.contains(signer) { Some( self .db .as_ref() .unwrap() .get(Self::next_nonce_key(&self.genesis, signer, order)) .map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap())), ) } else { None } } pub(crate) fn build_block(&mut self, schema: &N::SignatureScheme) -> Block { let block = Block::new( self.tip, self.provided.transactions.values().flatten().cloned().collect(), self.mempool.block(), ); // build_block should not return invalid blocks self.verify_block::(&block, schema, false).unwrap(); block } pub(crate) fn verify_block( &self, block: &Block, schema: &N::SignatureScheme, allow_non_local_provided: bool, ) -> Result<(), BlockError> { let db = self.db.as_ref().unwrap(); let provided_or_unsigned_in_chain = |hash: [u8; 32]| { db.get(Self::unsigned_included_key(&self.genesis, &hash)).is_some() || db.get(Self::provided_included_key(&self.genesis, &hash)).is_some() }; let commit = |block: u64| -> Option> { let commit = self.commit_by_block_number(block)?; // commit has to be valid if it is coming from our db Some(Commit::::decode(&mut commit.as_ref()).unwrap()) }; let mut txn_db = db.clone(); let mut txn = txn_db.txn(); let res = block.verify::( self.genesis, self.tip, self.provided.transactions.clone(), &mut |signer, order| { if self.participants.contains(signer) { let key = Self::next_nonce_key(&self.genesis, signer, order); let next = txn .get(&key) .map_or(0, |next_nonce| u32::from_le_bytes(next_nonce.try_into().unwrap())); txn.put(key, (next + 1).to_le_bytes()); Some(next) } else { None } }, schema, &commit, provided_or_unsigned_in_chain, allow_non_local_provided, ); // Drop this 
TXN's changes as we're solely verifying the block drop(txn); res } /// Add a block. pub(crate) fn add_block( &mut self, block: &Block, commit: Vec, schema: &N::SignatureScheme, ) -> Result<(), BlockError> { self.verify_block::(block, schema, true)?; log::info!( "adding block {} to tributary {} with {} TXs", hex::encode(block.hash()), hex::encode(self.genesis), block.transactions.len(), ); // None of the following assertions should be reachable since we verified the block // Take it from the Option so Rust doesn't consider self as mutably borrowed thanks to the // existence of the txn let mut db = self.db.take().unwrap(); let mut txn = db.txn(); self.tip = block.hash(); txn.put(Self::tip_key(self.genesis), self.tip); self.block_number += 1; txn.put(self.block_number_key(), self.block_number.to_le_bytes()); txn.put(Self::block_hash_key(&self.genesis, self.block_number), self.tip); txn.put(Self::block_key(&self.genesis, &self.tip), block.serialize()); txn.put(Self::commit_key(&self.genesis, &self.tip), commit); txn.put(Self::block_after_key(&self.genesis, &block.parent()), block.hash()); for tx in &block.transactions { match tx.kind() { TransactionKind::Provided(order) => { let hash = tx.hash(); self.provided.complete(&mut txn, order, self.tip, hash); txn.put(Self::provided_included_key(&self.genesis, &hash), []); } TransactionKind::Unsigned => { let hash = tx.hash(); // Save as included on chain txn.put(Self::unsigned_included_key(&self.genesis, &hash), []); // remove from the mempool self.mempool.remove(&hash); } TransactionKind::Signed(order, Signed { signer, nonce, .. }) => { let next_nonce = nonce + 1; txn.put(Self::next_nonce_key(&self.genesis, signer, &order), next_nonce.to_le_bytes()); self.mempool.remove(&tx.hash()); } } } txn.commit(); self.db = Some(db); for tx in self.next_block_notifications.drain(..) 
{ let _ = tx.send(()); } Ok(()) } } ================================================ FILE: coordinator/tributary/src/lib.rs ================================================ use core::{marker::PhantomData, fmt::Debug}; use std::{sync::Arc, io}; use async_trait::async_trait; use zeroize::Zeroizing; use dalek_ff_group::Ristretto; use ciphersuite::Ciphersuite; use scale::Decode; use futures_channel::mpsc::UnboundedReceiver; use futures_util::{StreamExt, SinkExt}; use ::tendermint::{ ext::{BlockNumber, Commit, Block as BlockTrait, Network}, SignedMessageFor, SyncedBlock, SyncedBlockSender, SyncedBlockResultReceiver, MessageSender, TendermintMachine, TendermintHandle, }; pub use ::tendermint::Evidence; use serai_db::Db; use tokio::sync::RwLock; mod merkle; pub(crate) use merkle::*; pub mod transaction; pub use transaction::{TransactionError, Signed, TransactionKind, Transaction as TransactionTrait}; use crate::tendermint::tx::TendermintTx; mod provided; pub(crate) use provided::*; pub use provided::ProvidedError; mod block; pub use block::*; mod blockchain; pub(crate) use blockchain::*; mod mempool; pub(crate) use mempool::*; pub mod tendermint; pub(crate) use crate::tendermint::*; #[cfg(any(test, feature = "tests"))] pub mod tests; /// Size limit for an individual transaction. pub const TRANSACTION_SIZE_LIMIT: usize = 3_000_000; /// Amount of transactions a single account may have in the mempool. pub const ACCOUNT_MEMPOOL_LIMIT: u32 = 50; /// Block size limit. // This targets a growth limit of roughly 45 GB a day, under load, in order to prevent a malicious // participant from flooding disks and causing out of space errors in order processes. 
pub const BLOCK_SIZE_LIMIT: usize = 3_001_000; pub(crate) const TENDERMINT_MESSAGE: u8 = 0; pub(crate) const TRANSACTION_MESSAGE: u8 = 1; #[allow(clippy::large_enum_variant)] #[derive(Clone, PartialEq, Eq, Debug)] pub enum Transaction { Tendermint(TendermintTx), Application(T), } impl ReadWrite for Transaction { fn read(reader: &mut R) -> io::Result { let mut kind = [0]; reader.read_exact(&mut kind)?; match kind[0] { 0 => { let tx = TendermintTx::read(reader)?; Ok(Transaction::Tendermint(tx)) } 1 => { let tx = T::read(reader)?; Ok(Transaction::Application(tx)) } _ => Err(io::Error::other("invalid transaction type")), } } fn write(&self, writer: &mut W) -> io::Result<()> { match self { Transaction::Tendermint(tx) => { writer.write_all(&[0])?; tx.write(writer) } Transaction::Application(tx) => { writer.write_all(&[1])?; tx.write(writer) } } } } impl Transaction { pub fn hash(&self) -> [u8; 32] { match self { Transaction::Tendermint(tx) => tx.hash(), Transaction::Application(tx) => tx.hash(), } } pub fn kind(&self) -> TransactionKind<'_> { match self { Transaction::Tendermint(tx) => tx.kind(), Transaction::Application(tx) => tx.kind(), } } } /// An item which can be read and written. pub trait ReadWrite: Sized { fn read(reader: &mut R) -> io::Result; fn write(&self, writer: &mut W) -> io::Result<()>; fn serialize(&self) -> Vec { // BlockHeader is 64 bytes and likely the smallest item in this system let mut buf = Vec::with_capacity(64); self.write(&mut buf).unwrap(); buf } } #[async_trait] pub trait P2p: 'static + Send + Sync + Clone + Debug { /// Broadcast a message to all other members of the Tributary with the specified genesis. /// /// The Tributary will re-broadcast consensus messages on a fixed interval to ensure they aren't /// prematurely dropped from the P2P layer. THe P2P layer SHOULD perform content-based /// deduplication to ensure a sane amount of load. async fn broadcast(&self, genesis: [u8; 32], msg: Vec); } #[async_trait] impl P2p for Arc

{ async fn broadcast(&self, genesis: [u8; 32], msg: Vec) { (*self).broadcast(genesis, msg).await } } #[derive(Clone)] pub struct Tributary { db: D, genesis: [u8; 32], network: TendermintNetwork, synced_block: Arc>>>, synced_block_result: Arc>, messages: Arc>>>, } impl Tributary { pub async fn new( db: D, genesis: [u8; 32], start_time: u64, key: Zeroizing<::F>, validators: Vec<(::G, u64)>, p2p: P, ) -> Option { log::info!("new Tributary with genesis {}", hex::encode(genesis)); let validators_vec = validators.iter().map(|validator| validator.0).collect::>(); let signer = Arc::new(Signer::new(genesis, key)); let validators = Arc::new(Validators::new(genesis, validators)?); let mut blockchain = Blockchain::new(db.clone(), genesis, &validators_vec); let block_number = BlockNumber(blockchain.block_number()); let start_time = if let Some(commit) = blockchain.commit(&blockchain.tip()) { Commit::::decode(&mut commit.as_ref()).unwrap().end_time } else { start_time }; let proposal = TendermintBlock( blockchain.build_block::>(&validators).serialize(), ); let blockchain = Arc::new(RwLock::new(blockchain)); let network = TendermintNetwork { genesis, signer, validators, blockchain, p2p }; let TendermintHandle { synced_block, synced_block_result, messages, machine } = TendermintMachine::new( db.clone(), network.clone(), genesis, block_number, start_time, proposal, ) .await; tokio::spawn(machine.run()); Some(Self { db, genesis, network, synced_block: Arc::new(RwLock::new(synced_block)), synced_block_result: Arc::new(RwLock::new(synced_block_result)), messages: Arc::new(RwLock::new(messages)), }) } pub fn block_time() -> u32 { TendermintNetwork::::block_time() } pub fn genesis(&self) -> [u8; 32] { self.genesis } pub async fn block_number(&self) -> u64 { self.network.blockchain.read().await.block_number() } pub async fn tip(&self) -> [u8; 32] { self.network.blockchain.read().await.tip() } pub fn reader(&self) -> TributaryReader { TributaryReader(self.db.clone(), self.genesis, 
PhantomData) }

  /// Queue a locally-provided transaction for inclusion in a block.
  pub async fn provide_transaction(&self, tx: T) -> Result<(), ProvidedError> {
    self.network.blockchain.write().await.provide_transaction(tx)
  }

  /// The next nonce expected from this signer for this order, if they're a participant.
  pub async fn next_nonce(
    &self,
    signer: &::G,
    order: &[u8],
  ) -> Option {
    self.network.blockchain.read().await.next_nonce(signer, order)
  }

  // Returns Ok(true) if new, Ok(false) if an already present unsigned, or the error.
  // Safe to be &self since the only meaningful usage of self is self.network.blockchain which
  // successfully acquires its own write lock
  pub async fn add_transaction(&self, tx: T) -> Result {
    let tx = Transaction::Application(tx);
    // Serialize (with the message-type prefix) up front so broadcasting doesn't need the lock
    let mut to_broadcast = vec![TRANSACTION_MESSAGE];
    tx.write(&mut to_broadcast).unwrap();
    let res = self.network.blockchain.write().await.add_transaction::>(
      true,
      tx,
      &self.network.signature_scheme(),
    );
    // Only broadcast transactions which are new to us
    if res == Ok(true) {
      self.network.p2p.broadcast(self.genesis, to_broadcast).await;
    }
    res
  }

  // Sync a block offered by a peer, returning if it was accepted onto our chain.
  async fn sync_block_internal(
    &self,
    block: Block,
    commit: Vec,
    result: &mut UnboundedReceiver,
  ) -> bool {
    let (tip, block_number) = {
      let blockchain = self.network.blockchain.read().await;
      (blockchain.tip(), blockchain.block_number())
    };

    // We only sync blocks which extend our current tip
    if block.header.parent != tip {
      log::debug!("told to sync a block whose parent wasn't our tip");
      return false;
    }

    let block = TendermintBlock(block.serialize());
    let mut commit_ref = commit.as_ref();
    let Ok(commit) = Commit::>::decode(&mut commit_ref) else {
      log::error!("sent an invalidly serialized commit");
      return false;
    };
    // Storage DoS vector. We *could* truncate to solely the relevant portion, trying to save this,
    // yet then we'd have to test the truncation was performed correctly.
    if !commit_ref.is_empty() {
      log::error!("sent a commit with additional data after it");
      return false;
    }

    if !self.network.verify_commit(block.id(), &commit) {
      log::error!("sent an invalid commit");
      return false;
    }

    let number = BlockNumber(block_number + 1);
    self.synced_block.write().await.send(SyncedBlock { number, block, commit }).await.unwrap();
    // The machine reports back whether the synced block was successfully added
    result.next().await.unwrap()
  }

  // Sync a block.
  // TODO: Since we have a static validator set, we should only need the tail commit?
  pub async fn sync_block(&self, block: Block, commit: Vec) -> bool {
    let mut result = self.synced_block_result.write().await;
    self.sync_block_internal(block, commit, &mut result).await
  }

  // Return true if the message should be rebroadcasted.
  pub async fn handle_message(&self, msg: &[u8]) -> bool {
    match msg.first() {
      // Application transaction: attempt to add it to our mempool
      Some(&TRANSACTION_MESSAGE) => {
        let Ok(tx) = Transaction::read::<&[u8]>(&mut &msg[1 ..]) else {
          log::error!("received invalid transaction message");
          return false;
        };

        // TODO: Sync mempools with fellow peers
        // Can we just rebroadcast transactions not included for at least two blocks?
        let res = self.network.blockchain.write().await.add_transaction::>(
          false,
          tx,
          &self.network.signature_scheme(),
        );
        log::debug!("received transaction message. valid new transaction: {res:?}");
        res == Ok(true)
      }

      // Tendermint consensus message: feed it to the machine
      Some(&TENDERMINT_MESSAGE) => {
        let Ok(msg) =
          SignedMessageFor::>::decode::<&[u8]>(&mut &msg[1 ..])
        else {
          log::error!("received invalid tendermint message");
          return false;
        };

        self.messages.write().await.send(msg).await.unwrap();
        // Consensus messages are rebroadcast on an interval by the machine itself, not here
        false
      }

      _ => false,
    }
  }

  /// Get a Future which will resolve once the next block has been added.
pub async fn next_block_notification( &self, ) -> impl Send + Sync + core::future::Future> { let (tx, rx) = tokio::sync::oneshot::channel(); self.network.blockchain.write().await.next_block_notifications.push_back(tx); rx } } #[derive(Clone)] pub struct TributaryReader(D, [u8; 32], PhantomData); impl TributaryReader { pub fn genesis(&self) -> [u8; 32] { self.1 } // Since these values are static once set, they can be safely read from the database without lock // acquisition pub fn block(&self, hash: &[u8; 32]) -> Option> { Blockchain::::block_from_db(&self.0, self.1, hash) } pub fn commit(&self, hash: &[u8; 32]) -> Option> { Blockchain::::commit_from_db(&self.0, self.1, hash) } pub fn parsed_commit(&self, hash: &[u8; 32]) -> Option> { self.commit(hash).map(|commit| Commit::::decode(&mut commit.as_ref()).unwrap()) } pub fn block_after(&self, hash: &[u8; 32]) -> Option<[u8; 32]> { Blockchain::::block_after(&self.0, self.1, hash) } pub fn time_of_block(&self, hash: &[u8; 32]) -> Option { self .commit(hash) .map(|commit| Commit::::decode(&mut commit.as_ref()).unwrap().end_time) } pub fn locally_provided_txs_in_block(&self, hash: &[u8; 32], order: &str) -> bool { Blockchain::::locally_provided_txs_in_block(&self.0, &self.1, hash, order) } // This isn't static, yet can be read with only minor discrepancy risks pub fn tip(&self) -> [u8; 32] { Blockchain::::tip_from_db(&self.0, self.1) } } ================================================ FILE: coordinator/tributary/src/mempool.rs ================================================ use std::collections::HashMap; use dalek_ff_group::Ristretto; use ciphersuite::Ciphersuite; use serai_db::{DbTxn, Db}; use tendermint::ext::{Network, Commit}; use crate::{ ACCOUNT_MEMPOOL_LIMIT, ReadWrite, transaction::{ Signed, TransactionKind, TransactionError, Transaction as TransactionTrait, verify_transaction, }, tendermint::tx::verify_tendermint_tx, Transaction, }; #[derive(Clone, PartialEq, Eq, Debug)] pub(crate) struct Mempool { db: D, 
genesis: [u8; 32], last_nonce_in_mempool: HashMap<(::G, Vec), u32>, txs: HashMap<[u8; 32], Transaction>, txs_per_signer: HashMap<::G, u32>, } impl Mempool { fn transaction_key(&self, hash: &[u8]) -> Vec { D::key(b"tributary_mempool", b"transaction", [self.genesis.as_ref(), hash].concat()) } fn current_mempool_key(&self) -> Vec { D::key(b"tributary_mempool", b"current", self.genesis) } // save given tx to the mempool db fn save_tx(&mut self, tx: Transaction) { let tx_hash = tx.hash(); let transaction_key = self.transaction_key(&tx_hash); let current_mempool_key = self.current_mempool_key(); let mut current_mempool = self.db.get(¤t_mempool_key).unwrap_or(vec![]); let mut txn = self.db.txn(); txn.put(transaction_key, tx.serialize()); current_mempool.extend(tx_hash); txn.put(current_mempool_key, current_mempool); txn.commit(); self.txs.insert(tx_hash, tx); } fn unsigned_already_exist( &self, hash: [u8; 32], unsigned_in_chain: impl Fn([u8; 32]) -> bool, ) -> bool { unsigned_in_chain(hash) || self.txs.contains_key(&hash) } pub(crate) fn new(db: D, genesis: [u8; 32]) -> Self { let mut res = Mempool { db, genesis, last_nonce_in_mempool: HashMap::new(), txs: HashMap::new(), txs_per_signer: HashMap::new(), }; let current_mempool = res.db.get(res.current_mempool_key()).unwrap_or(vec![]); for hash in current_mempool.chunks(32) { let hash: [u8; 32] = hash.try_into().unwrap(); let tx: Transaction = Transaction::read::<&[u8]>(&mut res.db.get(res.transaction_key(&hash)).unwrap().as_ref()) .unwrap(); debug_assert_eq!(tx.hash(), hash); match tx { Transaction::Tendermint(tx) => { res.txs.insert(hash, Transaction::Tendermint(tx)); } Transaction::Application(tx) => match tx.kind() { TransactionKind::Signed(order, Signed { signer, nonce, .. 
}) => { let amount = *res.txs_per_signer.get(signer).unwrap_or(&0) + 1; res.txs_per_signer.insert(*signer, amount); if let Some(prior_nonce) = res.last_nonce_in_mempool.insert((*signer, order.clone()), *nonce) { assert_eq!(prior_nonce, nonce - 1); } res.txs.insert(hash, Transaction::Application(tx)); } TransactionKind::Unsigned => { res.txs.insert(hash, Transaction::Application(tx)); } _ => panic!("mempool database had a provided transaction"), }, } } res } // Returns Ok(true) if new, Ok(false) if an already present unsigned, or the error. pub(crate) fn add< N: Network, F: FnOnce(::G, Vec) -> Option, >( &mut self, blockchain_next_nonce: F, internal: bool, tx: Transaction, schema: &N::SignatureScheme, unsigned_in_chain: impl Fn([u8; 32]) -> bool, commit: impl Fn(u64) -> Option>, ) -> Result { match &tx { Transaction::Tendermint(tendermint_tx) => { // All Tendermint transactions should be unsigned assert_eq!(TransactionKind::Unsigned, tendermint_tx.kind()); // check we have the tx in the pool/chain if self.unsigned_already_exist(tx.hash(), unsigned_in_chain) { return Ok(false); } // verify the tx verify_tendermint_tx::(tendermint_tx, schema, commit)?; } Transaction::Application(app_tx) => { match app_tx.kind() { TransactionKind::Signed(order, Signed { signer, .. }) => { // Get the nonce from the blockchain let Some(blockchain_next_nonce) = blockchain_next_nonce(*signer, order.clone()) else { // Not a participant Err(TransactionError::InvalidSigner)? 
};
          let mut next_nonce = blockchain_next_nonce;
          // The signer may already have transactions pending; if so, this transaction must use
          // the nonce one past the highest nonce already in the mempool
          if let Some(mempool_last_nonce) =
            self.last_nonce_in_mempool.get(&(*signer, order.clone()))
          {
            assert!(*mempool_last_nonce >= blockchain_next_nonce);
            next_nonce = *mempool_last_nonce + 1;
          }

          // If we have too many transactions from this sender, don't add this yet UNLESS we are
          // this sender
          let amount_in_pool = *self.txs_per_signer.get(signer).unwrap_or(&0) + 1;
          if !internal && (amount_in_pool > ACCOUNT_MEMPOOL_LIMIT) {
            Err(TransactionError::TooManyInMempool)?;
          }

          verify_transaction(app_tx, self.genesis, &mut |_, _| Some(next_nonce))?;
          self.last_nonce_in_mempool.insert((*signer, order.clone()), next_nonce);
          self.txs_per_signer.insert(*signer, amount_in_pool);
        }
        TransactionKind::Unsigned => {
          // check we have the tx in the pool/chain
          if self.unsigned_already_exist(tx.hash(), unsigned_in_chain) {
            return Ok(false);
          }
          app_tx.verify()?;
        }
        TransactionKind::Provided(_) => Err(TransactionError::ProvidedAddedToMempool)?,
      }
    }
  }

  // Save the TX to the pool
  self.save_tx(tx);
  Ok(true)
}

// Returns None if the mempool doesn't have a nonce tracked.
pub(crate) fn next_nonce_in_mempool(
  &self,
  signer: &::G,
  order: Vec,
) -> Option {
  self.last_nonce_in_mempool.get(&(*signer, order)).copied().map(|nonce| nonce + 1)
}

/// Get transactions to include in a block.
pub(crate) fn block(&mut self) -> Vec> {
  let mut unsigned = vec![];
  let mut signed = vec![];
  for hash in self.txs.keys().copied().collect::>() {
    let tx = &self.txs[&hash];

    match tx.kind() {
      TransactionKind::Signed(_, Signed { .. }) => {
        signed.push(tx.clone());
      }
      TransactionKind::Unsigned => {
        unsigned.push(tx.clone());
      }
      _ => panic!("provided transaction entered mempool"),
    }
  }

  // Sort signed by nonce
  let nonce = |tx: &Transaction| {
    if let TransactionKind::Signed(_, Signed { nonce, .. }) = tx.kind() {
      *nonce
    } else {
      unreachable!()
    }
  };
  // Nonces are integers with a total order, so sort_by_key applies directly (and removes the
  // unwrap-on-partial_cmp the prior sort_by form required)
  signed.sort_by_key(nonce);

  // unsigned first, then signed.
  unsigned.append(&mut signed);
  unsigned
}

/// Remove a transaction from the mempool.
pub(crate) fn remove(&mut self, tx: &[u8; 32]) {
  let transaction_key = self.transaction_key(tx);
  let current_mempool_key = self.current_mempool_key();
  let current_mempool = self.db.get(&current_mempool_key).unwrap_or(vec![]);

  // Locate this hash's offset within the flat list of 32-byte hashes
  let mut i = 0;
  while i < current_mempool.len() {
    if &current_mempool[i .. (i + 32)] == tx {
      break;
    }
    i += 32;
  }

  // This doesn't have to be atomic with any greater operation
  let mut txn = self.db.txn();
  txn.del(transaction_key);
  // Only rewrite the mempool list if the hash was actually present
  if i != current_mempool.len() {
    txn
      .put(current_mempool_key, [&current_mempool[.. i], &current_mempool[(i + 32) ..]].concat());
  }
  txn.commit();

  if let Some(tx) = self.txs.remove(tx) {
    if let TransactionKind::Signed(order, Signed { signer, nonce, .. }) = tx.kind() {
      let amount = *self.txs_per_signer.get(signer).unwrap() - 1;
      self.txs_per_signer.insert(*signer, amount);

      // Only clear the tracked last nonce if this was the transaction holding it
      if self.last_nonce_in_mempool.get(&(*signer, order.clone())) == Some(nonce) {
        self.last_nonce_in_mempool.remove(&(*signer, order));
      }
    }
  }
}

#[cfg(test)]
pub(crate) fn txs(&self) -> &HashMap<[u8; 32], Transaction> {
  &self.txs
}
}

================================================
FILE: coordinator/tributary/src/merkle.rs
================================================
use blake2::{Digest, Blake2s256};

/// Compute a Merkle root over the given leaf hashes.
///
/// Leaves are domain-separated with "leaf_hash" and interior nodes with "branch_hash"; an
/// unpaired node is hashed against a zero sibling. An empty input yields the all-zero hash.
pub(crate) fn merkle(hash_args: &[[u8; 32]]) -> [u8; 32] {
  let mut hashes = Vec::with_capacity(hash_args.len());
  for hash in hash_args {
    hashes.push(Blake2s256::digest([b"leaf_hash".as_ref(), hash].concat()));
  }

  let zero = [0; 32];
  let mut interim;
  while hashes.len() > 1 {
    interim = Vec::with_capacity((hashes.len() + 1) / 2);

    let mut i = 0;
    while i < hashes.len() {
      interim.push(Blake2s256::digest(
        [
          b"branch_hash".as_ref(),
          hashes[i].as_ref(),
          hashes.get(i + 1).map_or(zero.as_ref(), AsRef::as_ref),
        ]
        .concat(),
      ));
      i += 2;
    }

    hashes = interim;
  }

  hashes.first().copied().map_or(zero, Into::into)
}

================================================
FILE: coordinator/tributary/src/provided.rs
================================================ use std::collections::{VecDeque, HashMap}; use thiserror::Error; use serai_db::{Get, DbTxn, Db}; use crate::transaction::{TransactionKind, TransactionError, Transaction, verify_transaction}; #[derive(Clone, PartialEq, Eq, Debug, Error)] pub enum ProvidedError { /// The provided transaction's kind wasn't Provided #[error("transaction wasn't a provided transaction")] NotProvided, /// The provided transaction was invalid #[error("provided transaction was invalid")] InvalidProvided(TransactionError), /// Transaction was already provided #[error("transaction was already provided")] AlreadyProvided, /// Local transaction mismatches the on-chain provided #[error("local provides mismatches on-chain provided")] LocalMismatchesOnChain, } #[derive(Clone, PartialEq, Eq, Debug)] pub struct ProvidedTransactions { db: D, genesis: [u8; 32], pub(crate) transactions: HashMap<&'static str, VecDeque>, } impl ProvidedTransactions { fn transaction_key(&self, hash: &[u8]) -> Vec { D::key(b"tributary_provided", b"transaction", [self.genesis.as_ref(), hash].concat()) } fn current_provided_key(&self) -> Vec { D::key(b"tributary_provided", b"current", self.genesis) } pub(crate) fn locally_provided_quantity_key(genesis: &[u8; 32], order: &str) -> Vec { D::key(b"tributary_provided", b"local_quantity", [genesis, order.as_bytes()].concat()) } pub(crate) fn on_chain_provided_quantity_key(genesis: &[u8; 32], order: &str) -> Vec { D::key(b"tributary_provided", b"on_chain_quantity", [genesis, order.as_bytes()].concat()) } pub(crate) fn block_provided_quantity_key( genesis: &[u8; 32], block: &[u8; 32], order: &str, ) -> Vec { D::key(b"tributary_provided", b"block_quantity", [genesis, block, order.as_bytes()].concat()) } pub(crate) fn on_chain_provided_key(genesis: &[u8; 32], order: &str, id: u32) -> Vec { D::key( b"tributary_provided", b"on_chain_tx", [genesis, order.as_bytes(), &id.to_le_bytes()].concat(), ) } pub(crate) fn new(db: D, genesis: [u8; 
32]) -> Self { let mut res = ProvidedTransactions { db, genesis, transactions: HashMap::new() }; let currently_provided = res.db.get(res.current_provided_key()).unwrap_or(vec![]); let mut i = 0; while i < currently_provided.len() { let tx = T::read::<&[u8]>( &mut res.db.get(res.transaction_key(¤tly_provided[i .. (i + 32)])).unwrap().as_ref(), ) .unwrap(); let TransactionKind::Provided(order) = tx.kind() else { panic!("provided transaction saved to disk wasn't provided"); }; if !res.transactions.contains_key(order) { res.transactions.insert(order, VecDeque::new()); } res.transactions.get_mut(order).unwrap().push_back(tx); i += 32; } res } /// Provide a transaction for inclusion in a block. pub(crate) fn provide(&mut self, tx: T) -> Result<(), ProvidedError> { let TransactionKind::Provided(order) = tx.kind() else { Err(ProvidedError::NotProvided)? }; match verify_transaction(&tx, self.genesis, &mut |_, _| None) { Ok(()) => {} Err(e) => Err(ProvidedError::InvalidProvided(e))?, } let tx_hash = tx.hash(); // Check it wasn't already provided let provided_key = self.transaction_key(&tx_hash); if self.db.get(&provided_key).is_some() { Err(ProvidedError::AlreadyProvided)?; } // get local and on-chain tx numbers let local_key = Self::locally_provided_quantity_key(&self.genesis, order); let mut local_quantity = self.db.get(&local_key).map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap())); let on_chain_key = Self::on_chain_provided_quantity_key(&self.genesis, order); let on_chain_quantity = self.db.get(on_chain_key).map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap())); let current_provided_key = self.current_provided_key(); // This would have a race-condition with multiple calls to provide, though this takes &mut self // peventing multiple calls at once let mut txn = self.db.txn(); txn.put(provided_key, tx.serialize()); let this_provided_id = local_quantity; local_quantity += 1; txn.put(local_key, local_quantity.to_le_bytes()); if this_provided_id < 
on_chain_quantity { // Verify against the on-chain version if tx_hash.as_ref() != txn.get(Self::on_chain_provided_key(&self.genesis, order, this_provided_id)).unwrap() { Err(ProvidedError::LocalMismatchesOnChain)?; } txn.commit(); } else { let mut currently_provided = txn.get(¤t_provided_key).unwrap_or(vec![]); currently_provided.extend(tx_hash); txn.put(current_provided_key, currently_provided); txn.commit(); if !self.transactions.contains_key(order) { self.transactions.insert(order, VecDeque::new()); } self.transactions.get_mut(order).unwrap().push_back(tx); } Ok(()) } /// Complete a provided transaction, no longer proposing it nor voting for its validity. pub(crate) fn complete( &mut self, txn: &mut D::Transaction<'_>, order: &'static str, block: [u8; 32], tx: [u8; 32], ) { if let Some(next_tx) = self.transactions.get_mut(order).and_then(VecDeque::pop_front) { assert_eq!(next_tx.hash(), tx); let current_provided_key = self.current_provided_key(); let mut currently_provided = txn.get(¤t_provided_key).unwrap(); // Find this TX's hash let mut i = 0; loop { if currently_provided[i .. (i + 32)] == tx { assert_eq!(¤tly_provided.drain(i .. (i + 32)).collect::>(), &tx); break; } i += 32; if i >= currently_provided.len() { panic!("couldn't find completed TX in currently provided"); } } txn.put(current_provided_key, currently_provided); } // bump the on-chain tx number. 
let on_chain_key = Self::on_chain_provided_quantity_key(&self.genesis, order); let block_order_key = Self::block_provided_quantity_key(&self.genesis, &block, order); let mut on_chain_quantity = self.db.get(&on_chain_key).map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap())); let this_provided_id = on_chain_quantity; txn.put(Self::on_chain_provided_key(&self.genesis, order, this_provided_id), tx); on_chain_quantity += 1; txn.put(on_chain_key, on_chain_quantity.to_le_bytes()); txn.put(block_order_key, on_chain_quantity.to_le_bytes()); } } ================================================ FILE: coordinator/tributary/src/tendermint/mod.rs ================================================ use core::ops::Deref; use std::{sync::Arc, collections::HashMap}; use async_trait::async_trait; use subtle::ConstantTimeEq; use zeroize::{Zeroize, Zeroizing}; use rand::{SeedableRng, seq::SliceRandom}; use rand_chacha::ChaCha12Rng; use transcript::{Transcript, RecommendedTranscript}; use dalek_ff_group::Ristretto; use ciphersuite::{ group::{ GroupEncoding, ff::{Field, PrimeField}, }, Ciphersuite, }; use schnorr::{ SchnorrSignature, aggregate::{SchnorrAggregator, SchnorrAggregate}, }; use serai_db::Db; use scale::{Encode, Decode}; use tendermint::{ SignedMessageFor, ext::{ BlockNumber, RoundNumber, Signer as SignerTrait, SignatureScheme, Weights, Block as BlockTrait, BlockError as TendermintBlockError, Commit, Network, }, SlashEvent, }; use tokio::sync::RwLock; use crate::{ TENDERMINT_MESSAGE, TRANSACTION_MESSAGE, ReadWrite, transaction::Transaction as TransactionTrait, Transaction, BlockHeader, Block, BlockError, Blockchain, P2p, }; pub mod tx; use tx::TendermintTx; const DST: &[u8] = b"Tributary Tendermint Commit Aggregator"; fn challenge( genesis: [u8; 32], key: [u8; 32], nonce: &[u8], msg: &[u8], ) -> ::F { let mut transcript = RecommendedTranscript::new(b"Tributary Chain Tendermint Message"); transcript.append_message(b"genesis", genesis); transcript.append_message(b"key", 
key); transcript.append_message(b"nonce", nonce); transcript.append_message(b"message", msg); ::F::from_bytes_mod_order_wide(&transcript.challenge(b"schnorr").into()) } #[derive(Clone, PartialEq, Eq, Debug)] pub struct Signer { genesis: [u8; 32], key: Zeroizing<::F>, } impl Signer { pub(crate) fn new(genesis: [u8; 32], key: Zeroizing<::F>) -> Signer { Signer { genesis, key } } } #[async_trait] impl SignerTrait for Signer { type ValidatorId = [u8; 32]; type Signature = [u8; 64]; /// Returns the validator's current ID. Returns None if they aren't a current validator. async fn validator_id(&self) -> Option { Some((Ristretto::generator() * self.key.deref()).to_bytes()) } /// Sign a signature with the current validator's private key. async fn sign(&self, msg: &[u8]) -> Self::Signature { let mut nonce = Zeroizing::new(RecommendedTranscript::new(b"Tributary Chain Tendermint Nonce")); nonce.append_message(b"genesis", self.genesis); nonce.append_message(b"key", Zeroizing::new(self.key.deref().to_repr()).as_ref()); nonce.append_message(b"message", msg); let mut nonce = nonce.challenge(b"nonce"); let mut nonce_arr = [0; 64]; nonce_arr.copy_from_slice(nonce.as_ref()); let nonce_ref: &mut [u8] = nonce.as_mut(); nonce_ref.zeroize(); let nonce_ref: &[u8] = nonce.as_ref(); assert_eq!(nonce_ref, [0; 64].as_ref()); let nonce = Zeroizing::new(::F::from_bytes_mod_order_wide(&nonce_arr)); nonce_arr.zeroize(); assert!(!bool::from(nonce.ct_eq(&::F::ZERO))); let challenge = challenge( self.genesis, (Ristretto::generator() * self.key.deref()).to_bytes(), (Ristretto::generator() * nonce.deref()).to_bytes().as_ref(), msg, ); let sig = SchnorrSignature::::sign(&self.key, nonce, challenge).serialize(); let mut res = [0; 64]; res.copy_from_slice(&sig); res } } #[derive(Clone, PartialEq, Eq, Debug)] pub struct Validators { genesis: [u8; 32], total_weight: u64, weights: HashMap<[u8; 32], u64>, robin: Vec<[u8; 32]>, } impl Validators { pub(crate) fn new( genesis: [u8; 32], validators: Vec<(::G, 
u64)>, ) -> Option { let mut total_weight = 0; let mut weights = HashMap::new(); let mut transcript = RecommendedTranscript::new(b"Round Robin Randomization"); let mut robin = vec![]; for (validator, weight) in validators { let validator = validator.to_bytes(); if weight == 0 { return None; } total_weight += weight; weights.insert(validator, weight); transcript.append_message(b"validator", validator); transcript.append_message(b"weight", weight.to_le_bytes()); robin.extend(vec![validator; usize::try_from(weight).unwrap()]); } robin.shuffle(&mut ChaCha12Rng::from_seed(transcript.rng_seed(b"robin"))); Some(Validators { genesis, total_weight, weights, robin }) } } impl SignatureScheme for Validators { type ValidatorId = [u8; 32]; type Signature = [u8; 64]; type AggregateSignature = Vec; type Signer = Arc; #[must_use] fn verify(&self, validator: Self::ValidatorId, msg: &[u8], sig: &Self::Signature) -> bool { if !self.weights.contains_key(&validator) { return false; } let Ok(validator_point) = Ristretto::read_G::<&[u8]>(&mut validator.as_ref()) else { return false; }; let Ok(actual_sig) = SchnorrSignature::::read::<&[u8]>(&mut sig.as_ref()) else { return false; }; actual_sig.verify(validator_point, challenge(self.genesis, validator, &sig[.. 
32], msg)) }

  /// Aggregate individual Schnorr signatures into a single aggregate signature.
  ///
  /// Panics if any provided signature is malformed; callers are expected to only pass
  /// signatures previously verified under this scheme.
  fn aggregate(
    &self,
    validators: &[Self::ValidatorId],
    msg: &[u8],
    sigs: &[Self::Signature],
  ) -> Self::AggregateSignature {
    assert_eq!(validators.len(), sigs.len());

    let mut aggregator = SchnorrAggregator::::new(DST);
    for (key, sig) in validators.iter().zip(sigs) {
      let actual_sig = SchnorrSignature::::read::<&[u8]>(&mut sig.as_ref()).unwrap();
      // Each signature's challenge binds the genesis, the signer's key, and its own nonce
      let challenge = challenge(self.genesis, *key, actual_sig.R.to_bytes().as_ref(), msg);
      aggregator.aggregate(challenge, actual_sig);
    }
    let aggregate = aggregator.complete().unwrap();
    aggregate.serialize()
  }

  /// Verify an aggregate signature produced by `aggregate` against the claimed signer set.
  #[must_use]
  fn verify_aggregate(
    &self,
    signers: &[Self::ValidatorId],
    msg: &[u8],
    sig: &Self::AggregateSignature,
  ) -> bool {
    let Ok(aggregate) = SchnorrAggregate::::read::<&[u8]>(&mut sig.as_slice()) else {
      return false;
    };
    // The aggregate must carry exactly one nonce per claimed signer
    if signers.len() != aggregate.Rs().len() {
      return false;
    }

    // Recompute each signer's challenge from their key and the aggregate's nonces
    let mut challenges = vec![];
    for (key, nonce) in signers.iter().zip(aggregate.Rs()) {
      challenges.push(challenge(self.genesis, *key, nonce.to_bytes().as_ref(), msg));
    }

    aggregate.verify(
      DST,
      signers
        .iter()
        .zip(challenges)
        .map(|(s, c)| (::read_G(&mut s.as_slice()).unwrap(), c))
        .collect::>()
        .as_slice(),
    )
  }
}

impl Weights for Validators {
  type ValidatorId = [u8; 32];

  // Sum of all validator weights, as accumulated in Validators::new
  fn total_weight(&self) -> u64 {
    self.total_weight
  }
  // Panics for unknown validators; callers must only query current validators
  fn weight(&self, validator: Self::ValidatorId) -> u64 {
    self.weights[&validator]
  }

  /// Select the proposer for this block/round from the weight-expanded, shuffled round-robin.
  fn proposer(&self, block: BlockNumber, round: RoundNumber) -> Self::ValidatorId {
    let block = usize::try_from(block.0).unwrap();
    let round = usize::try_from(round.0).unwrap();
    // If multiple rounds are used, a naive block + round would cause the same index to be chosen
    // in quick succession.
    // Accordingly, if we use additional rounds, jump halfway around.
    // While this is still game-able, it's not explicitly reusing indexes immediately after each
    // other.
self.robin [(block + (if round == 0 { 0 } else { round + (self.robin.len() / 2) })) % self.robin.len()] } } #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] pub struct TendermintBlock(pub Vec); impl BlockTrait for TendermintBlock { type Id = [u8; 32]; fn id(&self) -> Self::Id { BlockHeader::read::<&[u8]>(&mut self.0.as_ref()).unwrap().hash() } } #[derive(Clone, Debug)] pub struct TendermintNetwork { pub(crate) genesis: [u8; 32], pub(crate) signer: Arc, pub(crate) validators: Arc, pub(crate) blockchain: Arc>>, pub(crate) p2p: P, } pub const BLOCK_PROCESSING_TIME: u32 = 999; pub const LATENCY_TIME: u32 = 1667; pub const TARGET_BLOCK_TIME: u32 = BLOCK_PROCESSING_TIME + (3 * LATENCY_TIME); #[async_trait] impl Network for TendermintNetwork { type Db = D; type ValidatorId = [u8; 32]; type SignatureScheme = Arc; type Weights = Arc; type Block = TendermintBlock; // These are in milliseconds and create a six-second block time. // The block time is the latency on message delivery (where a message is some piece of data // embedded in a transaction) times three plus the block processing time, hence why it should be // kept low. 
const BLOCK_PROCESSING_TIME: u32 = BLOCK_PROCESSING_TIME; const LATENCY_TIME: u32 = LATENCY_TIME; fn signer(&self) -> Arc { self.signer.clone() } fn signature_scheme(&self) -> Arc { self.validators.clone() } fn weights(&self) -> Arc { self.validators.clone() } async fn broadcast(&mut self, msg: SignedMessageFor) { let mut to_broadcast = vec![TENDERMINT_MESSAGE]; to_broadcast.extend(msg.encode()); self.p2p.broadcast(self.genesis, to_broadcast).await } async fn slash(&mut self, validator: Self::ValidatorId, slash_event: SlashEvent) { log::error!( "validator {} triggered a slash event on tributary {} (with evidence: {})", hex::encode(validator), hex::encode(self.genesis), matches!(slash_event, SlashEvent::WithEvidence(_)), ); let signer = self.signer(); let Some(tx) = (match slash_event { SlashEvent::WithEvidence(evidence) => { // create an unsigned evidence tx Some(TendermintTx::SlashEvidence(evidence)) } SlashEvent::Id(_reason, _block, _round) => { // TODO: Increase locally observed slash points None } }) else { return; }; // add tx to blockchain and broadcast to peers let mut to_broadcast = vec![TRANSACTION_MESSAGE]; tx.write(&mut to_broadcast).unwrap(); if self.blockchain.write().await.add_transaction::( true, Transaction::Tendermint(tx), &self.signature_scheme(), ) == Ok(true) { self.p2p.broadcast(signer.genesis, to_broadcast).await; } } async fn validate(&self, block: &Self::Block) -> Result<(), TendermintBlockError> { let block = Block::read::<&[u8]>(&mut block.0.as_ref()).map_err(|_| TendermintBlockError::Fatal)?; self .blockchain .read() .await .verify_block::(&block, &self.signature_scheme(), false) .map_err(|e| match e { BlockError::NonLocalProvided(_) => TendermintBlockError::Temporal, _ => { log::warn!("Tributary Tendermint validate returning BlockError::Fatal due to {e:?}"); TendermintBlockError::Fatal } }) } async fn add_block( &mut self, serialized_block: Self::Block, commit: Commit, ) -> Option { let invalid_block = || { // There's a fatal flaw in the 
code, it's behind a hard fork, or the validators turned // malicious // All justify a halt to then achieve social consensus from // TODO: Under multiple validator sets, a small validator set turning malicious knocks // off the entire network. That's an unacceptable DoS. panic!("validators added invalid block to tributary {}", hex::encode(self.genesis)); }; // Tendermint should only produce valid commits assert!(self.verify_commit(serialized_block.id(), &commit)); let Ok(block) = Block::read::<&[u8]>(&mut serialized_block.0.as_ref()) else { return invalid_block(); }; let encoded_commit = commit.encode(); loop { let block_res = self.blockchain.write().await.add_block::( &block, encoded_commit.clone(), &self.signature_scheme(), ); match block_res { Ok(()) => { // If we successfully added this block, break break; } Err(BlockError::NonLocalProvided(hash)) => { log::error!( "missing provided transaction {} which other validators on tributary {} had", hex::encode(hash), hex::encode(self.genesis) ); tokio::time::sleep(core::time::Duration::from_secs(5)).await; } _ => return invalid_block(), } } Some(TendermintBlock( self.blockchain.write().await.build_block::(&self.signature_scheme()).serialize(), )) } } ================================================ FILE: coordinator/tributary/src/tendermint/tx.rs ================================================ use std::io; use scale::{Encode, Decode, IoReader}; use blake2::{Digest, Blake2s256}; use dalek_ff_group::Ristretto; use ciphersuite::Ciphersuite; use crate::{ transaction::{Transaction, TransactionKind, TransactionError}, ReadWrite, }; use tendermint::{ verify_tendermint_evidence, ext::{Network, Commit}, }; pub use tendermint::{Evidence, decode_signed_message}; #[allow(clippy::large_enum_variant)] #[derive(Clone, PartialEq, Eq, Debug)] pub enum TendermintTx { SlashEvidence(Evidence), } impl ReadWrite for TendermintTx { fn read(reader: &mut R) -> io::Result { Evidence::decode(&mut IoReader(reader)) 
.map(TendermintTx::SlashEvidence) .map_err(|_| io::Error::new(io::ErrorKind::InvalidData, "invalid evidence format")) } fn write(&self, writer: &mut W) -> io::Result<()> { match self { TendermintTx::SlashEvidence(ev) => writer.write_all(&ev.encode()), } } } impl Transaction for TendermintTx { fn kind(&self) -> TransactionKind<'_> { // There's an assert elsewhere in the codebase expecting this behavior // If we do want to add Provided/Signed TendermintTxs, review the implications carefully TransactionKind::Unsigned } fn hash(&self) -> [u8; 32] { Blake2s256::digest(self.serialize()).into() } fn sig_hash(&self, _genesis: [u8; 32]) -> ::F { match self { TendermintTx::SlashEvidence(_) => panic!("sig_hash called on slash evidence transaction"), } } fn verify(&self) -> Result<(), TransactionError> { Ok(()) } } pub(crate) fn verify_tendermint_tx( tx: &TendermintTx, schema: &N::SignatureScheme, commit: impl Fn(u64) -> Option>, ) -> Result<(), TransactionError> { tx.verify()?; match tx { TendermintTx::SlashEvidence(ev) => verify_tendermint_evidence::(ev, schema, commit) .map_err(|_| TransactionError::InvalidContent)?, } Ok(()) } ================================================ FILE: coordinator/tributary/src/tests/block.rs ================================================ use std::{sync::Arc, io, collections::HashMap, fmt::Debug}; use blake2::{Digest, Blake2s256}; use dalek_ff_group::Ristretto; use ciphersuite::{ group::{ff::Field, Group}, Ciphersuite, }; use schnorr::SchnorrSignature; use serai_db::MemDb; use tendermint::ext::Commit; use crate::{ ReadWrite, BlockError, Block, Transaction, tests::p2p::DummyP2p, transaction::{TransactionError, Signed, TransactionKind, Transaction as TransactionTrait}, tendermint::{TendermintNetwork, Validators}, }; type N = TendermintNetwork; // A transaction solely defined by its nonce and a distinguisher (to allow creating distinct TXs // sharing a nonce). 
#[derive(Clone, PartialEq, Eq, Debug)] struct NonceTransaction(u32, u8, Signed); impl NonceTransaction { fn new(nonce: u32, distinguisher: u8) -> Self { NonceTransaction( nonce, distinguisher, Signed { signer: ::G::identity(), nonce, signature: SchnorrSignature:: { R: ::G::identity(), s: ::F::ZERO, }, }, ) } } impl ReadWrite for NonceTransaction { fn read(reader: &mut R) -> io::Result { let mut nonce = [0; 4]; reader.read_exact(&mut nonce)?; let nonce = u32::from_le_bytes(nonce); let mut distinguisher = [0]; reader.read_exact(&mut distinguisher)?; Ok(NonceTransaction::new(nonce, distinguisher[0])) } fn write(&self, writer: &mut W) -> io::Result<()> { writer.write_all(&self.0.to_le_bytes())?; writer.write_all(&[self.1]) } } impl TransactionTrait for NonceTransaction { fn kind(&self) -> TransactionKind<'_> { TransactionKind::Signed(vec![], &self.2) } fn hash(&self) -> [u8; 32] { Blake2s256::digest([self.0.to_le_bytes().as_ref(), &[self.1]].concat()).into() } fn verify(&self) -> Result<(), TransactionError> { Ok(()) } } #[test] fn empty_block() { const GENESIS: [u8; 32] = [0xff; 32]; const LAST: [u8; 32] = [0x01; 32]; let validators = Arc::new(Validators::new(GENESIS, vec![]).unwrap()); let commit = |_: u64| -> Option>> { Some(Commit::> { end_time: 0, validators: vec![], signature: vec![] }) }; let provided_or_unsigned_in_chain = |_: [u8; 32]| false; Block::::new(LAST, vec![], vec![]) .verify::( GENESIS, LAST, HashMap::new(), &mut |_, _| None, &validators, commit, provided_or_unsigned_in_chain, false, ) .unwrap(); } #[test] fn duplicate_nonces() { const GENESIS: [u8; 32] = [0xff; 32]; const LAST: [u8; 32] = [0x01; 32]; let validators = Arc::new(Validators::new(GENESIS, vec![]).unwrap()); // Run once without duplicating a nonce, and once with, so that's confirmed to be the faulty // component for i in [1, 0] { let mut mempool = vec![]; let mut insert = |tx: NonceTransaction| mempool.push(Transaction::Application(tx)); insert(NonceTransaction::new(0, 0)); 
insert(NonceTransaction::new(i, 1)); let commit = |_: u64| -> Option>> { Some(Commit::> { end_time: 0, validators: vec![], signature: vec![] }) }; let provided_or_unsigned_in_chain = |_: [u8; 32]| false; let mut last_nonce = 0; let res = Block::new(LAST, vec![], mempool).verify::( GENESIS, LAST, HashMap::new(), &mut |_, _| { let res = last_nonce; last_nonce += 1; Some(res) }, &validators, commit, provided_or_unsigned_in_chain, false, ); if i == 1 { res.unwrap(); } else { assert_eq!(res, Err(BlockError::TransactionError(TransactionError::InvalidNonce))); } } } ================================================ FILE: coordinator/tributary/src/tests/blockchain.rs ================================================ use core::ops::Deref; use std::{ collections::{VecDeque, HashMap}, sync::Arc, io, }; use zeroize::Zeroizing; use rand::rngs::OsRng; use blake2::{Digest, Blake2s256}; use dalek_ff_group::Ristretto; use ciphersuite::{group::ff::Field, Ciphersuite}; use serai_db::{DbTxn, Db, MemDb}; use crate::{ ReadWrite, TransactionKind, transaction::Transaction as TransactionTrait, TransactionError, Transaction, ProvidedError, ProvidedTransactions, merkle, BlockError, Block, Blockchain, tendermint::{TendermintNetwork, Validators, Signer, TendermintBlock}, tests::{ ProvidedTransaction, SignedTransaction, random_provided_transaction, p2p::DummyP2p, new_genesis, random_evidence_tx, }, }; type N = TendermintNetwork; fn new_blockchain( genesis: [u8; 32], participants: &[::G], ) -> (MemDb, Blockchain) { let db = MemDb::new(); let blockchain = Blockchain::new(db.clone(), genesis, participants); assert_eq!(blockchain.tip(), genesis); assert_eq!(blockchain.block_number(), 0); (db, blockchain) } #[test] fn block_addition() { let genesis = new_genesis(); let validators = Arc::new(Validators::new(genesis, vec![]).unwrap()); let (db, mut blockchain) = new_blockchain::(genesis, &[]); let block = blockchain.build_block::(&validators); assert_eq!(block.header.parent, genesis); 
assert_eq!(block.header.transactions, [0; 32]); blockchain.verify_block::(&block, &validators, false).unwrap(); assert!(blockchain.add_block::(&block, vec![], &validators).is_ok()); assert_eq!(blockchain.tip(), block.hash()); assert_eq!(blockchain.block_number(), 1); assert_eq!( Blockchain::::block_after(&db, genesis, &block.parent()).unwrap(), block.hash() ); } #[test] fn invalid_block() { let genesis = new_genesis(); let validators = Arc::new(Validators::new(genesis, vec![]).unwrap()); let (_, mut blockchain) = new_blockchain::(genesis, &[]); let block = blockchain.build_block::(&validators); // Mutate parent { #[allow(clippy::redundant_clone)] // False positive let mut block = block.clone(); block.header.parent = Blake2s256::digest(block.header.parent).into(); assert!(blockchain.verify_block::(&block, &validators, false).is_err()); } // Mutate transactions merkle { let mut block = block; block.header.transactions = Blake2s256::digest(block.header.transactions).into(); assert!(blockchain.verify_block::(&block, &validators, false).is_err()); } let key = Zeroizing::new(::F::random(&mut OsRng)); let tx = crate::tests::signed_transaction(&mut OsRng, genesis, &key, 0); // Not a participant { // Manually create the block to bypass build_block's checks let block = Block::new(blockchain.tip(), vec![], vec![Transaction::Application(tx.clone())]); assert_eq!(block.header.transactions, merkle(&[tx.hash()])); assert!(blockchain.verify_block::(&block, &validators, false).is_err()); } // Run the rest of the tests with them as a participant let (_, blockchain) = new_blockchain(genesis, &[tx.1.signer]); // Re-run the not a participant block to make sure it now works { let block = Block::new(blockchain.tip(), vec![], vec![Transaction::Application(tx.clone())]); assert_eq!(block.header.transactions, merkle(&[tx.hash()])); blockchain.verify_block::(&block, &validators, false).unwrap(); } { // Add a valid transaction let (_, mut blockchain) = new_blockchain(genesis, &[tx.1.signer]); 
blockchain .add_transaction::(true, Transaction::Application(tx.clone()), &validators) .unwrap(); let mut block = blockchain.build_block::(&validators); assert_eq!(block.header.transactions, merkle(&[tx.hash()])); blockchain.verify_block::(&block, &validators, false).unwrap(); // And verify mutating the transactions merkle now causes a failure block.header.transactions = merkle(&[]); assert!(blockchain.verify_block::(&block, &validators, false).is_err()); } { // Invalid nonce let tx = crate::tests::signed_transaction(&mut OsRng, genesis, &key, 5); // Manually create the block to bypass build_block's checks let block = Block::new(blockchain.tip(), vec![], vec![Transaction::Application(tx)]); assert!(blockchain.verify_block::(&block, &validators, false).is_err()); } { // Invalid signature let (_, mut blockchain) = new_blockchain(genesis, &[tx.1.signer]); blockchain.add_transaction::(true, Transaction::Application(tx), &validators).unwrap(); let mut block = blockchain.build_block::(&validators); blockchain.verify_block::(&block, &validators, false).unwrap(); match &mut block.transactions[0] { Transaction::Application(tx) => { tx.1.signature.s += ::F::ONE; } _ => panic!("non-signed tx found"), } assert!(blockchain.verify_block::(&block, &validators, false).is_err()); // Make sure this isn't because the merkle changed due to the transaction hash including the // signature (which it explicitly isn't allowed to anyways) assert_eq!(block.header.transactions, merkle(&[block.transactions[0].hash()])); } } #[test] fn signed_transaction() { let genesis = new_genesis(); let validators = Arc::new(Validators::new(genesis, vec![]).unwrap()); let key = Zeroizing::new(::F::random(&mut OsRng)); let tx = crate::tests::signed_transaction(&mut OsRng, genesis, &key, 0); let signer = tx.1.signer; let (_, mut blockchain) = new_blockchain::(genesis, &[signer]); assert_eq!(blockchain.next_nonce(&signer, &[]), Some(0)); let test = |blockchain: &mut Blockchain, mempool: Vec>| { let tip = 
blockchain.tip(); for tx in mempool.clone() { let Transaction::Application(tx) = tx else { panic!("tendermint tx found"); }; let next_nonce = blockchain.next_nonce(&signer, &[]).unwrap(); blockchain.add_transaction::(true, Transaction::Application(tx), &validators).unwrap(); assert_eq!(next_nonce + 1, blockchain.next_nonce(&signer, &[]).unwrap()); } let block = blockchain.build_block::(&validators); assert_eq!(block, Block::new(blockchain.tip(), vec![], mempool.clone())); assert_eq!(blockchain.tip(), tip); assert_eq!(block.header.parent, tip); // Make sure all transactions were included assert_eq!(block.transactions, mempool); // Make sure the merkle was correct assert_eq!( block.header.transactions, merkle(&mempool.iter().map(Transaction::hash).collect::>()) ); // Verify and add the block blockchain.verify_block::(&block, &validators, false).unwrap(); assert!(blockchain.add_block::(&block, vec![], &validators).is_ok()); assert_eq!(blockchain.tip(), block.hash()); }; // Test with a single nonce test(&mut blockchain, vec![Transaction::Application(tx)]); assert_eq!(blockchain.next_nonce(&signer, &[]), Some(1)); // Test with a flood of nonces let mut mempool = vec![]; for nonce in 1 .. 
64 {
    mempool.push(Transaction::Application(crate::tests::signed_transaction(
      &mut OsRng,
      genesis,
      &key,
      nonce,
    )));
  }
  test(&mut blockchain, mempool);
  assert_eq!(blockchain.next_nonce(&signer, &[]), Some(64));
}

#[test]
fn provided_transaction() {
  let genesis = new_genesis();
  let validators = Arc::new(Validators::new(genesis, vec![]).unwrap());
  let (db, mut blockchain) = new_blockchain::<ProvidedTransaction>(genesis, &[]);

  let tx = random_provided_transaction(&mut OsRng, "order1");

  // This should be providable
  let mut temp_db = MemDb::new();
  let mut txs = ProvidedTransactions::<_, ProvidedTransaction>::new(temp_db.clone(), genesis);
  txs.provide(tx.clone()).unwrap();
  assert_eq!(txs.provide(tx.clone()), Err(ProvidedError::AlreadyProvided));
  assert_eq!(
    ProvidedTransactions::<_, ProvidedTransaction>::new(temp_db.clone(), genesis).transactions,
    HashMap::from([("order1", VecDeque::from([tx.clone()]))]),
  );
  let mut txn = temp_db.txn();
  txs.complete(&mut txn, "order1", [0u8; 32], tx.hash());
  txn.commit();
  assert!(ProvidedTransactions::<_, ProvidedTransaction>::new(db.clone(), genesis)
    .transactions
    .is_empty());

  // case we have the block's provided txs in our local as well
  {
    // Non-provided transactions should fail verification because we don't have them locally.
    let block = Block::new(blockchain.tip(), vec![tx.clone()], vec![]);
    assert!(blockchain.verify_block::<N>(&block, &validators, false).is_err());

    // Provided transactions should pass verification
    blockchain.provide_transaction(tx.clone()).unwrap();
    blockchain.verify_block::<N>(&block, &validators, false).unwrap();

    // add_block should work for verified blocks
    assert!(blockchain.add_block::<N>(&block, vec![], &validators).is_ok());

    let block = Block::new(blockchain.tip(), vec![tx.clone()], vec![]);
    // The provided transaction should no longer considered provided but added to chain,
    // causing this error
    assert_eq!(
      blockchain.verify_block::<N>(&block, &validators, false),
      Err(BlockError::ProvidedAlreadyIncluded)
    );
  }

  // case we don't have the block's provided txs in our local
  {
    let tx1 = random_provided_transaction(&mut OsRng, "order1");
    let tx2 = random_provided_transaction(&mut OsRng, "order1");
    let tx3 = random_provided_transaction(&mut OsRng, "order2");
    let tx4 = random_provided_transaction(&mut OsRng, "order2");

    // add_block DOES NOT fail for unverified provided transactions if told to add them,
    // since now we can have them later.
    let block1 = Block::new(blockchain.tip(), vec![tx1.clone(), tx3.clone()], vec![]);
    assert!(blockchain.add_block::<N>(&block1, vec![], &validators).is_ok());

    // in fact, we can have many blocks that have provided txs that we don't have locally.
    let block2 = Block::new(blockchain.tip(), vec![tx2.clone(), tx4.clone()], vec![]);
    assert!(blockchain.add_block::<N>(&block2, vec![], &validators).is_ok());

    // make sure we won't return ok for the block before we actually got the txs
    let TransactionKind::Provided(order) = tx1.kind() else { panic!("tx wasn't provided") };
    assert!(!Blockchain::<MemDb, ProvidedTransaction>::locally_provided_txs_in_block(
      &db,
      &genesis,
      &block1.hash(),
      order
    ));

    // provide the first tx
    blockchain.provide_transaction(tx1).unwrap();

    // it should be ok for this order now, since the second tx has different order.
    assert!(Blockchain::<MemDb, ProvidedTransaction>::locally_provided_txs_in_block(
      &db,
      &genesis,
      &block1.hash(),
      order
    ));

    // give the second tx
    let TransactionKind::Provided(order) = tx3.kind() else { panic!("tx wasn't provided") };
    assert!(!Blockchain::<MemDb, ProvidedTransaction>::locally_provided_txs_in_block(
      &db,
      &genesis,
      &block1.hash(),
      order
    ));
    blockchain.provide_transaction(tx3).unwrap();

    // it should be ok now for the first block
    assert!(Blockchain::<MemDb, ProvidedTransaction>::locally_provided_txs_in_block(
      &db,
      &genesis,
      &block1.hash(),
      order
    ));

    // provide the second block txs
    let TransactionKind::Provided(order) = tx4.kind() else { panic!("tx wasn't provided") };
    // not ok yet
    assert!(!Blockchain::<MemDb, ProvidedTransaction>::locally_provided_txs_in_block(
      &db,
      &genesis,
      &block2.hash(),
      order
    ));
    blockchain.provide_transaction(tx4).unwrap();
    // ok now
    assert!(Blockchain::<MemDb, ProvidedTransaction>::locally_provided_txs_in_block(
      &db,
      &genesis,
      &block2.hash(),
      order
    ));

    // provide the second block txs
    let TransactionKind::Provided(order) = tx2.kind() else { panic!("tx wasn't provided") };
    assert!(!Blockchain::<MemDb, ProvidedTransaction>::locally_provided_txs_in_block(
      &db,
      &genesis,
      &block2.hash(),
      order
    ));
    blockchain.provide_transaction(tx2).unwrap();
    assert!(Blockchain::<MemDb, ProvidedTransaction>::locally_provided_txs_in_block(
      &db,
      &genesis,
      &block2.hash(),
      order
    ));
  }
}

#[tokio::test]
async fn tendermint_evidence_tx() {
  let genesis = new_genesis();
  let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
  let signer = Signer::new(genesis, key.clone());
  let signer_id = Ristretto::generator() * key.deref();
  let validators = Arc::new(Validators::new(genesis, vec![(signer_id, 1)]).unwrap());

  let (_, mut blockchain) = new_blockchain::<SignedTransaction>(genesis, &[]);
  let test = |blockchain: &mut Blockchain<MemDb, SignedTransaction>,
              mempool: Vec<Transaction<SignedTransaction>>,
              validators: Arc<Validators>| {
    let tip = blockchain.tip();
    for tx in mempool.clone() {
      let Transaction::Tendermint(tx) = tx else {
        panic!("non-tendermint tx found");
      };
      blockchain.add_transaction::<N>(true, Transaction::Tendermint(tx), &validators).unwrap();
    }
    let block = blockchain.build_block::<N>(&validators);
    assert_eq!(blockchain.tip(), tip);
    assert_eq!(block.header.parent, tip);

    // Make sure all transactions were included
    for bt in &block.transactions {
      assert!(mempool.contains(bt));
    }

    // Verify and add the block
    blockchain.verify_block::<N>(&block, &validators, false).unwrap();
    assert!(blockchain.add_block::<N>(&block, vec![], &validators).is_ok());
    assert_eq!(blockchain.tip(), block.hash());
  };

  // test with single tx
  let tx = random_evidence_tx::<N>(signer.into(), TendermintBlock(vec![0x12])).await;
  test(&mut blockchain, vec![Transaction::Tendermint(tx)], validators);

  // test with multiple txs
  let mut mempool: Vec<Transaction<SignedTransaction>> = vec![];
  let mut signers = vec![];
  for _ in 0 .. 5 {
    let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
    let signer = Signer::new(genesis, key.clone());
    let signer_id = Ristretto::generator() * key.deref();
    signers.push((signer_id, 1));
    mempool.push(Transaction::Tendermint(
      random_evidence_tx::<N>(signer.into(), TendermintBlock(vec![0x12])).await,
    ));
  }

  // update validators
  let validators = Arc::new(Validators::new(genesis, signers).unwrap());
  test(&mut blockchain, mempool, validators);
}

#[tokio::test]
async fn block_tx_ordering() {
  #[derive(Debug, PartialEq, Eq, Clone)]
  enum SignedTx {
    Signed(Box<SignedTransaction>),
    Provided(Box<ProvidedTransaction>),
  }
  impl ReadWrite for SignedTx {
    fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
      let mut kind = [0];
      reader.read_exact(&mut kind)?;
      match kind[0] {
        0 => Ok(SignedTx::Signed(Box::new(SignedTransaction::read(reader)?))),
        1 => Ok(SignedTx::Provided(Box::new(ProvidedTransaction::read(reader)?))),
        _ => Err(io::Error::other("invalid transaction type")),
      }
    }

    fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
      match self {
        SignedTx::Signed(signed) => {
          writer.write_all(&[0])?;
          signed.write(writer)
        }
        SignedTx::Provided(pro) => {
          writer.write_all(&[1])?;
          pro.write(writer)
        }
      }
    }
  }

  impl TransactionTrait for SignedTx {
    fn kind(&self) -> TransactionKind<'_> {
      match self {
        SignedTx::Signed(signed) => signed.kind(),
        SignedTx::Provided(pro) => pro.kind(),
      }
    }

    fn hash(&self) -> [u8; 32] {
      match self {
        SignedTx::Signed(signed) => signed.hash(),
        SignedTx::Provided(pro) => pro.hash(),
      }
    }

    fn verify(&self) -> Result<(), TransactionError> {
      Ok(())
    }
  }

  let genesis = new_genesis();

  let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
  // signer
  let signer = crate::tests::signed_transaction(&mut OsRng, genesis, &key, 0).1.signer;
  let validators = Arc::new(Validators::new(genesis, vec![(signer, 1)]).unwrap());

  let (_, mut blockchain) = new_blockchain::<SignedTx>(genesis, &[signer]);
  let tip = blockchain.tip();

  // add txs
  let mut mempool = vec![];
  let mut provided_txs = vec![];
  for i in 0 .. 128 {
    let signed_tx = Transaction::Application(SignedTx::Signed(Box::new(
      crate::tests::signed_transaction(&mut OsRng, genesis, &key, i),
    )));
    blockchain.add_transaction::<N>(true, signed_tx.clone(), &validators).unwrap();
    mempool.push(signed_tx);

    let unsigned_tx = Transaction::Tendermint(
      random_evidence_tx::<N>(
        Signer::new(genesis, key.clone()).into(),
        TendermintBlock(vec![u8::try_from(i).unwrap()]),
      )
      .await,
    );
    blockchain.add_transaction::<N>(true, unsigned_tx.clone(), &validators).unwrap();
    mempool.push(unsigned_tx);

    let provided_tx =
      SignedTx::Provided(Box::new(random_provided_transaction(&mut OsRng, "order1")));
    blockchain.provide_transaction(provided_tx.clone()).unwrap();
    provided_txs.push(provided_tx);
  }
  let block = blockchain.build_block::<N>(&validators);

  assert_eq!(blockchain.tip(), tip);
  assert_eq!(block.header.parent, tip);

  // Make sure all transactions were included
  assert_eq!(block.transactions.len(), 3 * 128);
  for bt in &block.transactions[128 ..] {
    assert!(mempool.contains(bt));
  }

  // check the tx order: all Provided, then all Unsigned, then all Signed
  // NOTE: the prior `take(128).skip(128)`/`take(128).skip(256)` chains iterated zero elements
  // (take before skip), silently skipping these assertions; corrected to skip-then-take
  let txs = &block.transactions;
  for tx in txs.iter().take(128) {
    assert!(matches!(tx.kind(), TransactionKind::Provided(..)));
  }
  for tx in txs.iter().skip(128).take(128) {
    assert!(matches!(tx.kind(), TransactionKind::Unsigned));
  }
  for tx in txs.iter().skip(256).take(128) {
    assert!(matches!(tx.kind(), TransactionKind::Signed(..)));
  }

  // should be a valid block
  blockchain.verify_block::<N>(&block, &validators, false).unwrap();

  // Unsigned before Provided
  {
    let mut block = block.clone();
    // Doesn't use swap to preserve the order of Provided, as that's checked before kind ordering
    let unsigned = block.transactions.remove(128);
    block.transactions.insert(0, unsigned);
    assert_eq!(
      blockchain.verify_block::<N>(&block, &validators, false).unwrap_err(),
      BlockError::WrongTransactionOrder
    );
  }

  // Signed before Provided
  {
    let mut block = block.clone();
    let signed = block.transactions.remove(256);
    block.transactions.insert(0, signed);
    assert_eq!(
      blockchain.verify_block::<N>(&block, &validators, false).unwrap_err(),
      BlockError::WrongTransactionOrder
    );
  }

  // Signed before Unsigned
  {
    let mut block = block;
    block.transactions.swap(128, 256);
    assert_eq!(
      blockchain.verify_block::<N>(&block, &validators, false).unwrap_err(),
      BlockError::WrongTransactionOrder
    );
  }
}

// ================================================
// FILE: coordinator/tributary/src/tests/mempool.rs
// ================================================

use std::{sync::Arc, collections::HashMap};

use zeroize::Zeroizing;
use rand::{RngCore, rngs::OsRng};

use dalek_ff_group::Ristretto;
use ciphersuite::{group::ff::Field, Ciphersuite};

use tendermint::ext::Commit;

use serai_db::MemDb;

use crate::{
  transaction::{TransactionError, Transaction as TransactionTrait},
  tendermint::{TendermintBlock, Validators, Signer, TendermintNetwork},
  ACCOUNT_MEMPOOL_LIMIT, Transaction, Mempool,
  tests::{SignedTransaction, signed_transaction, p2p::DummyP2p, random_evidence_tx},
};

type N = TendermintNetwork<MemDb, SignedTransaction, DummyP2p>;
fn new_mempool() -> ([u8; 32], MemDb, Mempool) { let mut genesis = [0; 32]; OsRng.fill_bytes(&mut genesis); let db = MemDb::new(); (genesis, db.clone(), Mempool::new(db, genesis)) } #[tokio::test] async fn mempool_addition() { let (genesis, db, mut mempool) = new_mempool::(); let commit = |_: u64| -> Option>> { Some(Commit::> { end_time: 0, validators: vec![], signature: vec![] }) }; let unsigned_in_chain = |_: [u8; 32]| false; let key = Zeroizing::new(::F::random(&mut OsRng)); let first_tx = signed_transaction(&mut OsRng, genesis, &key, 0); let signer = first_tx.1.signer; assert_eq!(mempool.next_nonce_in_mempool(&signer, vec![]), None); // validators let validators = Arc::new(Validators::new(genesis, vec![(signer, 1)]).unwrap()); // Add TX 0 assert!(mempool .add::( &|_, _| Some(0), true, Transaction::Application(first_tx.clone()), &validators, unsigned_in_chain, commit, ) .unwrap()); assert_eq!(mempool.next_nonce_in_mempool(&signer, vec![]), Some(1)); // add a tendermint evidence tx let evidence_tx = random_evidence_tx::(Signer::new(genesis, key.clone()).into(), TendermintBlock(vec![])) .await; assert!(mempool .add::( &|_, _| None, true, Transaction::Tendermint(evidence_tx.clone()), &validators, unsigned_in_chain, commit, ) .unwrap()); // Test reloading works assert_eq!(mempool, Mempool::new(db, genesis)); // Adding them again should fail assert_eq!( mempool.add::( &|_, _| Some(0), true, Transaction::Application(first_tx.clone()), &validators, unsigned_in_chain, commit, ), Err(TransactionError::InvalidNonce) ); assert_eq!( mempool.add::( &|_, _| None, true, Transaction::Tendermint(evidence_tx.clone()), &validators, unsigned_in_chain, commit, ), Ok(false) ); // Do the same with the next nonce let second_tx = signed_transaction(&mut OsRng, genesis, &key, 1); assert_eq!( mempool.add::( &|_, _| Some(0), true, Transaction::Application(second_tx.clone()), &validators, unsigned_in_chain, commit, ), Ok(true) ); assert_eq!(mempool.next_nonce_in_mempool(&signer, vec![]), 
Some(2)); assert_eq!( mempool.add::( &|_, _| Some(0), true, Transaction::Application(second_tx.clone()), &validators, unsigned_in_chain, commit, ), Err(TransactionError::InvalidNonce) ); // If the mempool doesn't have a nonce for an account, it should successfully use the // blockchain's let second_key = Zeroizing::new(::F::random(&mut OsRng)); let tx = signed_transaction(&mut OsRng, genesis, &second_key, 2); let second_signer = tx.1.signer; assert_eq!(mempool.next_nonce_in_mempool(&second_signer, vec![]), None); assert!(mempool .add::( &|_, _| Some(2), true, Transaction::Application(tx.clone()), &validators, unsigned_in_chain, commit ) .unwrap()); assert_eq!(mempool.next_nonce_in_mempool(&second_signer, vec![]), Some(3)); // Getting a block should work assert_eq!(mempool.block().len(), 4); // Removing should successfully prune mempool.remove(&tx.hash()); assert_eq!( mempool.txs(), &HashMap::from([ (first_tx.hash(), Transaction::Application(first_tx)), (second_tx.hash(), Transaction::Application(second_tx)), (evidence_tx.hash(), Transaction::Tendermint(evidence_tx)) ]) ); } #[test] fn too_many_mempool() { let (genesis, _, mut mempool) = new_mempool::(); let validators = Arc::new(Validators::new(genesis, vec![]).unwrap()); let commit = |_: u64| -> Option>> { Some(Commit::> { end_time: 0, validators: vec![], signature: vec![] }) }; let unsigned_in_chain = |_: [u8; 32]| false; let key = Zeroizing::new(::F::random(&mut OsRng)); // We should be able to add transactions up to the limit for i in 0 .. 
ACCOUNT_MEMPOOL_LIMIT { assert!(mempool .add::( &|_, _| Some(0), false, Transaction::Application(signed_transaction(&mut OsRng, genesis, &key, i)), &validators, unsigned_in_chain, commit, ) .unwrap()); } // Yet adding more should fail assert_eq!( mempool.add::( &|_, _| Some(0), false, Transaction::Application(signed_transaction( &mut OsRng, genesis, &key, ACCOUNT_MEMPOOL_LIMIT )), &validators, unsigned_in_chain, commit, ), Err(TransactionError::TooManyInMempool) ); } ================================================ FILE: coordinator/tributary/src/tests/merkle.rs ================================================ use std::collections::HashSet; use rand::{RngCore, rngs::OsRng}; #[test] fn merkle() { let mut used = HashSet::new(); // Test this produces a unique root let mut test = |hashes: &[[u8; 32]]| { let hash = crate::merkle(hashes); assert!(!used.contains(&hash)); used.insert(hash); }; // Zero should be a special case which return 0 assert_eq!(crate::merkle(&[]), [0; 32]); test(&[]); let mut one = [0; 32]; OsRng.fill_bytes(&mut one); let mut two = [0; 32]; OsRng.fill_bytes(&mut two); let mut three = [0; 32]; OsRng.fill_bytes(&mut three); // Make sure it's deterministic assert_eq!(crate::merkle(&[one]), crate::merkle(&[one])); // Test a few basic structures test(&[one]); test(&[one, two]); test(&[one, two, three]); test(&[one, three]); } ================================================ FILE: coordinator/tributary/src/tests/mod.rs ================================================ #[cfg(test)] mod tendermint; mod transaction; pub use transaction::*; #[cfg(test)] mod merkle; #[cfg(test)] mod block; #[cfg(test)] mod blockchain; #[cfg(test)] mod mempool; #[cfg(test)] mod p2p; ================================================ FILE: coordinator/tributary/src/tests/p2p.rs ================================================ pub use crate::P2p; #[derive(Clone, Debug)] pub struct DummyP2p; #[async_trait::async_trait] impl P2p for DummyP2p { async fn broadcast(&self, _: [u8; 32], _: 
Vec<u8>) {
    unimplemented!()
  }
}

// ================================================
// FILE: coordinator/tributary/src/tests/tendermint.rs
// ================================================

use tendermint::ext::Network;

use crate::{
  P2p, TendermintTx,
  tendermint::{TARGET_BLOCK_TIME, TendermintNetwork},
};

#[test]
fn assert_target_block_time() {
  use serai_db::MemDb;

  #[derive(Clone, Debug)]
  pub struct DummyP2p;

  #[async_trait::async_trait]
  impl P2p for DummyP2p {
    async fn broadcast(&self, _: [u8; 32], _: Vec<u8>) {
      unimplemented!()
    }
  }

  // Type parameters don't matter here since we only need to call the block_time()
  // and it only relies on the constants of the trait implementation. block_time() is in seconds,
  // TARGET_BLOCK_TIME is in milliseconds.
  assert_eq!(
    <TendermintNetwork<MemDb, TendermintTx, DummyP2p> as Network>::block_time(),
    TARGET_BLOCK_TIME / 1000
  )
}

// ================================================
// FILE: coordinator/tributary/src/tests/transaction/mod.rs
// ================================================

use core::ops::Deref;
use std::{sync::Arc, io};

use zeroize::Zeroizing;
use rand::{RngCore, CryptoRng, rngs::OsRng};

use blake2::{Digest, Blake2s256};

use dalek_ff_group::Ristretto;
use ciphersuite::{
  group::{ff::Field, Group},
  Ciphersuite,
};
use schnorr::SchnorrSignature;

use scale::Encode;

use ::tendermint::{
  ext::{Network, Signer as SignerTrait, SignatureScheme, BlockNumber, RoundNumber},
  SignedMessageFor, DataFor, Message, SignedMessage, Data, Evidence,
};

use crate::{
  transaction::{Signed, TransactionError, TransactionKind, Transaction, verify_transaction},
  ReadWrite,
  tendermint::{tx::TendermintTx, Validators, Signer},
};

#[cfg(test)]
mod signed;

#[cfg(test)]
mod tendermint;

/// A Signed with a random signer, nonce, and (invalid) signature.
pub fn random_signed<R: RngCore + CryptoRng>(rng: &mut R) -> Signed {
  Signed {
    signer: <Ristretto as Ciphersuite>::G::random(&mut *rng),
    nonce: u32::try_from(rng.next_u64() >> 32 >> 1).unwrap(),
    signature: SchnorrSignature::<Ristretto> {
      R: <Ristretto as Ciphersuite>::G::random(&mut *rng),
      s: <Ristretto as Ciphersuite>::F::random(rng),
    },
  }
}

/// As random_signed, yet with the specified nonce.
pub fn random_signed_with_nonce<R: RngCore + CryptoRng>(rng: &mut R, nonce: u32) -> Signed {
  let mut signed = random_signed(rng);
  signed.nonce = nonce;
  signed
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct ProvidedTransaction(pub Vec<u8>);

impl ReadWrite for ProvidedTransaction {
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut len = [0; 4];
    reader.read_exact(&mut len)?;
    let mut data = vec![0; usize::try_from(u32::from_le_bytes(len)).unwrap()];
    reader.read_exact(&mut data)?;
    Ok(ProvidedTransaction(data))
  }

  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    writer.write_all(&u32::try_from(self.0.len()).unwrap().to_le_bytes())?;
    writer.write_all(&self.0)
  }
}

impl Transaction for ProvidedTransaction {
  fn kind(&self) -> TransactionKind<'_> {
    // The first byte of the payload selects the order
    match self.0[0] {
      1 => TransactionKind::Provided("order1"),
      2 => TransactionKind::Provided("order2"),
      _ => panic!("unknown order"),
    }
  }

  fn hash(&self) -> [u8; 32] {
    Blake2s256::digest(self.serialize()).into()
  }

  fn verify(&self) -> Result<(), TransactionError> {
    Ok(())
  }
}

/// A random ProvidedTransaction for the specified order ("order1" or "order2").
pub fn random_provided_transaction<R: RngCore>(rng: &mut R, order: &str) -> ProvidedTransaction {
  let mut data = vec![0; 512];
  rng.fill_bytes(&mut data);
  data[0] = match order {
    "order1" => 1,
    "order2" => 2,
    _ => panic!("unknown order"),
  };
  ProvidedTransaction(data)
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct SignedTransaction(pub Vec<u8>, pub Signed);

impl ReadWrite for SignedTransaction {
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut len = [0; 4];
    reader.read_exact(&mut len)?;
    let mut data = vec![0; usize::try_from(u32::from_le_bytes(len)).unwrap()];
    reader.read_exact(&mut data)?;
    Ok(SignedTransaction(data, Signed::read(reader)?))
  }

  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    writer.write_all(&u32::try_from(self.0.len()).unwrap().to_le_bytes())?;
    writer.write_all(&self.0)?;
    self.1.write(writer)
  }
}

impl Transaction for SignedTransaction {
  fn kind(&self) -> TransactionKind<'_> {
    TransactionKind::Signed(vec![], &self.1)
  }

  fn hash(&self) -> [u8; 32] {
    // The hash explicitly excludes the 64-byte signature at the end of the serialization
    let serialized = self.serialize();
    Blake2s256::digest(&serialized[.. (serialized.len() - 64)]).into()
  }

  fn verify(&self) -> Result<(), TransactionError> {
    Ok(())
  }
}

/// A SignedTransaction with a valid signature by the specified key, for the specified nonce.
pub fn signed_transaction<R: RngCore + CryptoRng>(
  rng: &mut R,
  genesis: [u8; 32],
  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
  nonce: u32,
) -> SignedTransaction {
  let mut data = vec![0; 512];
  rng.fill_bytes(&mut data);

  let signer = <Ristretto as Ciphersuite>::generator() * **key;
  let mut tx =
    SignedTransaction(data, Signed { signer, nonce, signature: random_signed(rng).signature });

  let sig_nonce = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(rng));
  tx.1.signature.R = Ristretto::generator() * sig_nonce.deref();
  tx.1.signature = SchnorrSignature::sign(key, sig_nonce, tx.sig_hash(genesis));

  verify_transaction(&tx, genesis, &mut |_, _| Some(tx.1.nonce)).unwrap();
  tx
}

/// A valid SignedTransaction under a random genesis/key/nonce.
pub fn random_signed_transaction<R: RngCore + CryptoRng>(
  rng: &mut R,
) -> ([u8; 32], SignedTransaction) {
  let mut genesis = [0; 32];
  rng.fill_bytes(&mut genesis);

  let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut *rng));
  // Shift over an additional bit to ensure it won't overflow when incremented
  let nonce = u32::try_from(rng.next_u64() >> 32 >> 1).unwrap();
  (genesis, signed_transaction(rng, genesis, &key, nonce))
}

/// A random genesis hash.
pub fn new_genesis() -> [u8; 32] {
  let mut genesis = [0; 32];
  OsRng.fill_bytes(&mut genesis);
  genesis
}

/// A fresh genesis, signer, its validator ID, and a validator set solely containing it.
pub async fn tendermint_meta() -> ([u8; 32], Signer, [u8; 32], Arc<Validators>) {
  // signer
  let genesis = new_genesis();
  let signer =
    Signer::new(genesis, Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng)));
  let validator_id = signer.validator_id().await.unwrap();

  // schema
  let signer_pub =
    <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut validator_id.as_slice()).unwrap();
  let validators = Arc::new(Validators::new(genesis, vec![(signer_pub, 1)]).unwrap());

  (genesis, signer, validator_id, validators)
}

/// Sign a Tendermint message over the specified data.
pub async fn signed_from_data<N: Network>(
  signer: <N::SignatureScheme as SignatureScheme>::Signer,
  signer_id: N::ValidatorId,
  block_number: u64,
  round_number: u32,
  data: DataFor<N>,
) -> SignedMessageFor<N> {
  let msg = Message {
    sender: signer_id,
    block: BlockNumber(block_number),
    round: RoundNumber(round_number),
    data,
  };
  let sig = signer.sign(&msg.encode()).await;
  SignedMessage { msg, sig }
}

pub async fn
random_evidence_tx( signer: ::Signer, b: N::Block, ) -> TendermintTx { // Creates a TX with an invalid valid round number // TODO: Use a random failure reason let data = Data::Proposal(Some(RoundNumber(0)), b); let signer_id = signer.validator_id().await.unwrap(); let signed = signed_from_data::(signer, signer_id, 0, 0, data).await; TendermintTx::SlashEvidence(Evidence::InvalidValidRound(signed.encode())) } ================================================ FILE: coordinator/tributary/src/tests/transaction/signed.rs ================================================ use rand::rngs::OsRng; use blake2::{Digest, Blake2s256}; use dalek_ff_group::Ristretto; use ciphersuite::{group::ff::Field, Ciphersuite}; use crate::{ ReadWrite, transaction::{Signed, Transaction, verify_transaction}, tests::{random_signed, random_signed_transaction}, }; #[test] fn serialize_signed() { let signed = random_signed(&mut rand::rngs::OsRng); assert_eq!(Signed::read::<&[u8]>(&mut signed.serialize().as_ref()).unwrap(), signed); } #[test] fn sig_hash() { let (genesis, tx1) = random_signed_transaction(&mut OsRng); assert!(tx1.sig_hash(genesis) != tx1.sig_hash(Blake2s256::digest(genesis).into())); let (_, tx2) = random_signed_transaction(&mut OsRng); assert!(tx1.hash() != tx2.hash()); assert!(tx1.sig_hash(genesis) != tx2.sig_hash(genesis)); } #[test] fn signed_transaction() { let (genesis, tx) = random_signed_transaction(&mut OsRng); // Mutate various properties and verify it no longer works // Different genesis assert!(verify_transaction(&tx, Blake2s256::digest(genesis).into(), &mut |_, _| Some( tx.1.nonce )) .is_err()); // Different data { let mut tx = tx.clone(); tx.0 = Blake2s256::digest(tx.0).to_vec(); assert!(verify_transaction(&tx, genesis, &mut |_, _| Some(tx.1.nonce)).is_err()); } // Different signer { let mut tx = tx.clone(); tx.1.signer += Ristretto::generator(); assert!(verify_transaction(&tx, genesis, &mut |_, _| Some(tx.1.nonce)).is_err()); } // Different nonce { 
#[allow(clippy::redundant_clone)] // False positive? let mut tx = tx.clone(); tx.1.nonce = tx.1.nonce.wrapping_add(1); assert!(verify_transaction(&tx, genesis, &mut |_, _| Some(tx.1.nonce)).is_err()); } // Different signature { let mut tx = tx.clone(); tx.1.signature.R += Ristretto::generator(); assert!(verify_transaction(&tx, genesis, &mut |_, _| Some(tx.1.nonce)).is_err()); } { let mut tx = tx.clone(); tx.1.signature.s += ::F::ONE; assert!(verify_transaction(&tx, genesis, &mut |_, _| Some(tx.1.nonce)).is_err()); } // Sanity check the original TX was never mutated and is valid verify_transaction(&tx, genesis, &mut |_, _| Some(tx.1.nonce)).unwrap(); } #[test] fn invalid_nonce() { let (genesis, tx) = random_signed_transaction(&mut OsRng); assert!(verify_transaction(&tx, genesis, &mut |_, _| Some(tx.1.nonce.wrapping_add(1)),).is_err()); } ================================================ FILE: coordinator/tributary/src/tests/transaction/tendermint.rs ================================================ use std::sync::Arc; use zeroize::Zeroizing; use rand::{RngCore, rngs::OsRng}; use dalek_ff_group::Ristretto; use ciphersuite::{Ciphersuite, group::ff::Field}; use scale::Encode; use tendermint::{ time::CanonicalInstant, round::RoundData, Data, commit_msg, Evidence, ext::{RoundNumber, Commit, Signer as SignerTrait}, }; use serai_db::MemDb; use crate::{ ReadWrite, tendermint::{ tx::{TendermintTx, verify_tendermint_tx}, TendermintBlock, Signer, Validators, TendermintNetwork, }, tests::{ p2p::DummyP2p, SignedTransaction, random_evidence_tx, tendermint_meta, signed_from_data, }, }; type N = TendermintNetwork; #[tokio::test] async fn serialize_tendermint() { // make a tendermint tx with random evidence let (_, signer, _, _) = tendermint_meta().await; let tx = random_evidence_tx::(signer.into(), TendermintBlock(vec![])).await; let res = TendermintTx::read::<&[u8]>(&mut tx.serialize().as_ref()).unwrap(); assert_eq!(res, tx); } #[tokio::test] async fn invalid_valid_round() { // 
signer let (_, signer, signer_id, validators) = tendermint_meta().await; let commit = |_: u64| -> Option>> { Some(Commit::> { end_time: 0, validators: vec![], signature: vec![] }) }; let valid_round_tx = |valid_round| { let signer = signer.clone(); async move { let data = Data::Proposal(valid_round, TendermintBlock(vec![])); let signed = signed_from_data::(signer.clone().into(), signer_id, 0, 0, data).await; (signed.clone(), TendermintTx::SlashEvidence(Evidence::InvalidValidRound(signed.encode()))) } }; // This should be invalid evidence if a valid valid round is specified let (_, tx) = valid_round_tx(None).await; assert!(verify_tendermint_tx::(&tx, &validators, commit).is_err()); // If an invalid valid round is specified (>= current), this should be invalid evidence let (mut signed, tx) = valid_round_tx(Some(RoundNumber(0))).await; // should pass verify_tendermint_tx::(&tx, &validators, commit).unwrap(); // change the signature let mut random_sig = [0u8; 64]; OsRng.fill_bytes(&mut random_sig); signed.sig = random_sig; let tx = TendermintTx::SlashEvidence(Evidence::InvalidValidRound(signed.encode())); // should fail assert!(verify_tendermint_tx::(&tx, &validators, commit).is_err()); } #[tokio::test] async fn invalid_precommit_signature() { let (_, signer, signer_id, validators) = tendermint_meta().await; let commit = |i: u64| -> Option>> { assert_eq!(i, 0); Some(Commit::> { end_time: 0, validators: vec![], signature: vec![] }) }; let precommit = |precommit| { let signer = signer.clone(); async move { let signed = signed_from_data::(signer.clone().into(), signer_id, 1, 0, Data::Precommit(precommit)) .await; (signed.clone(), TendermintTx::SlashEvidence(Evidence::InvalidPrecommit(signed.encode()))) } }; // Empty Precommit should fail. assert!(verify_tendermint_tx::(&precommit(None).await.1, &validators, commit).is_err()); // valid precommit signature should fail. 
let block_id = [0x22u8; 32]; let last_end_time = RoundData::::new(RoundNumber(0), CanonicalInstant::new(commit(0).unwrap().end_time)) .end_time(); let commit_msg = commit_msg(last_end_time.canonical(), block_id.as_ref()); assert!(verify_tendermint_tx::( &precommit(Some((block_id, signer.clone().sign(&commit_msg).await))).await.1, &validators, commit ) .is_err()); // any other signature can be used as evidence. { let (mut signed, tx) = precommit(Some((block_id, signer.sign(&[]).await))).await; verify_tendermint_tx::(&tx, &validators, commit).unwrap(); // So long as we can authenticate where it came from let mut random_sig = [0u8; 64]; OsRng.fill_bytes(&mut random_sig); signed.sig = random_sig; let tx = TendermintTx::SlashEvidence(Evidence::InvalidPrecommit(signed.encode())); assert!(verify_tendermint_tx::(&tx, &validators, commit).is_err()); } } #[tokio::test] async fn evidence_with_prevote() { let (_, signer, signer_id, validators) = tendermint_meta().await; let commit = |_: u64| -> Option>> { Some(Commit::> { end_time: 0, validators: vec![], signature: vec![] }) }; let prevote = |block_id| { let signer = signer.clone(); async move { // it should fail for all reasons. 
let mut txs = vec![]; txs.push(TendermintTx::SlashEvidence(Evidence::InvalidPrecommit( signed_from_data::(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id)) .await .encode(), ))); txs.push(TendermintTx::SlashEvidence(Evidence::InvalidValidRound( signed_from_data::(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id)) .await .encode(), ))); // Since these require a second message, provide this one again // ConflictingMessages can be fired for actually conflicting Prevotes however txs.push(TendermintTx::SlashEvidence(Evidence::ConflictingMessages( signed_from_data::(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id)) .await .encode(), signed_from_data::(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id)) .await .encode(), ))); txs } }; // No prevote message alone should be valid as slash evidence at this time for prevote in prevote(None).await { assert!(verify_tendermint_tx::(&prevote, &validators, commit).is_err()); } for prevote in prevote(Some([0x22u8; 32])).await { assert!(verify_tendermint_tx::(&prevote, &validators, commit).is_err()); } } #[tokio::test] async fn conflicting_msgs_evidence_tx() { let (genesis, signer, signer_id, validators) = tendermint_meta().await; let commit = |i: u64| -> Option>> { assert_eq!(i, 0); Some(Commit::> { end_time: 0, validators: vec![], signature: vec![] }) }; // Block b, round n let signed_for_b_r = |block, round, data| { let signer = signer.clone(); async move { signed_from_data::(signer.clone().into(), signer_id, block, round, data).await } }; // Proposal { // non-conflicting data should fail let signed_1 = signed_for_b_r(0, 0, Data::Proposal(None, TendermintBlock(vec![0x11]))).await; let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages( signed_1.encode(), signed_1.encode(), )); assert!(verify_tendermint_tx::(&tx, &validators, commit).is_err()); // conflicting data should pass let signed_2 = signed_for_b_r(0, 0, Data::Proposal(None, 
TendermintBlock(vec![0x22]))).await; let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages( signed_1.encode(), signed_2.encode(), )); verify_tendermint_tx::(&tx, &validators, commit).unwrap(); // Except if it has a distinct round number, as we don't check cross-round conflicts // (except for Precommit) let signed_2 = signed_for_b_r(0, 1, Data::Proposal(None, TendermintBlock(vec![0x22]))).await; let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages( signed_1.encode(), signed_2.encode(), )); verify_tendermint_tx::(&tx, &validators, commit).unwrap_err(); // Proposals for different block numbers should also fail as evidence let signed_2 = signed_for_b_r(1, 0, Data::Proposal(None, TendermintBlock(vec![0x22]))).await; let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages( signed_1.encode(), signed_2.encode(), )); verify_tendermint_tx::(&tx, &validators, commit).unwrap_err(); } // Prevote { // non-conflicting data should fail let signed_1 = signed_for_b_r(0, 0, Data::Prevote(Some([0x11; 32]))).await; let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages( signed_1.encode(), signed_1.encode(), )); assert!(verify_tendermint_tx::(&tx, &validators, commit).is_err()); // conflicting data should pass let signed_2 = signed_for_b_r(0, 0, Data::Prevote(Some([0x22; 32]))).await; let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages( signed_1.encode(), signed_2.encode(), )); verify_tendermint_tx::(&tx, &validators, commit).unwrap(); // Except if it has a distinct round number, as we don't check cross-round conflicts // (except for Precommit) let signed_2 = signed_for_b_r(0, 1, Data::Prevote(Some([0x22; 32]))).await; let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages( signed_1.encode(), signed_2.encode(), )); verify_tendermint_tx::(&tx, &validators, commit).unwrap_err(); // Proposals for different block numbers should also fail as evidence let signed_2 = signed_for_b_r(1, 0, Data::Prevote(Some([0x22; 
32]))).await; let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages( signed_1.encode(), signed_2.encode(), )); verify_tendermint_tx::(&tx, &validators, commit).unwrap_err(); } // msgs from different senders should fail { let signed_1 = signed_for_b_r(0, 0, Data::Proposal(None, TendermintBlock(vec![0x11]))).await; let signer_2 = Signer::new(genesis, Zeroizing::new(::F::random(&mut OsRng))); let signed_id_2 = signer_2.validator_id().await.unwrap(); let signed_2 = signed_from_data::( signer_2.into(), signed_id_2, 0, 0, Data::Proposal(None, TendermintBlock(vec![0x22])), ) .await; let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages( signed_1.encode(), signed_2.encode(), )); // update schema so that we don't fail due to invalid signature let signer_pub = ::read_G::<&[u8]>(&mut signer_id.as_slice()).unwrap(); let signer_pub_2 = ::read_G::<&[u8]>(&mut signed_id_2.as_slice()).unwrap(); let validators = Arc::new(Validators::new(genesis, vec![(signer_pub, 1), (signer_pub_2, 1)]).unwrap()); assert!(verify_tendermint_tx::(&tx, &validators, commit).is_err()); } // msgs with different steps should fail { let signed_1 = signed_for_b_r(0, 0, Data::Proposal(None, TendermintBlock(vec![]))).await; let signed_2 = signed_for_b_r(0, 0, Data::Prevote(None)).await; let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages( signed_1.encode(), signed_2.encode(), )); assert!(verify_tendermint_tx::(&tx, &validators, commit).is_err()); } } ================================================ FILE: coordinator/tributary/src/transaction.rs ================================================ use core::fmt::Debug; use std::io; use zeroize::Zeroize; use thiserror::Error; use blake2::{Digest, Blake2b512}; use dalek_ff_group::Ristretto; use ciphersuite::{ group::{Group, GroupEncoding}, Ciphersuite, }; use schnorr::SchnorrSignature; use crate::{TRANSACTION_SIZE_LIMIT, ReadWrite}; #[derive(Clone, PartialEq, Eq, Debug, Error)] pub enum TransactionError { /// Transaction 
exceeded the size limit. #[error("transaction is too large")] TooLargeTransaction, /// Transaction's signer isn't a participant. #[error("invalid signer")] InvalidSigner, /// Transaction's nonce isn't the prior nonce plus one. #[error("invalid nonce")] InvalidNonce, /// Transaction's signature is invalid. #[error("invalid signature")] InvalidSignature, /// Transaction's content is invalid. #[error("transaction content is invalid")] InvalidContent, /// Transaction's signer has too many transactions in the mempool. #[error("signer has too many transactions in the mempool")] TooManyInMempool, /// Provided Transaction added to mempool. #[error("provided transaction added to mempool")] ProvidedAddedToMempool, } /// Data for a signed transaction. #[derive(Clone, PartialEq, Eq, Debug)] pub struct Signed { pub signer: ::G, pub nonce: u32, pub signature: SchnorrSignature, } impl ReadWrite for Signed { fn read(reader: &mut R) -> io::Result { let signer = Ristretto::read_G(reader)?; let mut nonce = [0; 4]; reader.read_exact(&mut nonce)?; let nonce = u32::from_le_bytes(nonce); if nonce >= (u32::MAX - 1) { Err(io::Error::other("nonce exceeded limit"))?; } let mut signature = SchnorrSignature::::read(reader)?; if signature.R.is_identity().into() { // Anyone malicious could remove this and try to find zero signatures // We should never produce zero signatures though meaning this should never come up // If it does somehow come up, this is a decent courtesy signature.zeroize(); Err(io::Error::other("signature nonce was identity"))?; } Ok(Signed { signer, nonce, signature }) } fn write(&self, writer: &mut W) -> io::Result<()> { // This is either an invalid signature or a private key leak if self.signature.R.is_identity().into() { Err(io::Error::other("signature nonce was identity"))?; } writer.write_all(&self.signer.to_bytes())?; writer.write_all(&self.nonce.to_le_bytes())?; self.signature.write(writer) } } impl Signed { pub fn read_without_nonce(reader: &mut R, nonce: u32) -> 
io::Result { let signer = Ristretto::read_G(reader)?; let mut signature = SchnorrSignature::::read(reader)?; if signature.R.is_identity().into() { // Anyone malicious could remove this and try to find zero signatures // We should never produce zero signatures though meaning this should never come up // If it does somehow come up, this is a decent courtesy signature.zeroize(); Err(io::Error::other("signature nonce was identity"))?; } Ok(Signed { signer, nonce, signature }) } pub fn write_without_nonce(&self, writer: &mut W) -> io::Result<()> { // This is either an invalid signature or a private key leak if self.signature.R.is_identity().into() { Err(io::Error::other("signature nonce was identity"))?; } writer.write_all(&self.signer.to_bytes())?; self.signature.write(writer) } } #[allow(clippy::large_enum_variant)] #[derive(Clone, PartialEq, Eq, Debug)] pub enum TransactionKind<'a> { /// This transaction should be provided by every validator, in an exact order. /// /// The contained static string names the orderer to use. This allows two distinct provided /// transaction kinds, without a synchronized order, to be ordered within their own kind without /// requiring ordering with each other. /// /// The only malleability is in when this transaction appears on chain. The block producer will /// include it when they have it. Block verification will fail for validators without it. /// /// If a supermajority of validators produce a commit for a block with a provided transaction /// which isn't locally held, the block will be added to the local chain. When the transaction is /// locally provided, it will be compared for correctness to the on-chain version /// /// In order to ensure TXs aren't accidentally provided multiple times, all provided transactions /// must have a unique hash which is also unique to all Unsigned transactions. Provided(&'static str), /// An unsigned transaction, only able to be included by the block producer. 
/// /// Once an Unsigned transaction is included on-chain, it may not be included again. In order to /// have multiple Unsigned transactions with the same values included on-chain, some distinct /// nonce must be included in order to cause a distinct hash. /// /// The hash must also be unique with all Provided transactions. Unsigned, /// A signed transaction. Signed(Vec, &'a Signed), } // TODO: Should this be renamed TransactionTrait now that a literal Transaction exists? // Or should the literal Transaction be renamed to Event? pub trait Transaction: 'static + Send + Sync + Clone + Eq + Debug + ReadWrite { /// Return what type of transaction this is. fn kind(&self) -> TransactionKind<'_>; /// Return the hash of this transaction. /// /// The hash must NOT commit to the signature. fn hash(&self) -> [u8; 32]; /// Perform transaction-specific verification. fn verify(&self) -> Result<(), TransactionError>; /// Obtain the challenge for this transaction's signature. /// /// Do not override this unless you know what you're doing. /// /// Panics if called on non-signed transactions. fn sig_hash(&self, genesis: [u8; 32]) -> ::F { match self.kind() { TransactionKind::Signed(order, Signed { signature, .. 
}) => { ::F::from_bytes_mod_order_wide( &Blake2b512::digest( [ b"Tributary Signed Transaction", genesis.as_ref(), &self.hash(), order.as_ref(), signature.R.to_bytes().as_ref(), ] .concat(), ) .into(), ) } _ => panic!("sig_hash called on non-signed transaction"), } } } pub trait GAIN: FnMut(&::G, &[u8]) -> Option {} impl::G, &[u8]) -> Option> GAIN for F {} pub(crate) fn verify_transaction( tx: &T, genesis: [u8; 32], get_and_increment_nonce: &mut F, ) -> Result<(), TransactionError> { if tx.serialize().len() > TRANSACTION_SIZE_LIMIT { Err(TransactionError::TooLargeTransaction)?; } tx.verify()?; match tx.kind() { TransactionKind::Provided(_) | TransactionKind::Unsigned => {} TransactionKind::Signed(order, Signed { signer, nonce, signature }) => { if let Some(next_nonce) = get_and_increment_nonce(signer, &order) { if *nonce != next_nonce { Err(TransactionError::InvalidNonce)?; } } else { // Not a participant Err(TransactionError::InvalidSigner)?; } // TODO: Use a batch verification here if !signature.verify(*signer, tx.sig_hash(genesis)) { Err(TransactionError::InvalidSignature)?; } } } Ok(()) } ================================================ FILE: coordinator/tributary/tendermint/Cargo.toml ================================================ [package] name = "tendermint-machine" version = "0.2.0" description = "An implementation of the Tendermint state machine in Rust" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/tendermint" authors = ["Luke Parker "] edition = "2021" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] async-trait = { version = "0.1", default-features = false } thiserror = { version = "1", default-features = false } hex = { version = "0.4", default-features = false, features = ["std"] } log = { version = "0.4", default-features = false, features = ["std"] } parity-scale-codec = { version = "3", default-features = false, features = ["std", 
"derive"] } futures-util = { version = "0.3", default-features = false, features = ["std", "async-await-macro", "sink", "channel"] } futures-channel = { version = "0.3", default-features = false, features = ["std", "sink"] } patchable-async-sleep = { version = "0.1", path = "../../../common/patchable-async-sleep", default-features = false } serai-db = { path = "../../../common/db", version = "0.1", default-features = false } [dev-dependencies] tokio = { version = "1", features = ["sync", "rt-multi-thread", "macros"] } ================================================ FILE: coordinator/tributary/tendermint/LICENSE ================================================ MIT License Copyright (c) 2022-2023 Luke Parker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: coordinator/tributary/tendermint/README.md ================================================ # Tendermint An implementation of the Tendermint state machine in Rust. 
This is solely the state machine, intended to be mapped to any arbitrary system. It supports an arbitrary signature scheme, weighting, and block definition accordingly. It is not intended to work with the Cosmos SDK, solely to be an implementation of the [academic protocol](https://arxiv.org/pdf/1807.04938.pdf). ### Caveats - Only SCALE serialization is supported currently. Ideally, everything from SCALE to borsh to bincode would be supported. SCALE was chosen due to this being under Serai, which uses Substrate, which uses SCALE. Accordingly, when deciding which of the three (mutually incompatible) options to support... - The only supported runtime is tokio due to requiring a `sleep` implementation. Ideally, the runtime choice will be moved to a feature in the future. - It is possible for `add_block` to be called on a block which failed (or never went through in the first place) validation. This is a break from the paper which is accepted here. This is for two reasons. 1) Serai needing this functionality. 2) If a block is committed which is invalid, either there's a malicious majority now defining consensus OR the local node is malicious by virtue of being faulty. Considering how either represents a fatal circumstance, except with regards to system like Serai which have their own logic for pseudo-valid blocks, it is accepted as a possible behavior with the caveat any consumers must be aware of it. No machine will vote nor precommit to a block it considers invalid, so for a network with an honest majority, this is a non-issue. ### Paper The [paper](https://arxiv.org/abs/1807.04938) describes the algorithm with pseudocode on page 6. This pseudocode isn't directly implementable, nor does it specify faulty behavior. Instead, it's solely a series of conditions which trigger events in order to successfully achieve consensus. 
The included pseudocode segments can be minimally described as follows: ``` 01-09 Init 10-10 StartRound(0) 11-21 StartRound 22-27 Fresh proposal 28-33 Proposal building off a valid round with prevotes 34-35 2f+1 prevote -> schedule timeout prevote 36-43 First proposal with prevotes -> precommit Some 44-46 2f+1 nil prevote -> precommit nil 47-48 2f+1 precommit -> schedule timeout precommit 49-54 First proposal with precommits -> finalize 55-56 f+1 round > local round, jump 57-60 on timeout propose 61-64 on timeout prevote 65-67 on timeout precommit ``` The corresponding Rust code implementing these tasks are marked with their related line numbers. ================================================ FILE: coordinator/tributary/tendermint/src/block.rs ================================================ use std::{ sync::Arc, collections::{HashSet, HashMap}, }; use serai_db::{Get, DbTxn, Db}; use crate::{ time::CanonicalInstant, ext::{RoundNumber, BlockNumber, Block, Network}, round::RoundData, message_log::MessageLog, Step, Data, DataFor, Message, MessageFor, }; pub(crate) struct BlockData { db: N::Db, genesis: [u8; 32], pub(crate) number: BlockNumber, pub(crate) validator_id: Option, pub(crate) our_proposal: Option, pub(crate) log: MessageLog, pub(crate) slashes: HashSet, // We track the end times of each round for two reasons: // 1) Knowing the start time of the next round // 2) Validating precommits, which include the end time of the round which produced it // This HashMap contains the end time of the round we're currently in and every round prior pub(crate) end_time: HashMap, pub(crate) round: Option>, pub(crate) locked: Option<(RoundNumber, ::Id)>, pub(crate) valid: Option<(RoundNumber, N::Block)>, } impl BlockData { pub(crate) fn new( db: N::Db, genesis: [u8; 32], weights: Arc, number: BlockNumber, validator_id: Option, our_proposal: Option, ) -> BlockData { BlockData { db, genesis, number, validator_id, our_proposal, log: MessageLog::new(weights), slashes: 
HashSet::new(), end_time: HashMap::new(), // The caller of BlockData::new is expected to be populated after by the caller round: None, locked: None, valid: None, } } pub(crate) fn round(&self) -> &RoundData { self.round.as_ref().unwrap() } pub(crate) fn round_mut(&mut self) -> &mut RoundData { self.round.as_mut().unwrap() } // Populate the end time up to the specified round // This is generally used when moving to the next round, where this will only populate one time, // yet is also used when jumping rounds (when 33% of the validators are on a round ahead of us) pub(crate) fn populate_end_time(&mut self, round: RoundNumber) { // Starts from the current round since we only start the current round once we have have all // the prior time data for r in (self.round().number.0 + 1) ..= round.0 { self.end_time.insert( RoundNumber(r), RoundData::::new(RoundNumber(r), self.end_time[&RoundNumber(r - 1)]).end_time(), ); } } // Start a new round. Optionally takes in the time for when this is the first round, and the time // isn't simply the time of the prior round (yet rather the prior block). Returns the proposal // data, if we are the proposer. 
pub(crate) fn new_round( &mut self, round: RoundNumber, proposer: N::ValidatorId, time: Option, ) -> Option> { debug_assert_eq!(round.0 == 0, time.is_some()); // If this is the first round, we don't have a prior round's end time to use as the start // We use the passed in time instead // If this isn't the first round, ensure we have the prior round's end time by populating the // map with all rounds till this round // This can happen we jump from round x to round x+n, where n != 1 // The paper says to do so whenever you observe a sufficient amount of peers on a higher round if round.0 != 0 { self.populate_end_time(round); } // L11-13 self.round = Some(RoundData::::new( round, time.unwrap_or_else(|| self.end_time[&RoundNumber(round.0 - 1)]), )); self.end_time.insert(round, self.round().end_time()); // L14-21 if Some(proposer) == self.validator_id { let (round, block) = self.valid.clone().unzip(); block.or_else(|| self.our_proposal.clone()).map(|block| Data::Proposal(round, block)) } else { self.round_mut().set_timeout(Step::Propose); None } } // Transform Data into an actual Message, using the contextual data from this block pub(crate) fn message(&mut self, data: DataFor) -> Option> { debug_assert_eq!( self.round().step, match data.step() { Step::Propose | Step::Prevote => Step::Propose, Step::Precommit => Step::Prevote, }, ); // Tendermint always sets the round's step to whatever it just broadcasted // Consolidate all of those here to ensure they aren't missed by an oversight // 27, 33, 41, 46, 60, 64 self.round_mut().step = data.step(); // Only return a message to if we're actually a current validator let round_number = self.round().number; let res = self.validator_id.map(|validator_id| Message { sender: validator_id, block: self.number, round: round_number, data, }); if let Some(res) = res.as_ref() { const LATEST_BLOCK_KEY: &[u8] = b"tendermint-machine-sent_block"; const LATEST_ROUND_KEY: &[u8] = b"tendermint-machine-sent_round"; const PROPOSE_KEY: &[u8] = 
b"tendermint-machine-sent_propose"; const PEVOTE_KEY: &[u8] = b"tendermint-machine-sent_prevote"; const PRECOMMIT_KEY: &[u8] = b"tendermint-machine-sent_commit"; let genesis = self.genesis; let key = |prefix: &[u8]| [prefix, &genesis].concat(); let mut txn = self.db.txn(); // Ensure we haven't prior sent a message for a future block/round let last_block_or_round = |txn: &mut ::Transaction<'_>, prefix, current| { let key = key(prefix); let latest = u64::from_le_bytes(txn.get(key.as_slice()).unwrap_or(vec![0; 8]).try_into().unwrap()); if latest > current { None?; } if current > latest { txn.put(&key, current.to_le_bytes()); return Some(true); } Some(false) }; let new_block = last_block_or_round(&mut txn, LATEST_BLOCK_KEY, self.number.0)?; if new_block { // Delete the latest round key txn.del(key(LATEST_ROUND_KEY)); } let new_round = last_block_or_round(&mut txn, LATEST_ROUND_KEY, round_number.0.into())?; if new_block || new_round { // Delete the messages for the old round txn.del(key(PROPOSE_KEY)); txn.del(key(PEVOTE_KEY)); txn.del(key(PRECOMMIT_KEY)); } // Check we haven't sent this message within this round let msg_key = key(match res.data.step() { Step::Propose => PROPOSE_KEY, Step::Prevote => PEVOTE_KEY, Step::Precommit => PRECOMMIT_KEY, }); if txn.get(&msg_key).is_some() { assert!(!new_block); assert!(!new_round); None?; } // Put that we're sending this message to the DB txn.put(&msg_key, []); txn.commit(); } res } } ================================================ FILE: coordinator/tributary/tendermint/src/ext.rs ================================================ use core::{hash::Hash, fmt::Debug}; use std::{sync::Arc, collections::HashSet}; use async_trait::async_trait; use thiserror::Error; use parity_scale_codec::{Encode, Decode}; use crate::{SignedMessageFor, SlashEvent, commit_msg}; /// An alias for a series of traits required for a type to be usable as a validator ID, /// automatically implemented for all types satisfying those traits. 
pub trait ValidatorId: Send + Sync + Clone + Copy + PartialEq + Eq + Hash + Debug + Encode + Decode { } impl ValidatorId for V { } /// An alias for a series of traits required for a type to be usable as a signature, /// automatically implemented for all types satisfying those traits. pub trait Signature: Send + Sync + Clone + PartialEq + Eq + Debug + Encode + Decode {} impl Signature for S {} // Type aliases which are distinct according to the type system /// A struct containing a Block Number, wrapped to have a distinct type. #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode)] pub struct BlockNumber(pub u64); /// A struct containing a round number, wrapped to have a distinct type. #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode)] pub struct RoundNumber(pub u32); /// A signer for a validator. #[async_trait] pub trait Signer: Send + Sync { // Type used to identify validators. type ValidatorId: ValidatorId; /// Signature type. type Signature: Signature; /// Returns the validator's current ID. Returns None if they aren't a current validator. async fn validator_id(&self) -> Option; /// Sign a signature with the current validator's private key. async fn sign(&self, msg: &[u8]) -> Self::Signature; } #[async_trait] impl Signer for Arc { type ValidatorId = S::ValidatorId; type Signature = S::Signature; async fn validator_id(&self) -> Option { self.as_ref().validator_id().await } async fn sign(&self, msg: &[u8]) -> Self::Signature { self.as_ref().sign(msg).await } } /// A signature scheme used by validators. pub trait SignatureScheme: Send + Sync + Clone { // Type used to identify validators. type ValidatorId: ValidatorId; /// Signature type. type Signature: Signature; /// Type representing an aggregate signature. This would presumably be a BLS signature, /// yet even with Schnorr signatures /// [half-aggregation is possible](https://eprint.iacr.org/2021/350). 
/// It could even be a threshold signature scheme, though that's currently unexpected. type AggregateSignature: Signature; /// Type representing a signer of this scheme. type Signer: Signer; /// Verify a signature from the validator in question. #[must_use] fn verify(&self, validator: Self::ValidatorId, msg: &[u8], sig: &Self::Signature) -> bool; /// Aggregate signatures. /// It may panic if corrupted data passed in. fn aggregate( &self, validators: &[Self::ValidatorId], msg: &[u8], sigs: &[Self::Signature], ) -> Self::AggregateSignature; /// Verify an aggregate signature for the list of signers. #[must_use] fn verify_aggregate( &self, signers: &[Self::ValidatorId], msg: &[u8], sig: &Self::AggregateSignature, ) -> bool; } impl SignatureScheme for Arc { type ValidatorId = S::ValidatorId; type Signature = S::Signature; type AggregateSignature = S::AggregateSignature; type Signer = S::Signer; fn verify(&self, validator: Self::ValidatorId, msg: &[u8], sig: &Self::Signature) -> bool { self.as_ref().verify(validator, msg, sig) } fn aggregate( &self, validators: &[Self::ValidatorId], msg: &[u8], sigs: &[Self::Signature], ) -> Self::AggregateSignature { self.as_ref().aggregate(validators, msg, sigs) } #[must_use] fn verify_aggregate( &self, signers: &[Self::ValidatorId], msg: &[u8], sig: &Self::AggregateSignature, ) -> bool { self.as_ref().verify_aggregate(signers, msg, sig) } } /// A commit for a specific block. /// /// The list of validators have weight exceeding the threshold for a valid commit. #[derive(PartialEq, Debug, Encode, Decode)] pub struct Commit { /// End time of the round which created this commit, used as the start time of the next block. pub end_time: u64, /// Validators participating in the signature. pub validators: Vec, /// Aggregate signature. 
pub signature: S::AggregateSignature,
}

impl Clone for Commit {
  fn clone(&self) -> Self {
    Self {
      end_time: self.end_time,
      validators: self.validators.clone(),
      signature: self.signature.clone(),
    }
  }
}

/// Weights for the validators present.
pub trait Weights: Send + Sync {
  // Type used to identify validators.
  type ValidatorId: ValidatorId;

  /// Total weight of all validators.
  fn total_weight(&self) -> u64;
  /// Weight for a specific validator.
  fn weight(&self, validator: Self::ValidatorId) -> u64;
  /// Threshold needed for BFT consensus.
  ///
  /// floor(2W / 3) + 1 for total weight W: the classical strictly-greater-than-two-thirds
  /// supermajority.
  fn threshold(&self) -> u64 {
    ((self.total_weight() * 2) / 3) + 1
  }
  /// Threshold preventing BFT consensus.
  ///
  /// W - threshold + 1: the minimum weight which, by refusing to participate, can stop the
  /// supermajority threshold from ever being reached.
  fn fault_threshold(&self) -> u64 {
    (self.total_weight() - self.threshold()) + 1
  }

  /// Weighted round robin function.
  fn proposer(&self, block: BlockNumber, round: RoundNumber) -> Self::ValidatorId;
}

impl Weights for Arc {
  type ValidatorId = W::ValidatorId;

  fn total_weight(&self) -> u64 {
    self.as_ref().total_weight()
  }
  fn weight(&self, validator: Self::ValidatorId) -> u64 {
    self.as_ref().weight(validator)
  }
  fn proposer(&self, block: BlockNumber, round: RoundNumber) -> Self::ValidatorId {
    self.as_ref().proposer(block, round)
  }
}

/// Simplified error enum representing a block's validity.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Error, Encode, Decode)]
pub enum BlockError {
  /// Malformed block which is wholly invalid.
  #[error("invalid block")]
  Fatal,
  /// Valid block by syntax, with semantics which may or may not be valid yet are locally
  /// considered invalid. If a block fails to validate with this, a slash will not be triggered.
  #[error("invalid block under local view")]
  Temporal,
}

/// Trait representing a Block.
pub trait Block: Send + Sync + Clone + PartialEq + Eq + Debug + Encode + Decode {
  // Type used to identify blocks. Presumably a cryptographic hash of the block.
  type Id: Send + Sync + Copy + Clone + PartialEq + Eq + AsRef<[u8]> + Debug + Encode + Decode;

  /// Return the deterministic, unique ID for this block.
fn id(&self) -> Self::Id; } /// Trait representing the distributed system Tendermint is providing consensus over. #[async_trait] pub trait Network: Sized + Send + Sync { /// The database used to back this. type Db: serai_db::Db; // Type used to identify validators. type ValidatorId: ValidatorId; /// Signature scheme used by validators. type SignatureScheme: SignatureScheme; /// Object representing the weights of validators. type Weights: Weights; /// Type used for ordered blocks of information. type Block: Block; /// Maximum block processing time in milliseconds. /// /// This should include both the time to download the block and the actual processing time. /// /// BLOCK_PROCESSING_TIME + (3 * LATENCY_TIME) must be divisible by 1000. const BLOCK_PROCESSING_TIME: u32; /// Network latency time in milliseconds. /// /// BLOCK_PROCESSING_TIME + (3 * LATENCY_TIME) must be divisible by 1000. const LATENCY_TIME: u32; /// The block time, in seconds. Defined as the processing time plus three times the latency. fn block_time() -> u32 { let raw = Self::BLOCK_PROCESSING_TIME + (3 * Self::LATENCY_TIME); let res = raw / 1000; assert_eq!(res * 1000, raw); res } /// Return a handle on the signer in use, usable for the entire lifetime of the machine. fn signer(&self) -> ::Signer; /// Return a handle on the signing scheme in use, usable for the entire lifetime of the machine. fn signature_scheme(&self) -> Self::SignatureScheme; /// Return a handle on the validators' weights, usable for the entire lifetime of the machine. fn weights(&self) -> Self::Weights; /// Verify a commit for a given block. Intended for use when syncing or when not an active /// validator. 
#[must_use] fn verify_commit( &self, id: ::Id, commit: &Commit, ) -> bool { if commit.validators.iter().collect::>().len() != commit.validators.len() { return false; } if !self.signature_scheme().verify_aggregate( &commit.validators, &commit_msg(commit.end_time, id.as_ref()), &commit.signature, ) { return false; } let weights = self.weights(); commit.validators.iter().map(|v| weights.weight(*v)).sum::() >= weights.threshold() } /// Broadcast a message to the other validators. /// /// If authenticated channels have already been established, this will double-authenticate. /// Switching to unauthenticated channels in a system already providing authenticated channels is /// not recommended as this is a minor, temporal inefficiency, while downgrading channels may /// have wider implications. async fn broadcast(&mut self, msg: SignedMessageFor); /// Trigger a slash for the validator in question who was definitively malicious. /// /// The exact process of triggering a slash is undefined and left to the network as a whole. async fn slash(&mut self, validator: Self::ValidatorId, slash_event: SlashEvent); /// Validate a block. async fn validate(&self, block: &Self::Block) -> Result<(), BlockError>; /// Add a block, returning the proposal for the next one. /// /// It's possible a block, which was never validated or even failed validation, may be passed /// here if a supermajority of validators did consider it valid and created a commit for it. /// /// This deviates from the paper which will have a local node refuse to decide on a block it /// considers invalid. This library acknowledges the network did decide on it, leaving handling /// of it to the network, and outside of this scope. 
async fn add_block( &mut self, block: Self::Block, commit: Commit, ) -> Option; } ================================================ FILE: coordinator/tributary/tendermint/src/lib.rs ================================================ #![expect(clippy::cast_possible_truncation)] use core::fmt::Debug; use std::{ sync::Arc, time::{SystemTime, Instant, Duration}, collections::{VecDeque, HashMap}, }; use parity_scale_codec::{Encode, Decode, IoReader}; use futures_channel::mpsc; use futures_util::{ FutureExt, StreamExt, SinkExt, future::{self, Fuse}, }; use patchable_async_sleep::sleep; use serai_db::{Get, DbTxn, Db}; pub mod time; use time::{sys_time, CanonicalInstant}; pub mod round; use round::RoundData; mod block; use block::BlockData; pub(crate) mod message_log; /// Traits and types of the external network being integrated with to provide consensus over. pub mod ext; use ext::*; const MESSAGE_TAPE_KEY: &[u8] = b"tendermint-machine-message_tape"; fn message_tape_key(genesis: [u8; 32]) -> Vec { [MESSAGE_TAPE_KEY, &genesis].concat() } pub fn commit_msg(end_time: u64, id: &[u8]) -> Vec { [&end_time.to_le_bytes(), id].concat() } #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode)] pub enum Step { Propose, Prevote, Precommit, } #[derive(Clone, Eq, Debug, Encode, Decode)] pub enum Data { Proposal(Option, B), Prevote(Option), Precommit(Option<(B::Id, S)>), } impl PartialEq for Data { fn eq(&self, other: &Data) -> bool { match (self, other) { (Data::Proposal(valid_round, block), Data::Proposal(valid_round2, block2)) => { (valid_round == valid_round2) && (block == block2) } (Data::Prevote(id), Data::Prevote(id2)) => id == id2, (Data::Precommit(None), Data::Precommit(None)) => true, (Data::Precommit(Some((id, _))), Data::Precommit(Some((id2, _)))) => id == id2, _ => false, } } } impl core::hash::Hash for Data { fn hash(&self, state: &mut H) { match self { Data::Proposal(valid_round, block) => (0, valid_round, block.id().as_ref()).hash(state), Data::Prevote(id) => (1, 
id.as_ref().map(AsRef::<[u8]>::as_ref)).hash(state), Data::Precommit(None) => (2, 0).hash(state), Data::Precommit(Some((id, _))) => (2, 1, id.as_ref()).hash(state), } } } impl Data { pub fn step(&self) -> Step { match self { Data::Proposal(..) => Step::Propose, Data::Prevote(..) => Step::Prevote, Data::Precommit(..) => Step::Precommit, } } } #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] pub struct Message { pub sender: V, pub block: BlockNumber, pub round: RoundNumber, pub data: Data, } /// A signed Tendermint consensus message to be broadcast to the other validators. #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] pub struct SignedMessage { pub msg: Message, pub sig: S, } impl SignedMessage { /// Number of the block this message is attempting to add to the chain. pub fn block(&self) -> BlockNumber { self.msg.block } #[must_use] pub fn verify_signature>( &self, signer: &Scheme, ) -> bool { signer.verify(self.msg.sender, &self.msg.encode(), &self.sig) } } #[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode)] pub enum SlashReason { FailToPropose, InvalidBlock, InvalidProposer, } #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] pub enum Evidence { ConflictingMessages(Vec, Vec), InvalidPrecommit(Vec), InvalidValidRound(Vec), } #[derive(Clone, PartialEq, Eq, Debug)] pub enum TendermintError { Malicious, Temporal, AlreadyHandled, InvalidEvidence, } // Type aliases to abstract over generic hell pub type DataFor = Data<::Block, <::SignatureScheme as SignatureScheme>::Signature>; pub(crate) type MessageFor = Message< ::ValidatorId, ::Block, <::SignatureScheme as SignatureScheme>::Signature, >; /// Type alias to the SignedMessage type for a given Network pub type SignedMessageFor = SignedMessage< ::ValidatorId, ::Block, <::SignatureScheme as SignatureScheme>::Signature, >; pub fn decode_signed_message(mut data: &[u8]) -> Option> { SignedMessageFor::::decode(&mut data).ok() } fn decode_and_verify_signed_message( data: &[u8], schema: 
&N::SignatureScheme,
) -> Result, TendermintError> {
  let msg = decode_signed_message::(data).ok_or(TendermintError::InvalidEvidence)?;

  // Verify that evidence messages are signed correctly
  if !msg.verify_signature(schema) {
    Err(TendermintError::InvalidEvidence)?;
  }
  Ok(msg)
}

/// Verify a piece of evidence of malicious behavior: that it decodes, is correctly signed by the
/// claimed sender, and actually demonstrates the claimed fault.
///
/// `commit` looks up the commit for a given block number, returning None if it isn't locally
/// known. Any failure yields TendermintError::InvalidEvidence.
pub fn verify_tendermint_evidence(
  evidence: &Evidence,
  schema: &N::SignatureScheme,
  commit: impl Fn(u64) -> Option>,
) -> Result<(), TendermintError> {
  match evidence {
    Evidence::ConflictingMessages(first, second) => {
      // Both messages must carry valid signatures from their claimed senders
      let first = decode_and_verify_signed_message::(first, schema)?.msg;
      let second = decode_and_verify_signed_message::(second, schema)?.msg;

      // Make sure they're distinct messages, from the same sender, within the same block
      if (first == second) || (first.sender != second.sender) || (first.block != second.block) {
        Err(TendermintError::InvalidEvidence)?;
      }
      // Distinct messages within the same step — same round and step with differing content is
      // the equivocation being proven
      if !((first.round == second.round) && (first.data.step() == second.data.step())) {
        Err(TendermintError::InvalidEvidence)?;
      }
    }
    Evidence::InvalidPrecommit(msg) => {
      let msg = decode_and_verify_signed_message::(msg, schema)?.msg;
      // Only a precommit for an actual block can carry an invalid commit signature
      let Data::Precommit(Some((id, sig))) = &msg.data else {
        Err(TendermintError::InvalidEvidence)?
      };

      // TODO: We need to be passed in the genesis time to handle this edge case
      if msg.block.0 == 0 {
        Err(TendermintError::InvalidEvidence)?
        // todo!("invalid precommit signature on first block")
      }

      // Get the last commit, whose end time anchors the round schedule for this block
      let prior_commit = match commit(msg.block.0 - 1) {
        Some(c) => c,
        // If we have yet to sync the block in question, we will return InvalidEvidence based
        // on our own temporal ambiguity
        // This will also cause an InvalidEvidence for anything using a non-existent block,
        // yet that's valid behavior
        // TODO: Double check the ramifications of this
        _ => Err(TendermintError::InvalidEvidence)?,
      };

      // Calculate the end time till the msg round
      let mut last_end_time = CanonicalInstant::new(prior_commit.end_time);
      for r in 0 ..= msg.round.0 {
        last_end_time = RoundData::::new(RoundNumber(r), last_end_time).end_time();
      }

      // Verify that the commit signature was actually invalid — if it verifies here, the
      // precommit was fine and this evidence is itself invalid
      if schema.verify(msg.sender, &commit_msg(last_end_time.canonical(), id.as_ref()), sig) {
        Err(TendermintError::InvalidEvidence)?
      }
    }
    Evidence::InvalidValidRound(msg) => {
      let msg = decode_and_verify_signed_message::(msg, schema)?.msg;
      // Only proposals carrying a valid round can have an invalid valid round
      let Data::Proposal(Some(vr), _) = &msg.data else {
        Err(TendermintError::InvalidEvidence)?
      };
      // A well-formed valid round is strictly less than the proposal's round, so vr < round
      // means the proposal was fine and this evidence doesn't demonstrate a fault
      if vr.0 < msg.round.0 {
        Err(TendermintError::InvalidEvidence)?
      }
    }
  }
  Ok(())
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub enum SlashEvent {
  // A slash without on-chain evidence, identified by reason, block number, and round number
  Id(SlashReason, u64, u32),
  // A slash backed by cryptographic evidence of the offense
  WithEvidence(Evidence),
}

// Struct for if various upon handlers have been triggered to ensure they don't trigger multiple
// times.
#[derive(Clone, PartialEq, Eq, Debug)]
struct Upons {
  upon_prevotes: bool,
  upon_successful_current_round_prevotes: bool,
  upon_negative_current_round_prevotes: bool,
  upon_precommits: bool,
}

/// A machine executing the Tendermint protocol.
pub struct TendermintMachine { db: N::Db, genesis: [u8; 32], network: N, signer: ::Signer, validators: N::SignatureScheme, weights: Arc, queue: VecDeque>, msg_recv: mpsc::UnboundedReceiver>, synced_block_recv: mpsc::UnboundedReceiver>, synced_block_result_send: mpsc::UnboundedSender, block: BlockData, // TODO: Move this into the Block struct round_proposals: HashMap, N::Block)>, // TODO: Move this into the Round struct upons: Upons, } pub struct SyncedBlock { pub number: BlockNumber, pub block: ::Block, pub commit: Commit<::SignatureScheme>, } pub type SyncedBlockSender = mpsc::UnboundedSender>; pub type SyncedBlockResultReceiver = mpsc::UnboundedReceiver; pub type MessageSender = mpsc::UnboundedSender>; /// A Tendermint machine and its channel to receive messages from the gossip layer over. pub struct TendermintHandle { /// Channel to trigger the machine to move to the next block. /// Takes in the the previous block's commit, along with the new proposal. pub synced_block: SyncedBlockSender, /// A channel to communicate the result of a synced_block message. pub synced_block_result: SyncedBlockResultReceiver, /// Channel to send messages received from the P2P layer. pub messages: MessageSender, /// Tendermint machine to be run on an asynchronous task. pub machine: TendermintMachine, } impl TendermintMachine { // Broadcast the given piece of data // Tendermint messages always specify their block/round, yet Tendermint only ever broadcasts for // the current block/round. Accordingly, instead of manually fetching those at every call-site, // this function can simply pass the data to the block which can contextualize it fn broadcast(&mut self, data: DataFor) { if let Some(msg) = self.block.message(data) { // Push it on to the queue. This is done so we only handle one message at a time, and so we // can handle our own message before broadcasting it. That way, we fail before before // becoming malicious self.queue.push_back(msg); } } // Start a new round. 
Returns true if we were the proposer fn round(&mut self, round: RoundNumber, time: Option) -> bool { // Clear upons self.upons = Upons { upon_prevotes: false, upon_successful_current_round_prevotes: false, upon_negative_current_round_prevotes: false, upon_precommits: false, }; let proposer = self.weights.proposer(self.block.number, round); let res = if let Some(data) = self.block.new_round(round, proposer, time) { self.broadcast(data); true } else { false }; log::debug!( target: "tendermint", "proposer for block {}, round {round:?} was {} (me: {res})", self.block.number.0, hex::encode(proposer.encode()), ); res } // 53-54 async fn reset(&mut self, end_round: RoundNumber, proposal: Option) { // Ensure we have the end time data for the last round self.block.populate_end_time(end_round); // Sleep until this round ends let round_end = self.block.end_time[&end_round]; let time_until_round_end = round_end.instant().saturating_duration_since(Instant::now()); if time_until_round_end == Duration::ZERO { log::trace!( target: "tendermint", "resetting when prior round ended {}ms ago", Instant::now().saturating_duration_since(round_end.instant()).as_millis(), ); } log::trace!( target: "tendermint", "sleeping until round ends in {}ms", time_until_round_end.as_millis(), ); sleep(time_until_round_end).await; // Clear the message tape { let mut txn = self.db.txn(); txn.del(message_tape_key(self.genesis)); txn.commit(); } // Clear our outbound message queue self.queue = VecDeque::new(); // Create the new block self.block = BlockData::new( self.db.clone(), self.genesis, self.weights.clone(), BlockNumber(self.block.number.0 + 1), self.signer.validator_id().await, proposal, ); // Reset the round proposals self.round_proposals = HashMap::new(); // Start the first round self.round(RoundNumber(0), Some(round_end)); } async fn reset_by_commit( &mut self, commit: Commit, proposal: Option, ) { let mut round = self.block.round().number; // If this commit is for a round we don't have, jump up 
to it while self.block.end_time[&round].canonical() < commit.end_time { round.0 += 1; self.block.populate_end_time(round); } // If this commit is for a prior round, find it while self.block.end_time[&round].canonical() > commit.end_time { if round.0 == 0 { panic!("commit isn't for this machine's next block"); } round.0 -= 1; } debug_assert_eq!(self.block.end_time[&round].canonical(), commit.end_time); self.reset(round, proposal).await; } async fn slash(&mut self, validator: N::ValidatorId, slash_event: SlashEvent) { // TODO: If the new slash event has evidence, emit to prevent a low-importance slash from // cancelling emission of high-importance slashes if !self.block.slashes.contains(&validator) { log::info!(target: "tendermint", "Slashing validator {}", hex::encode(validator.encode())); self.block.slashes.insert(validator); self.network.slash(validator, slash_event).await; } } fn proposal_for_round(&self, round: RoundNumber) -> Option<(Option, &N::Block)> { self.round_proposals.get(&round).map(|(round, block)| (*round, block)) } // L22-27 fn upon_proposal_without_valid_round(&mut self) { if self.block.round().step != Step::Propose { return; } // If we have the proposal message... let Some((None, block)) = self.proposal_for_round(self.block.round().number) else { return; }; // There either needs to not be a locked value or it must be equivalent #[allow(clippy::map_unwrap_or)] if self .block .locked .as_ref() .map(|(_round, locked_block)| block.id() == *locked_block) .unwrap_or(true) { self.broadcast(Data::Prevote(Some(block.id()))); } else { self.broadcast(Data::Prevote(None)); } } // L28-33 fn upon_proposal_with_valid_round(&mut self) { if self.block.round().step != Step::Propose { return; } // If we have the proposal message... 
let Some((Some(proposal_valid_round), block)) = self.proposal_for_round(self.block.round().number) else { return; }; // Check we have the necessary prevotes if !self.block.log.has_consensus(proposal_valid_round, &Data::Prevote(Some(block.id()))) { return; } // We don't check valid round < current round as the `message` function does // If locked is None, lockedRoundp is -1 and less than valid round #[allow(clippy::map_unwrap_or)] let locked_clause_1 = self .block .locked .as_ref() .map(|(locked_round, _block)| locked_round.0 <= proposal_valid_round.0) .unwrap_or(true); // The second clause is if the locked values are equivalent. If no value is locked, they aren't #[allow(clippy::map_unwrap_or)] let locked_clause_2 = self .block .locked .as_ref() .map(|(_round, locked_block)| block.id() == *locked_block) .unwrap_or(false); if locked_clause_1 || locked_clause_2 { self.broadcast(Data::Prevote(Some(block.id()))); } else { self.broadcast(Data::Prevote(None)); } } // L34-35 fn upon_prevotes(&mut self) { if self.upons.upon_prevotes || (self.block.round().step != Step::Prevote) { return; } if self.block.log.has_participation(self.block.round().number, Step::Prevote) { self.block.round_mut().set_timeout(Step::Prevote); self.upons.upon_prevotes = true; } } // L36-43 async fn upon_successful_current_round_prevotes(&mut self) { // Returning if `self.step == Step::Propose` is equivalent to guarding `step >= prevote` if self.upons.upon_successful_current_round_prevotes || (self.block.round().step == Step::Propose) { return; } // If we have the proposal message... 
let Some((_, block)) = self.proposal_for_round(self.block.round().number) else { return; }; // Check we have the necessary prevotes if !self.block.log.has_consensus(self.block.round().number, &Data::Prevote(Some(block.id()))) { return; } let block = block.clone(); self.upons.upon_successful_current_round_prevotes = true; if self.block.round().step == Step::Prevote { self.block.locked = Some((self.block.round().number, block.id())); let signature = self .signer .sign(&commit_msg( self.block.end_time[&self.block.round().number].canonical(), block.id().as_ref(), )) .await; self.broadcast(Data::Precommit(Some((block.id(), signature)))); } self.block.valid = Some((self.block.round().number, block)); } // L44-46 fn upon_negative_current_round_prevotes(&mut self) { if self.upons.upon_negative_current_round_prevotes || (self.block.round().step != Step::Prevote) { return; } if self.block.log.has_consensus(self.block.round().number, &Data::Prevote(None)) { self.broadcast(Data::Precommit(None)); } self.upons.upon_negative_current_round_prevotes = true; } // L47-48 fn upon_precommits(&mut self) { if self.upons.upon_precommits { return; } if self.block.log.has_participation(self.block.round().number, Step::Precommit) { self.block.round_mut().set_timeout(Step::Precommit); self.upons.upon_precommits = true; } } // L22-48 async fn all_current_round_upons(&mut self) { self.upon_proposal_without_valid_round(); self.upon_proposal_with_valid_round(); self.upon_prevotes(); self.upon_successful_current_round_prevotes().await; self.upon_negative_current_round_prevotes(); self.upon_precommits(); } // L49-54 async fn upon_successful_precommits(&mut self, round: RoundNumber) -> bool { // If we have the proposal message... 
let Some((_, block)) = self.proposal_for_round(round) else { return false }; // Check we have the necessary precommits // The precommit we check we have consensus upon uses a junk signature since message equality // disregards the signature if !self .block .log .has_consensus(round, &Data::Precommit(Some((block.id(), self.signer.sign(&[]).await)))) { return false; } // Get all participants in this commit let mut validators = vec![]; let mut sigs = vec![]; // Get all precommits for this round for (validator, msgs) in &self.block.log.log[&round] { if let Some(signed) = msgs.get(&Step::Precommit) { if let Data::Precommit(Some((id, sig))) = &signed.msg.data { // If this precommit was for this block, include it if *id == block.id() { validators.push(*validator); sigs.push(sig.clone()); } } } } // Form the commit itself let commit_msg = commit_msg(self.block.end_time[&round].canonical(), block.id().as_ref()); let commit = Commit { end_time: self.block.end_time[&round].canonical(), validators: validators.clone(), signature: self.network.signature_scheme().aggregate(&validators, &commit_msg, &sigs), }; debug_assert!(self.network.verify_commit(block.id(), &commit)); // Add the block and reset the machine log::info!( target: "tendermint", "TendermintMachine produced block {}", hex::encode(block.id().as_ref()), ); let id = block.id(); let proposal = self.network.add_block(block.clone(), commit).await; log::trace!( target: "tendermint", "added block {} (produced by machine)", hex::encode(id.as_ref()), ); self.reset(round, proposal).await; true } // L49-54 async fn all_any_round_upons(&mut self, round: RoundNumber) -> bool { self.upon_successful_precommits(round).await } // Returns Ok(true) if this was a Precommit which had either no signature or its signature // validated // Returns Ok(false) if it wasn't a Precommit or the signature wasn't validated yet // Returns Err if the signature was invalid async fn verify_precommit_signature( &mut self, signed: &SignedMessageFor, ) -> 
Result { let msg = &signed.msg; if let Data::Precommit(precommit) = &msg.data { let Some((id, sig)) = precommit else { return Ok(true) }; // Also verify the end_time of the commit // Only perform this verification if we already have the end_time // Else, there's a DoS where we receive a precommit for some round infinitely in the future // which forces us to calculate every end time if let Some(end_time) = self.block.end_time.get(&msg.round) { if !self.validators.verify(msg.sender, &commit_msg(end_time.canonical(), id.as_ref()), sig) { log::warn!(target: "tendermint", "validator produced an invalid commit signature"); self .slash( msg.sender, SlashEvent::WithEvidence(Evidence::InvalidPrecommit(signed.encode())), ) .await; Err(TendermintError::Malicious)?; } return Ok(true); } } Ok(false) } async fn message(&mut self, signed: &SignedMessageFor) -> Result<(), TendermintError> { let msg = &signed.msg; if msg.block != self.block.number { Err(TendermintError::Temporal)?; } // If this is a precommit, verify its signature self.verify_precommit_signature(signed).await?; // Only let the proposer propose if matches!(msg.data, Data::Proposal(..)) && (msg.sender != self.weights.proposer(msg.block, msg.round)) { log::warn!(target: "tendermint", "validator who wasn't the proposer proposed"); // TODO: This should have evidence self .slash(msg.sender, SlashEvent::Id(SlashReason::InvalidProposer, msg.block.0, msg.round.0)) .await; Err(TendermintError::Malicious)?; }; // If this is a proposal, verify the block // If the block is invalid, drop the message, letting the timeout cover it // This prevents needing to check if valid inside every `upon` block if let Data::Proposal(_, block) = &msg.data { match self.network.validate(block).await { Ok(()) => {} Err(BlockError::Temporal) => { if self.block.round().step == Step::Propose { self.broadcast(Data::Prevote(None)); } Err(TendermintError::Temporal)?; } Err(BlockError::Fatal) => { log::warn!(target: "tendermint", "validator proposed a 
fatally invalid block"); if self.block.round().step == Step::Propose { self.broadcast(Data::Prevote(None)); } self .slash( msg.sender, SlashEvent::Id(SlashReason::InvalidBlock, self.block.number.0, msg.round.0), ) .await; Err(TendermintError::Malicious)?; } }; } // If this is a proposal, verify the valid round isn't fundamentally invalid if let Data::Proposal(Some(valid_round), _) = msg.data { if valid_round.0 >= msg.round.0 { log::warn!( target: "tendermint", "proposed proposed with a syntactically invalid valid round", ); if self.block.round().step == Step::Propose { self.broadcast(Data::Prevote(None)); } self .slash(msg.sender, SlashEvent::WithEvidence(Evidence::InvalidValidRound(msg.encode()))) .await; Err(TendermintError::Malicious)?; } } // Add it to the log, returning if it was already handled match self.block.log.log(signed.clone()) { Ok(true) => {} Ok(false) => Err(TendermintError::AlreadyHandled)?, Err(evidence) => { self.slash(msg.sender, SlashEvent::WithEvidence(evidence)).await; Err(TendermintError::Malicious)?; } } log::debug!( target: "tendermint", "received new tendermint message (block: {}, round: {}, step: {:?})", msg.block.0, msg.round.0, msg.data.step(), ); // If this is a proposal, insert it if let Data::Proposal(vr, block) = &msg.data { self.round_proposals.insert(msg.round, (*vr, block.clone())); } // L55-56 // Jump ahead if we should if (msg.round.0 > self.block.round().number.0) && (self.block.log.round_participation(msg.round) >= self.weights.fault_threshold()) { log::debug!( target: "tendermint", "jumping from round {} to round {}", self.block.round().number.0, msg.round.0, ); // Jump to the new round. 
let old_round = self.block.round().number; self.round(msg.round, None); // If any jumped over/to round already has precommit messages, verify their signatures for jumped in (old_round.0 + 1) ..= msg.round.0 { let jumped = RoundNumber(jumped); let round_msgs = self.block.log.log.get(&jumped).cloned().unwrap_or_default(); for (validator, msgs) in &round_msgs { if let Some(existing) = msgs.get(&Step::Precommit) { if let Ok(res) = self.verify_precommit_signature(existing).await { // Ensure this actually verified the signature instead of believing it shouldn't yet assert!(res); } else { // Remove the message so it isn't counted towards forming a commit/included in one // This won't remove the fact they precommitted for this block hash in the MessageLog // TODO: Don't even log these in the first place until we jump, preventing needing // to do this in the first place self .block .log .log .get_mut(&jumped) .unwrap() .get_mut(validator) .unwrap() .remove(&Step::Precommit) .unwrap(); } } } } } // Now that we've jumped, and: // 1) If this is a message for an old round, verified the precommit signatures // 2) If this is a message for what was the current round, verified the precommit signatures // 3) If this is a message for what was a future round, verified the precommit signatures if it // has 34+% participation // Run all `upons` run for any round, which may produce a Commit if it has 67+% participation // (returning true if it does, letting us return now) // It's necessary to verify the precommit signatures before Commit production is allowed, hence // this specific flow if self.all_any_round_upons(msg.round).await { return Ok(()); } // If this is a historic round, or a future round without sufficient participation, return if msg.round.0 != self.block.round().number.0 { return Ok(()); } // msg.round is now guaranteed to be equal to self.block.round().number debug_assert_eq!(msg.round, self.block.round().number); // Run all `upons` run for the current round 
self.all_current_round_upons().await; Ok(()) } /// Create a new Tendermint machine, from the specified point, with the specified block as the /// one to propose next. This will return a channel to send messages from the gossip layer and /// the machine itself. The machine should have `run` called from an asynchronous task. #[allow(clippy::new_ret_no_self)] pub async fn new( db: N::Db, network: N, genesis: [u8; 32], last_block: BlockNumber, last_time: u64, proposal: N::Block, ) -> TendermintHandle { let (msg_send, msg_recv) = mpsc::unbounded(); let (synced_block_send, synced_block_recv) = mpsc::unbounded(); let (synced_block_result_send, synced_block_result_recv) = mpsc::unbounded(); TendermintHandle { synced_block: synced_block_send, synced_block_result: synced_block_result_recv, messages: msg_send, machine: { let now = SystemTime::now(); let sys_time = sys_time(last_time); let mut negative = false; let time_until = sys_time.duration_since(now).unwrap_or_else(|_| { negative = true; now.duration_since(sys_time).unwrap_or(Duration::ZERO) }); log::info!( target: "tendermint", "new TendermintMachine building off block {} is scheduled to start in {}{}s", last_block.0, if negative { "-" } else { "" }, time_until.as_secs(), ); // If the last block hasn't ended yet, sleep until it has if !negative { sleep(time_until).await; } let signer = network.signer(); let validators = network.signature_scheme(); let weights = Arc::new(network.weights()); let validator_id = signer.validator_id().await; // L01-10 let mut machine = TendermintMachine { db: db.clone(), genesis, network, signer, validators, weights: weights.clone(), queue: VecDeque::new(), msg_recv, synced_block_recv, synced_block_result_send, block: BlockData::new( db, genesis, weights, BlockNumber(last_block.0 + 1), validator_id, Some(proposal), ), round_proposals: HashMap::new(), upons: Upons { upon_prevotes: false, upon_successful_current_round_prevotes: false, upon_negative_current_round_prevotes: false, 
upon_precommits: false,
          },
        };

        // The end time of the last block is the start time for this one
        // The Commit explicitly contains the end time, so loading the last commit will provide
        // this. The only exception is for the genesis block, which doesn't have a commit
        // Using the genesis time in place will cause this block to be created immediately
        // after it, without the standard amount of separation (so their times will be
        // equivalent or minimally offset)
        // For callers wishing to avoid this, they should pass (0, GENESIS + N::block_time())
        machine.round(RoundNumber(0), Some(CanonicalInstant::new(last_time)));
        machine
      },
    }
  }

  /// Drive the machine: an infinite select loop over externally synced blocks, our own queued
  /// messages, round timeouts, a 60s rebroadcast timer, and gossiped messages (in that priority
  /// order). Returns once the synced-block or message channels close.
  pub async fn run(mut self) {
    log::debug!(target: "tendermint", "running TendermintMachine");

    let mut rebroadcast_future = Box::pin(sleep(Duration::from_secs(60))).fuse();
    loop {
      // Also create a future for if the queue has a message
      // Does not pop_front as if another message has higher priority, its future will be handled
      // instead in this loop, and the popped value would be dropped with the next iteration
      let mut queue_future =
        if self.queue.is_empty() { Fuse::terminated() } else { future::ready(()).fuse() };

      if let Some((our_message, msg, mut sig)) = futures_util::select_biased! {
        // Handle a new block occurring externally (from an external sync loop)
        // Has the highest priority as it makes all other futures here irrelevant
        msg = self.synced_block_recv.next() => {
          if let Some(SyncedBlock { number, block, commit }) = msg {
            // Commit is for a block we've already moved past
            if number != self.block.number {
              self.synced_block_result_send.send(false).await.unwrap();
              continue;
            }

            // Commit is invalid
            if !self.network.verify_commit(block.id(), &commit) {
              self.synced_block_result_send.send(false).await.unwrap();
              continue;
            }

            log::debug!(
              target: "tendermint",
              "TendermintMachine received a block from the external sync loop",
            );
            let proposal = self.network.add_block(block, commit.clone()).await;
            self.reset_by_commit(commit, proposal).await;
            self.synced_block_result_send.send(true).await.unwrap();
            None
          } else {
            break;
          }
        },

        // Handle our messages
        () = queue_future => {
          Some((true, self.queue.pop_front().unwrap(), None))
        },

        // L57-67
        // Handle any timeouts
        step = self.block.round().timeout_future().fuse() => {
          // Remove the timeout so it doesn't persist, always being the selected future due to bias
          // While this does enable the timeout to be entered again, the timeout setting code will
          // never attempt to add a timeout after its timeout has expired
          // (due to it setting an `upon` boolean)
          self.block.round_mut().timeouts.remove(&step);

          match step {
            Step::Propose => {
              // Only run if it's still the step in question
              if self.block.round().step == step {
                // Slash the validator for not proposing when they should've
                log::debug!(target: "tendermint", "validator didn't propose when they should have");
                // this slash will be voted on.
                self.slash(
                  self.weights.proposer(self.block.number, self.block.round().number),
                  SlashEvent::Id(
                    SlashReason::FailToPropose,
                    self.block.number.0,
                    self.block.round().number.0,
                  ),
                ).await;
                self.broadcast(Data::Prevote(None));
              }
            },
            Step::Prevote => {
              // Only run if it's still the step in question
              if self.block.round().step == step {
                self.broadcast(Data::Precommit(None))
              }
            },
            Step::Precommit => {
              self.round(RoundNumber(self.block.round().number.0 + 1), None);
            }
          };

          // Execute the upons now that the state has changed
          self.all_any_round_upons(self.block.round().number).await;
          self.all_current_round_upons().await;

          None
        },

        // If it's been more than 60s, rebroadcast our own messages
        () = rebroadcast_future => {
          log::trace!("rebroadcast future hit within tendermint machine");
          let key = message_tape_key(self.genesis);
          let messages = self.db.get(key).unwrap_or(vec![]);
          let mut messages = messages.as_slice();
          while !messages.is_empty() {
            self.network.broadcast(
              SignedMessageFor::<N>::decode(&mut IoReader(&mut messages))
                .expect("saved invalid message to DB")
            ).await;
          }

          // Reset the rebroadcast future
          rebroadcast_future = Box::pin(sleep(core::time::Duration::from_secs(60))).fuse();

          None
        },

        // Handle any received messages
        msg = self.msg_recv.next() => {
          if let Some(msg) = msg {
            if !msg.verify_signature(&self.validators) {
              continue;
            }
            Some((false, msg.msg, Some(msg.sig)))
          } else {
            break;
          }
        }
      } {
        if our_message {
          assert!(sig.is_none());
          sig = Some(self.signer.sign(&msg.encode()).await);
        }
        let sig = sig.unwrap();

        let signed_msg = SignedMessage { msg: msg.clone(), sig: sig.clone() };
        let res = self.message(&signed_msg).await;
        // If this is our message, and we hit an invariant, we could be slashed.
        // We only broadcast our message after running it ourselves, to ensure it doesn't error, to
        // ensure we don't get slashed on invariants.
        if res.is_err() && our_message {
          panic!("honest node (ourselves) had invalid behavior");
        }

        // Save this message to a linear tape of all our messages for this block, if ours
        // TODO: Since we do this after we mark this message as sent to prevent equivocations, a
        // precisely timed reboot could cause this message marked as sent yet not added to the tape
        if our_message {
          let message_tape_key = message_tape_key(self.genesis);
          let mut txn = self.db.txn();
          let mut message_tape = txn.get(&message_tape_key).unwrap_or(vec![]);
          message_tape.extend(signed_msg.encode());
          txn.put(&message_tape_key, message_tape);
          txn.commit();
        }

        // Re-broadcast this since it's an original consensus message worth handling
        if res.is_ok() {
          self.network.broadcast(signed_msg).await;
        }
      }
    }
  }
}


================================================
FILE: coordinator/tributary/tendermint/src/message_log.rs
================================================
use std::{sync::Arc, collections::HashMap};

use parity_scale_codec::Encode;

use crate::{ext::*, RoundNumber, Step, DataFor, SignedMessageFor, Evidence};

// Per-round log: validator -> (step -> the signed message they sent for that step)
type RoundLog<N> = HashMap<<N as Network>::ValidatorId, HashMap<Step, SignedMessageFor<N>>>;

/// Log of all messages received for a block, tracking participation weight per round/step and
/// per distinct piece of data (used for supermajority checks).
pub(crate) struct MessageLog<N: Network> {
  weights: Arc<N::Weights>,
  round_participation: HashMap<RoundNumber, u64>,
  participation: HashMap<(RoundNumber, Step), u64>,
  message_instances: HashMap<(RoundNumber, DataFor<N>), u64>,
  pub(crate) log: HashMap<RoundNumber, RoundLog<N>>,
}

impl<N: Network> MessageLog<N> {
  pub(crate) fn new(weights: Arc<N::Weights>) -> MessageLog<N> {
    MessageLog {
      weights,
      round_participation: HashMap::new(),
      participation: HashMap::new(),
      message_instances: HashMap::new(),
      log: HashMap::new(),
    }
  }

  // Returns true if it's a new message
  pub(crate) fn log(&mut self, signed: SignedMessageFor<N>) -> Result<bool, Evidence> {
    let msg = &signed.msg;
    // Clarity, and safety around default != new edge cases
    let round = self.log.entry(msg.round).or_insert_with(HashMap::new);
    let msgs = round.entry(msg.sender).or_insert_with(HashMap::new);

    // Handle message replays without issue.
It's only multiple messages which is malicious let step = msg.data.step(); if let Some(existing) = msgs.get(&step) { if existing.msg.data != msg.data { log::debug!( target: "tendermint", "Validator sent multiple messages for the same block + round + step" ); Err(Evidence::ConflictingMessages(existing.encode(), signed.encode()))?; } return Ok(false); } // Since we have a new message, update the participation let sender_weight = self.weights.weight(msg.sender); if msgs.is_empty() { *self.round_participation.entry(msg.round).or_insert_with(|| 0) += sender_weight; } *self.participation.entry((msg.round, step)).or_insert_with(|| 0) += sender_weight; *self.message_instances.entry((msg.round, msg.data.clone())).or_insert_with(|| 0) += sender_weight; msgs.insert(step, signed); Ok(true) } // Get the participation in a given round pub(crate) fn round_participation(&self, round: RoundNumber) -> u64 { *self.round_participation.get(&round).unwrap_or(&0) } // Check if a supermajority of nodes have participated on a specific step pub(crate) fn has_participation(&self, round: RoundNumber, step: Step) -> bool { *self.participation.get(&(round, step)).unwrap_or(&0) >= self.weights.threshold() } // Check if consensus has been reached on a specific piece of data pub(crate) fn has_consensus(&self, round: RoundNumber, data: &DataFor) -> bool { *self.message_instances.get(&(round, data.clone())).unwrap_or(&0) >= self.weights.threshold() } } ================================================ FILE: coordinator/tributary/tendermint/src/round.rs ================================================ use std::{ marker::PhantomData, time::{Duration, Instant}, collections::HashMap, }; use futures_util::{FutureExt, future}; use patchable_async_sleep::sleep; use crate::{ time::CanonicalInstant, Step, ext::{RoundNumber, Network}, }; pub struct RoundData { _network: PhantomData, pub number: RoundNumber, pub start_time: CanonicalInstant, pub step: Step, pub timeouts: HashMap, } impl RoundData { pub fn 
new(number: RoundNumber, start_time: CanonicalInstant) -> Self { RoundData { _network: PhantomData, number, start_time, step: Step::Propose, timeouts: HashMap::new(), } } fn timeout(&self, step: Step) -> CanonicalInstant { let adjusted_block = N::BLOCK_PROCESSING_TIME * (self.number.0 + 1); let adjusted_latency = N::LATENCY_TIME * (self.number.0 + 1); let offset = Duration::from_millis( (match step { Step::Propose => adjusted_block + adjusted_latency, Step::Prevote => adjusted_block + (2 * adjusted_latency), Step::Precommit => adjusted_block + (3 * adjusted_latency), }) .into(), ); self.start_time + offset } pub fn end_time(&self) -> CanonicalInstant { self.timeout(Step::Precommit) } pub(crate) fn set_timeout(&mut self, step: Step) { let timeout = self.timeout(step).instant(); self.timeouts.entry(step).or_insert(timeout); } // Poll all set timeouts, returning the Step whose timeout has just expired pub(crate) async fn timeout_future(&self) -> Step { /* let now = Instant::now(); log::trace!( target: "tendermint", "getting timeout_future, from step {:?}, off timeouts: {:?}", self.step, self.timeouts.iter().map(|(k, v)| (k, v.duration_since(now))).collect::>() ); */ let timeout_future = |step| { let timeout = self.timeouts.get(&step).copied(); (async move { if let Some(timeout) = timeout { sleep(timeout.saturating_duration_since(Instant::now())).await; } else { future::pending::<()>().await; } step }) .fuse() }; let propose_timeout = timeout_future(Step::Propose); let prevote_timeout = timeout_future(Step::Prevote); let precommit_timeout = timeout_future(Step::Precommit); futures_util::pin_mut!(propose_timeout, prevote_timeout, precommit_timeout); futures_util::select_biased! 
{ step = propose_timeout => step, step = prevote_timeout => step, step = precommit_timeout => step, } } } ================================================ FILE: coordinator/tributary/tendermint/src/time.rs ================================================ use core::ops::Add; use std::time::{UNIX_EPOCH, SystemTime, Instant, Duration}; #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub struct CanonicalInstant { /// Time since the epoch. time: u64, /// An Instant synchronized with the above time. instant: Instant, } pub(crate) fn sys_time(time: u64) -> SystemTime { UNIX_EPOCH + Duration::from_secs(time) } impl CanonicalInstant { pub fn new(time: u64) -> CanonicalInstant { // This is imprecise yet should be precise enough, as it'll resolve within a few ms let instant_now = Instant::now(); let sys_now = SystemTime::now(); // If the time is in the future, this will be off by that much time let elapsed = sys_now.duration_since(sys_time(time)).unwrap_or(Duration::ZERO); // Except for the fact this panics here let synced_instant = instant_now.checked_sub(elapsed).unwrap(); CanonicalInstant { time, instant: synced_instant } } pub fn canonical(&self) -> u64 { self.time } pub fn instant(&self) -> Instant { self.instant } } impl Add for CanonicalInstant { type Output = CanonicalInstant; fn add(self, duration: Duration) -> CanonicalInstant { CanonicalInstant { time: self.time + duration.as_secs(), instant: self.instant + duration } } } ================================================ FILE: coordinator/tributary/tendermint/tests/ext.rs ================================================ use std::{ sync::Arc, time::{UNIX_EPOCH, SystemTime, Duration}, }; use async_trait::async_trait; use parity_scale_codec::{Encode, Decode}; use futures_util::sink::SinkExt; use tokio::{sync::RwLock, time::sleep}; use serai_db::MemDb; use tendermint_machine::{ ext::*, SignedMessageFor, SyncedBlockSender, SyncedBlockResultReceiver, MessageSender, SlashEvent, TendermintMachine, TendermintHandle, }; type 
TestValidatorId = u16; type TestBlockId = [u8; 4]; struct TestSigner(u16); #[async_trait] impl Signer for TestSigner { type ValidatorId = TestValidatorId; type Signature = [u8; 32]; async fn validator_id(&self) -> Option { Some(self.0) } async fn sign(&self, msg: &[u8]) -> [u8; 32] { let mut sig = [0; 32]; sig[.. 2].copy_from_slice(&self.0.to_le_bytes()); sig[2 .. (2 + 30.min(msg.len()))].copy_from_slice(&msg[.. 30.min(msg.len())]); sig } } #[derive(Clone)] struct TestSignatureScheme; impl SignatureScheme for TestSignatureScheme { type ValidatorId = TestValidatorId; type Signature = [u8; 32]; type AggregateSignature = Vec<[u8; 32]>; type Signer = TestSigner; #[must_use] fn verify(&self, validator: u16, msg: &[u8], sig: &[u8; 32]) -> bool { (sig[.. 2] == validator.to_le_bytes()) && (sig[2 ..] == [msg, &[0; 30]].concat()[.. 30]) } fn aggregate( &self, _: &[Self::ValidatorId], _: &[u8], sigs: &[Self::Signature], ) -> Self::AggregateSignature { sigs.to_vec() } #[must_use] fn verify_aggregate( &self, signers: &[TestValidatorId], msg: &[u8], sigs: &Vec<[u8; 32]>, ) -> bool { assert_eq!(signers.len(), sigs.len()); for sig in signers.iter().zip(sigs.iter()) { assert!(self.verify(*sig.0, msg, sig.1)); } true } } struct TestWeights; impl Weights for TestWeights { type ValidatorId = TestValidatorId; fn total_weight(&self) -> u64 { 4 } fn weight(&self, id: TestValidatorId) -> u64 { [1; 4][usize::from(id)] } fn proposer(&self, number: BlockNumber, round: RoundNumber) -> TestValidatorId { TestValidatorId::try_from((number.0 + u64::from(round.0)) % 4).unwrap() } } #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] struct TestBlock { id: TestBlockId, valid: Result<(), BlockError>, } impl Block for TestBlock { type Id = TestBlockId; fn id(&self) -> TestBlockId { self.id } } #[allow(clippy::type_complexity)] struct TestNetwork( u16, Arc, SyncedBlockSender, SyncedBlockResultReceiver)>>>, ); #[async_trait] impl Network for TestNetwork { type Db = MemDb; type ValidatorId = 
TestValidatorId; type SignatureScheme = TestSignatureScheme; type Weights = TestWeights; type Block = TestBlock; const BLOCK_PROCESSING_TIME: u32 = 2000; const LATENCY_TIME: u32 = 1000; fn signer(&self) -> TestSigner { TestSigner(self.0) } fn signature_scheme(&self) -> TestSignatureScheme { TestSignatureScheme } fn weights(&self) -> TestWeights { TestWeights } async fn broadcast(&mut self, msg: SignedMessageFor) { for (messages, _, _) in self.1.write().await.iter_mut() { messages.send(msg.clone()).await.unwrap(); } } async fn slash(&mut self, id: TestValidatorId, event: SlashEvent) { println!("Slash for {id} due to {event:?}"); } async fn validate(&self, block: &TestBlock) -> Result<(), BlockError> { block.valid } async fn add_block( &mut self, block: TestBlock, commit: Commit, ) -> Option { println!("Adding {:?}", &block); assert!(block.valid.is_ok()); assert!(self.verify_commit(block.id(), &commit)); Some(TestBlock { id: (u32::from_le_bytes(block.id) + 1).to_le_bytes(), valid: Ok(()) }) } } impl TestNetwork { async fn new( validators: usize, start_time: u64, ) -> Arc, SyncedBlockSender, SyncedBlockResultReceiver)>>> { let arc = Arc::new(RwLock::new(vec![])); { let mut write = arc.write().await; for i in 0 .. 
validators { let i = u16::try_from(i).unwrap(); let TendermintHandle { messages, synced_block, synced_block_result, machine } = TendermintMachine::new( MemDb::new(), TestNetwork(i, arc.clone()), [0; 32], BlockNumber(1), start_time, TestBlock { id: 1u32.to_le_bytes(), valid: Ok(()) }, ) .await; tokio::spawn(machine.run()); write.push((messages, synced_block, synced_block_result)); } } arc } } #[tokio::test] async fn test_machine() { TestNetwork::new(4, SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs()).await; sleep(Duration::from_secs(30)).await; } #[tokio::test] async fn test_machine_with_historic_start_time() { TestNetwork::new(4, SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs() - 60).await; sleep(Duration::from_secs(30)).await; } ================================================ FILE: crypto/ciphersuite/Cargo.toml ================================================ [package] name = "ciphersuite" version = "0.4.2" description = "Ciphersuites built around ff/group" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/crypto/ciphersuite" authors = ["Luke Parker "] keywords = ["ciphersuite", "ff", "group"] edition = "2021" rust-version = "1.66" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] std-shims = { path = "../../common/std-shims", version = "^0.1.1", default-features = false, optional = true } rand_core = { version = "0.6", default-features = false } zeroize = { version = "^1.5", default-features = false, features = ["derive"] } subtle = { version = "^2.4", default-features = false } digest = { version = "0.10", default-features = false, features = ["core-api"] } transcript = { package = "flexible-transcript", path = "../transcript", version = "^0.3.2", default-features = false } ff = { version = "0.13", default-features = false, features = ["bits"] } group = { version = "0.13", default-features = false } [dev-dependencies] hex = { 
version = "0.4", default-features = false, features = ["std"] } rand_core = { version = "0.6", default-features = false, features = ["std"] } ff-group-tests = { version = "0.13", path = "../ff-group-tests" } [features] alloc = ["std-shims", "ff/alloc"] std = [ "std-shims/std", "rand_core/std", "zeroize/std", "subtle/std", "digest/std", "transcript/std", "ff/std", ] default = ["std"] ================================================ FILE: crypto/ciphersuite/LICENSE ================================================ MIT License Copyright (c) 2021-2023 Luke Parker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: crypto/ciphersuite/README.md ================================================ # Ciphersuite Ciphersuites for elliptic curves premised on ff/group. 
This library, except for the not recommended Ed448 ciphersuite, was [audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf), culminating in commit [669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06). Any subsequent changes have not undergone auditing. This library is usable under no_std. The `alloc` and `std` features enable reading from the `io::Read` trait, shimmed by `std-shims` under `alloc`. ### Secp256k1/P-256 Secp256k1 and P-256 are offered via [k256](https://crates.io/crates/k256) and [p256](https://crates.io/crates/p256), two libraries maintained by [RustCrypto](https://github.com/RustCrypto). Their `hash_to_F` is the [IETF's hash to curve](https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html), yet applied to their scalar field. Please see the [`ciphersuite-kp256`](https://docs.rs/ciphersuite-kp256) crate for more info. ### Ed25519/Ristretto Ed25519/Ristretto are offered via [dalek-ff-group](https://crates.io/crates/dalek-ff-group), an ff/group wrapper around [curve25519-dalek](https://crates.io/crates/curve25519-dalek). Their `hash_to_F` is the wide reduction of SHA2-512, as used in [RFC-8032](https://www.rfc-editor.org/rfc/rfc8032). This is also compliant with the draft [RFC-RISTRETTO](https://www.ietf.org/archive/id/draft-irtf-cfrg-ristretto255-decaf448-05.html). The domain-separation tag is naively prefixed to the message. Please see the [`dalek-ff-group`](https://docs.rs/dalek-ff-group) crate for more info. ### Ed448 Ed448 is offered via [minimal-ed448](https://crates.io/crates/minimal-ed448), an explicitly not recommended, unaudited, incomplete Ed448 implementation, limited to its prime-order subgroup. Its `hash_to_F` is the wide reduction of SHAKE256, with a 114-byte output, as used in [RFC-8032](https://www.rfc-editor.org/rfc/rfc8032). 
The domain-separation tag is naively prefixed to the message. Please see the [`minimal-ed448`](https://docs.rs/minimal-ed448) crate for more info. ================================================ FILE: crypto/ciphersuite/kp256/Cargo.toml ================================================ [package] name = "ciphersuite-kp256" version = "0.4.0" description = "Ciphersuites built around ff/group" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/crypto/ciphersuite/kp256" authors = ["Luke Parker "] keywords = ["ciphersuite", "ff", "group"] edition = "2021" rust-version = "1.66" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] rand_core = { version = "0.6", default-features = false } zeroize = { version = "^1.5", default-features = false, features = ["derive"] } sha2 = { version = "0.10", default-features = false } elliptic-curve = { version = "0.13", default-features = false, features = ["hash2curve"] } p256 = { version = "^0.13.1", default-features = false, features = ["arithmetic", "bits", "hash2curve"] } k256 = { version = "^0.13.1", default-features = false, features = ["arithmetic", "bits", "hash2curve"] } ciphersuite = { path = "../", version = "0.4", default-features = false } [dev-dependencies] hex = { version = "0.4", default-features = false, features = ["std"] } rand_core = { version = "0.6", default-features = false, features = ["std"] } ff-group-tests = { version = "0.13", path = "../../ff-group-tests" } [features] alloc = ["ciphersuite/alloc"] std = [ "rand_core/std", "zeroize/std", "sha2/std", "elliptic-curve/std", "p256/std", "k256/std", "ciphersuite/std", ] default = ["std"] ================================================ FILE: crypto/ciphersuite/kp256/LICENSE ================================================ MIT License Copyright (c) 2021-2023 Luke Parker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and 
associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: crypto/ciphersuite/kp256/README.md ================================================ # Ciphersuite {k, p}256 SECP256k1 and P-256 Ciphersuites around k256 and p256. ================================================ FILE: crypto/ciphersuite/kp256/src/lib.rs ================================================ #![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(not(feature = "std"), no_std)] use zeroize::Zeroize; use sha2::Sha256; use elliptic_curve::{ generic_array::GenericArray, bigint::{NonZero, CheckedAdd, Encoding, U384}, hash2curve::{Expander, ExpandMsg, ExpandMsgXmd}, }; use ciphersuite::{group::ff::PrimeField, Ciphersuite}; macro_rules! 
kp_curve { ( $feature: literal, $lib: ident, $Ciphersuite: ident, $ID: literal ) => { impl Ciphersuite for $Ciphersuite { type F = $lib::Scalar; type G = $lib::ProjectivePoint; type H = Sha256; const ID: &'static [u8] = $ID; fn generator() -> Self::G { $lib::ProjectivePoint::GENERATOR } fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F { // While one of these two libraries does support directly hashing to the Scalar field, the // other doesn't. While that's probably an oversight, this is a universally working method // This method is from // https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html // Specifically, Section 5 // While that draft, overall, is intended for hashing to curves, that necessitates // detailing how to hash to a finite field. The draft comments that its mechanism for // doing so, which it uses to derive field elements, is also applicable to the scalar field // The hash_to_field function is intended to provide unbiased values // In order to do so, a wide reduction from an extra k bits is applied, minimizing bias to // 2^-k // k is intended to be the bits of security of the suite, which is 128 for secp256k1 and // P-256 const K: usize = 128; // L is the amount of bytes of material which should be used in the wide reduction // The 256 is for the bit-length of the primes, rounded up to the nearest byte threshold // This is a simplification of the formula from the end of section 5 const L: usize = (256 + K) / 8; // 48 // In order to perform this reduction, we need to use 48-byte numbers // First, convert the modulus to a 48-byte number // This is done by getting -1 as bytes, parsing it into a U384, and then adding back one let mut modulus = [0; L]; // The byte repr of scalars will be 32 big-endian bytes // Set the lower 32 bytes of our 48-byte array accordingly modulus[16 ..].copy_from_slice(&(Self::F::ZERO - Self::F::ONE).to_bytes()); // Use a checked_add + unwrap since this addition cannot fail (being a 32-byte value with // 48-bytes 
of space) // While a non-panicking saturating_add/wrapping_add could be used, they'd likely be less // performant let modulus = U384::from_be_slice(&modulus).checked_add(&U384::ONE).unwrap(); // The defined P-256 and secp256k1 ciphersuites both use expand_message_xmd let mut wide = U384::from_be_bytes({ let mut bytes = [0; 48]; ExpandMsgXmd::::expand_message(&[msg], &[dst], 48) .unwrap() .fill_bytes(&mut bytes); bytes }) .rem(&NonZero::new(modulus).unwrap()) .to_be_bytes(); // Now that this has been reduced back to a 32-byte value, grab the lower 32-bytes let mut array = *GenericArray::from_slice(&wide[16 ..]); let res = $lib::Scalar::from_repr(array).unwrap(); // Zeroize the temp values we can due to the possibility hash_to_F is being used for nonces wide.zeroize(); array.zeroize(); res } } }; } #[cfg(test)] fn test_oversize_dst() { use sha2::Digest; // The draft specifies DSTs >255 bytes should be hashed into a 32-byte DST let oversize_dst = [0x00; 256]; let actual_dst = Sha256::digest([b"H2C-OVERSIZE-DST-".as_ref(), &oversize_dst].concat()); // Test the hash_to_F function handles this // If it didn't, these would return different values assert_eq!(C::hash_to_F(&oversize_dst, &[]), C::hash_to_F(&actual_dst, &[])); } /// Ciphersuite for Secp256k1. /// /// hash_to_F is implemented via the IETF draft for hash to curve's hash_to_field (v16). 
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)] pub struct Secp256k1; kp_curve!("secp256k1", k256, Secp256k1, b"secp256k1"); #[test] fn test_secp256k1() { ff_group_tests::group::test_prime_group_bits::<_, k256::ProjectivePoint>(&mut rand_core::OsRng); // Ideally, a test vector from hash_to_field (not FROST) would be here // Unfortunately, the IETF draft only provides vectors for field elements, not scalars // Vectors have been requested in // https://github.com/cfrg/draft-irtf-cfrg-hash-to-curve/issues/343 assert_eq!( Secp256k1::hash_to_F( b"FROST-secp256k1-SHA256-v11nonce", &hex::decode( "\ 80cbea5e405d169999d8c4b30b755fedb26ab07ec8198cda4873ed8ce5e16773\ 08f89ffe80ac94dcb920c26f3f46140bfc7f95b493f8310f5fc1ea2b01f4254c" ) .unwrap() ) .to_repr() .iter() .copied() .collect::>(), hex::decode("acc83278035223c1ba464e2d11bfacfc872b2b23e1041cf5f6130da21e4d8068").unwrap() ); test_oversize_dst::(); } /// Ciphersuite for P-256. /// /// hash_to_F is implemented via the IETF draft for hash to curve's hash_to_field (v16). #[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)] pub struct P256; kp_curve!("p256", p256, P256, b"P-256"); #[test] fn test_p256() { ff_group_tests::group::test_prime_group_bits::<_, p256::ProjectivePoint>(&mut rand_core::OsRng); assert_eq!( P256::hash_to_F( b"FROST-P256-SHA256-v11nonce", &hex::decode( "\ f4e8cf80aec3f888d997900ac7e3e349944b5a6b47649fc32186d2f1238103c6\ 0c9c1a0fe806c184add50bbdcac913dda73e482daf95dcb9f35dbb0d8a9f7731" ) .unwrap() ) .to_repr() .iter() .copied() .collect::>(), hex::decode("f871dfcf6bcd199342651adc361b92c941cb6a0d8c8c1a3b91d79e2c1bf3722d").unwrap() ); test_oversize_dst::(); } ================================================ FILE: crypto/ciphersuite/src/lib.md ================================================ # Ciphersuite Ciphersuites for elliptic curves premised on ff/group. 
This library was [audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf), culminating in commit [669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06). Any subsequent changes have not undergone auditing. This library is usable under no_std. The `alloc` and `std` features enable reading from the `io::Read` trait, shimmed by `std-shims` under `alloc`. ================================================ FILE: crypto/ciphersuite/src/lib.rs ================================================ #![cfg_attr(docsrs, feature(doc_cfg))] #![doc = include_str!("lib.md")] #![cfg_attr(not(feature = "std"), no_std)] use core::fmt::Debug; #[cfg(any(feature = "alloc", feature = "std"))] #[allow(unused_imports)] use std_shims::prelude::*; #[cfg(any(feature = "alloc", feature = "std"))] use std_shims::io::{self, Read}; use rand_core::{RngCore, CryptoRng}; use zeroize::Zeroize; use subtle::ConstantTimeEq; use digest::{core_api::BlockSizeUser, Digest, HashMarker}; use transcript::SecureDigest; pub use group; use group::{ ff::{Field, PrimeField, PrimeFieldBits}, Group, GroupOps, prime::PrimeGroup, }; #[cfg(any(feature = "alloc", feature = "std"))] use group::GroupEncoding; /// Unified trait defining a ciphersuite around an elliptic curve. pub trait Ciphersuite: 'static + Send + Sync + Clone + Copy + PartialEq + Eq + Debug + Zeroize { /// Scalar field element type. // This is available via G::Scalar yet `C::G::Scalar` is ambiguous, forcing horrific accesses type F: PrimeField + PrimeFieldBits + Zeroize; /// Group element type. type G: Group + GroupOps + PrimeGroup + Zeroize + ConstantTimeEq; /// Hash algorithm used with this curve. // Requires BlockSizeUser so it can be used within Hkdf which requires that. type H: Send + Clone + BlockSizeUser + Digest + HashMarker + SecureDigest; /// ID for this curve. 
const ID: &'static [u8]; /// Generator for the group. // While group does provide this in its API, privacy coins may want to use a custom basepoint fn generator() -> Self::G; /// Hash the provided domain-separation tag and message to a scalar. Ciphersuites MAY naively /// prefix the tag to the message, enabling transpotion between the two. Accordingly, this /// function should NOT be used in any scheme where one tag is a valid substring of another /// UNLESS the specific Ciphersuite is verified to handle the DST securely. /// /// Verifying specific ciphersuites have secure tag handling is not recommended, due to it /// breaking the intended modularity of ciphersuites. Instead, component-specific tags with /// further purpose tags are recommended ("Schnorr-nonce", "Schnorr-chal"). #[allow(non_snake_case)] fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F; /// Generate a random non-zero scalar. #[allow(non_snake_case)] fn random_nonzero_F(rng: &mut R) -> Self::F { let mut res; while { res = Self::F::random(&mut *rng); res.ct_eq(&Self::F::ZERO).into() } {} res } /// Read a canonical scalar from something implementing std::io::Read. #[cfg(any(feature = "alloc", feature = "std"))] #[allow(non_snake_case)] fn read_F(reader: &mut R) -> io::Result { let mut encoding = ::Repr::default(); reader.read_exact(encoding.as_mut())?; // ff mandates this is canonical let res = Option::::from(Self::F::from_repr(encoding)) .ok_or_else(|| io::Error::other("non-canonical scalar")); encoding.as_mut().zeroize(); res } /// Read a canonical point from something implementing std::io::Read. /// /// The provided implementation is safe so long as `GroupEncoding::to_bytes` always returns a /// canonical serialization. 
#[cfg(any(feature = "alloc", feature = "std"))] #[allow(non_snake_case)] fn read_G(reader: &mut R) -> io::Result { let mut encoding = ::Repr::default(); reader.read_exact(encoding.as_mut())?; let point = Option::::from(Self::G::from_bytes(&encoding)) .ok_or_else(|| io::Error::other("invalid point"))?; if point.to_bytes().as_ref() != encoding.as_ref() { Err(io::Error::other("non-canonical point"))?; } Ok(point) } } ================================================ FILE: crypto/dalek-ff-group/Cargo.toml ================================================ [package] name = "dalek-ff-group" version = "0.4.4" description = "ff/group bindings around curve25519-dalek" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dalek-ff-group" authors = ["Luke Parker "] keywords = ["curve25519", "ed25519", "ristretto", "dalek", "group"] edition = "2021" rust-version = "1.65" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] rustversion = "1" zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] } subtle = { version = "^2.4", default-features = false } rand_core = { version = "0.6", default-features = false } digest = { version = "0.10", default-features = false } sha2 = { version = "0.10", default-features = false } ff = { version = "0.13", default-features = false, features = ["bits"] } group = { version = "0.13", default-features = false } ciphersuite = { path = "../ciphersuite", default-features = false } crypto-bigint = { version = "0.5", default-features = false, features = ["zeroize"] } curve25519-dalek = { version = ">= 4.0, < 4.2", default-features = false, features = ["alloc", "zeroize", "digest", "group", "precomputed-tables"] } [dev-dependencies] hex = "0.4" rand_core = { version = "0.6", default-features = false, features = ["std"] } ff-group-tests = { path = "../ff-group-tests" } [features] alloc = ["zeroize/alloc", "ciphersuite/alloc"] 
std = ["alloc", "zeroize/std", "subtle/std", "rand_core/std", "digest/std", "sha2/std", "ciphersuite/std"] default = ["std"] ================================================ FILE: crypto/dalek-ff-group/LICENSE ================================================ MIT License Copyright (c) 2022-2023 Luke Parker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: crypto/dalek-ff-group/README.md ================================================ # Dalek FF/Group ff/group bindings around curve25519-dalek with a from_hash/random function based around modern dependencies. This library was [audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf), culminating in commit [669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06). Any subsequent changes have not undergone auditing. 
This library is usable under no_std. ================================================ FILE: crypto/dalek-ff-group/src/ciphersuite.rs ================================================ use zeroize::Zeroize; use sha2::{Digest, Sha512}; use group::Group; use crate::Scalar; use ciphersuite::Ciphersuite; macro_rules! dalek_curve { ( $feature: literal, $Ciphersuite: ident, $Point: ident, $ID: literal ) => { use crate::$Point; impl Ciphersuite for $Ciphersuite { type F = Scalar; type G = $Point; type H = Sha512; const ID: &'static [u8] = $ID; fn generator() -> Self::G { $Point::generator() } fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F { Scalar::from_hash(Sha512::new_with_prefix(&[dst, data].concat())) } } }; } /// Ciphersuite for Ristretto. /// /// hash_to_F is implemented with a naive concatenation of the dst and data, allowing transposition /// between the two. This means `dst: b"abc", data: b"def"`, will produce the same scalar as /// `dst: "abcdef", data: b""`. Please use carefully, not letting dsts be substrings of each other. #[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)] pub struct Ristretto; dalek_curve!("ristretto", Ristretto, RistrettoPoint, b"ristretto"); #[test] fn test_ristretto() { ff_group_tests::group::test_prime_group_bits::<_, RistrettoPoint>(&mut rand_core::OsRng); assert_eq!( Ristretto::hash_to_F( b"FROST-RISTRETTO255-SHA512-v11nonce", &hex::decode( "\ 81800157bb554f299fe0b6bd658e4c4591d74168b5177bf55e8dceed59dc80c7\ 5c3430d391552f6e60ecdc093ff9f6f4488756aa6cebdbad75a768010b8f830e" ) .unwrap() ) .to_bytes() .as_ref(), &hex::decode("40f58e8df202b21c94f826e76e4647efdb0ea3ca7ae7e3689bc0cbe2e2f6660c").unwrap() ); } /// Ciphersuite for Ed25519, inspired by RFC-8032. /// /// hash_to_F is implemented with a naive concatenation of the dst and data, allowing transposition /// between the two. This means `dst: b"abc", data: b"def"`, will produce the same scalar as /// `dst: "abcdef", data: b""`. 
Please use carefully, not letting dsts be substrings of each other. #[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)] pub struct Ed25519; dalek_curve!("ed25519", Ed25519, EdwardsPoint, b"edwards25519"); #[test] fn test_ed25519() { ff_group_tests::group::test_prime_group_bits::<_, EdwardsPoint>(&mut rand_core::OsRng); // Ideally, a test vector from RFC-8032 (not FROST) would be here // Unfortunately, the IETF draft doesn't provide any vectors for the derived challenges assert_eq!( Ed25519::hash_to_F( b"FROST-ED25519-SHA512-v11nonce", &hex::decode( "\ 9d06a6381c7a4493929761a73692776772b274236fb5cfcc7d1b48ac3a9c249f\ 929dcc590407aae7d388761cddb0c0db6f5627aea8e217f4a033f2ec83d93509" ) .unwrap() ) .to_bytes() .as_ref(), &hex::decode("70652da3e8d7533a0e4b9e9104f01b48c396b5b553717784ed8d05c6a36b9609").unwrap() ); } ================================================ FILE: crypto/dalek-ff-group/src/field.rs ================================================ use core::{ ops::{Add, AddAssign, Sub, SubAssign, Neg, Mul, MulAssign}, iter::{Sum, Product}, }; use zeroize::Zeroize; use rand_core::RngCore; use subtle::{ Choice, CtOption, ConstantTimeEq, ConstantTimeLess, ConditionallyNegatable, ConditionallySelectable, }; use crypto_bigint::{ Integer, NonZero, Encoding, U256, U512, modular::constant_mod::{ResidueParams, Residue}, impl_modulus, }; use group::ff::{Field, PrimeField, FieldBits, PrimeFieldBits, FromUniformBytes}; use crate::{u8_from_bool, constant_time, math_op, math}; // 2 ** 255 - 19 // Uses saturating_sub because checked_sub isn't available at compile time const MODULUS: U256 = U256::from_u8(1).shl_vartime(255).saturating_sub(&U256::from_u8(19)); const WIDE_MODULUS: U512 = U256::ZERO.concat(&MODULUS); impl_modulus!( FieldModulus, U256, // 2 ** 255 - 19 "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffed" ); type ResidueType = Residue; /// A constant-time implementation of the Ed25519 field. 
#[derive(Clone, Copy, PartialEq, Eq, Default, Debug, Zeroize)] #[repr(transparent)] pub struct FieldElement(ResidueType); // Square root of -1. // Formula from RFC-8032 (modp_sqrt_m1/sqrt8k5 z) // 2 ** ((MODULUS - 1) // 4) % MODULUS const SQRT_M1: FieldElement = FieldElement( ResidueType::new(&U256::from_u8(2)) .pow(&MODULUS.saturating_sub(&U256::ONE).wrapping_div(&U256::from_u8(4))), ); // Constant useful in calculating square roots (RFC-8032 sqrt8k5's exponent used to calculate y) const MOD_3_8: FieldElement = FieldElement(ResidueType::new( &MODULUS.saturating_add(&U256::from_u8(3)).wrapping_div(&U256::from_u8(8)), )); // Constant useful in sqrt_ratio_i (sqrt(u / v)) const MOD_5_8: FieldElement = FieldElement(ResidueType::sub(&MOD_3_8.0, &ResidueType::ONE)); fn reduce(x: U512) -> ResidueType { ResidueType::new(&U256::from_le_slice( &x.rem(&NonZero::new(WIDE_MODULUS).unwrap()).to_le_bytes()[.. 32], )) } constant_time!(FieldElement, ResidueType); math!( FieldElement, FieldElement, |x: ResidueType, y: ResidueType| x.add(&y), |x: ResidueType, y: ResidueType| x.sub(&y), |x: ResidueType, y: ResidueType| x.mul(&y) ); macro_rules! 
from_wrapper { ($uint: ident) => { impl From<$uint> for FieldElement { fn from(a: $uint) -> FieldElement { Self(ResidueType::new(&U256::from(a))) } } }; } from_wrapper!(u8); from_wrapper!(u16); from_wrapper!(u32); from_wrapper!(u64); from_wrapper!(u128); impl Neg for FieldElement { type Output = Self; fn neg(self) -> Self::Output { Self(self.0.neg()) } } impl Neg for &FieldElement { type Output = FieldElement; fn neg(self) -> Self::Output { (*self).neg() } } impl Field for FieldElement { const ZERO: Self = Self(ResidueType::ZERO); const ONE: Self = Self(ResidueType::ONE); fn random(mut rng: impl RngCore) -> Self { let mut bytes = [0; 64]; rng.fill_bytes(&mut bytes); FieldElement(reduce(U512::from_le_bytes(bytes))) } fn square(&self) -> Self { FieldElement(self.0.square()) } fn double(&self) -> Self { FieldElement(self.0.add(&self.0)) } fn invert(&self) -> CtOption { const NEG_2: FieldElement = FieldElement(ResidueType::new(&MODULUS.saturating_sub(&U256::from_u8(2)))); CtOption::new(self.pow(NEG_2), !self.is_zero()) } // RFC-8032 sqrt8k5 fn sqrt(&self) -> CtOption { let tv1 = self.pow(MOD_3_8); let tv2 = tv1 * SQRT_M1; let candidate = Self::conditional_select(&tv2, &tv1, tv1.square().ct_eq(self)); CtOption::new(candidate, candidate.square().ct_eq(self)) } fn sqrt_ratio(u: &FieldElement, v: &FieldElement) -> (Choice, FieldElement) { let i = SQRT_M1; let u = *u; let v = *v; let v3 = v.square() * v; let v7 = v3.square() * v; let mut r = (u * v3) * (u * v7).pow(MOD_5_8); let check = v * r.square(); let correct_sign = check.ct_eq(&u); let flipped_sign = check.ct_eq(&(-u)); let flipped_sign_i = check.ct_eq(&((-u) * i)); r.conditional_assign(&(r * i), flipped_sign | flipped_sign_i); let r_is_negative = r.is_odd(); r.conditional_negate(r_is_negative); (correct_sign | flipped_sign, r) } } impl PrimeField for FieldElement { type Repr = [u8; 32]; // Big endian representation of the modulus const MODULUS: &'static str = 
"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffed"; const NUM_BITS: u32 = 255; const CAPACITY: u32 = 254; const TWO_INV: Self = FieldElement(ResidueType::new(&U256::from_u8(2)).invert().0); // This was calculated with the method from the ff crate docs // SageMath GF(modulus).primitive_element() const MULTIPLICATIVE_GENERATOR: Self = Self(ResidueType::new(&U256::from_u8(2))); // This was set per the specification in the ff crate docs // The number of leading zero bits in the little-endian bit representation of (modulus - 1) const S: u32 = 2; // This was calculated via the formula from the ff crate docs // Self::MULTIPLICATIVE_GENERATOR ** ((modulus - 1) >> Self::S) const ROOT_OF_UNITY: Self = FieldElement(ResidueType::new(&U256::from_be_hex( "2b8324804fc1df0b2b4d00993dfbd7a72f431806ad2fe478c4ee1b274a0ea0b0", ))); // Self::ROOT_OF_UNITY.invert() const ROOT_OF_UNITY_INV: Self = FieldElement(Self::ROOT_OF_UNITY.0.invert().0); // This was calculated via the formula from the ff crate docs // Self::MULTIPLICATIVE_GENERATOR ** (2 ** Self::S) const DELTA: Self = FieldElement(ResidueType::new(&U256::from_be_hex( "0000000000000000000000000000000000000000000000000000000000000010", ))); fn from_repr(bytes: [u8; 32]) -> CtOption { let res = U256::from_le_bytes(bytes); CtOption::new(Self(ResidueType::new(&res)), res.ct_lt(&MODULUS)) } fn to_repr(&self) -> [u8; 32] { self.0.retrieve().to_le_bytes() } fn is_odd(&self) -> Choice { self.0.retrieve().is_odd() } fn from_u128(num: u128) -> Self { Self::from(num) } } impl PrimeFieldBits for FieldElement { type ReprBits = [u8; 32]; fn to_le_bits(&self) -> FieldBits { self.to_repr().into() } fn char_le_bits() -> FieldBits { MODULUS.to_le_bytes().into() } } impl FieldElement { /// Create a FieldElement from a `crypto_bigint::U256`. /// /// This will reduce the `U256` by the modulus, into a member of the field. 
pub const fn from_u256(u256: &U256) -> Self { FieldElement(Residue::new(u256)) } /// Create a `FieldElement` from the reduction of a 512-bit number. /// /// The bytes are interpreted in little-endian format. pub fn wide_reduce(value: [u8; 64]) -> Self { FieldElement(reduce(U512::from_le_bytes(value))) } /// Perform an exponentiation. pub fn pow(&self, other: FieldElement) -> FieldElement { let mut table = [FieldElement::ONE; 16]; table[1] = *self; for i in 2 .. 16 { table[i] = table[i - 1] * self; } let mut res = FieldElement::ONE; let mut bits = 0; for (i, mut bit) in other.to_le_bits().iter_mut().rev().enumerate() { bits <<= 1; let mut bit = u8_from_bool(&mut bit); bits |= bit; bit.zeroize(); if ((i + 1) % 4) == 0 { if i != 3 { for _ in 0 .. 4 { res *= res; } } let mut scale_by = FieldElement::ONE; #[allow(clippy::needless_range_loop)] for i in 0 .. 16 { #[allow(clippy::cast_possible_truncation)] // Safe since 0 .. 16 { scale_by = <_>::conditional_select(&scale_by, &table[i], bits.ct_eq(&(i as u8))); } } res *= scale_by; bits = 0; } } res } /// The square root of u/v, as used for Ed25519 point decoding (RFC 8032 5.1.3) and within /// Ristretto (5.1 Extracting an Inverse Square Root). /// /// The result is only a valid square root if the Choice is true. /// RFC 8032 simply fails if there isn't a square root, leaving any return value undefined. /// Ristretto explicitly returns 0 or sqrt((SQRT_M1 * u) / v). 
pub fn sqrt_ratio_i(u: FieldElement, v: FieldElement) -> (Choice, FieldElement) { let i = SQRT_M1; let v3 = v.square() * v; let v7 = v3.square() * v; // Candidate root let mut r = (u * v3) * (u * v7).pow(MOD_5_8); // 8032 3.1 let check = v * r.square(); let correct_sign = check.ct_eq(&u); // 8032 3.2 conditional let neg_u = -u; let flipped_sign = check.ct_eq(&neg_u); // Ristretto Step 5 let flipped_sign_i = check.ct_eq(&(neg_u * i)); // 3.2 set r.conditional_assign(&(r * i), flipped_sign | flipped_sign_i); // Always return the even root, per Ristretto // This doesn't break Ed25519 point decoding as that doesn't expect these steps to return a // specific root // Ed25519 points include a dedicated sign bit to determine which root to use, so at worst // this is a pointless inefficiency r.conditional_negate(r.is_odd()); (correct_sign | flipped_sign, r) } } impl FromUniformBytes<64> for FieldElement { fn from_uniform_bytes(bytes: &[u8; 64]) -> Self { Self::wide_reduce(*bytes) } } impl Sum for FieldElement { fn sum>(iter: I) -> FieldElement { let mut res = FieldElement::ZERO; for item in iter { res += item; } res } } impl<'a> Sum<&'a FieldElement> for FieldElement { fn sum>(iter: I) -> FieldElement { iter.copied().sum() } } impl Product for FieldElement { fn product>(iter: I) -> FieldElement { let mut res = FieldElement::ONE; for item in iter { res *= item; } res } } impl<'a> Product<&'a FieldElement> for FieldElement { fn product>(iter: I) -> FieldElement { iter.copied().product() } } #[test] fn test_wide_modulus() { let mut wide = [0; 64]; wide[.. 
32].copy_from_slice(&MODULUS.to_le_bytes()); assert_eq!(wide, WIDE_MODULUS.to_le_bytes()); } #[test] fn test_sqrt_m1() { // Test equivalence against the known constant value const SQRT_M1_MAGIC: U256 = U256::from_be_hex("2b8324804fc1df0b2b4d00993dfbd7a72f431806ad2fe478c4ee1b274a0ea0b0"); assert_eq!(SQRT_M1.0.retrieve(), SQRT_M1_MAGIC); // Also test equivalence against the result of the formula from RFC-8032 (modp_sqrt_m1/sqrt8k5 z) // 2 ** ((MODULUS - 1) // 4) % MODULUS assert_eq!( SQRT_M1, FieldElement::from(2u8).pow(FieldElement(ResidueType::new( &(FieldElement::ZERO - FieldElement::ONE).0.retrieve().wrapping_div(&U256::from(4u8)) ))) ); } #[test] fn test_field() { ff_group_tests::prime_field::test_prime_field_bits::<_, FieldElement>(&mut rand_core::OsRng); } ================================================ FILE: crypto/dalek-ff-group/src/lib.rs ================================================ #![allow(deprecated)] #![cfg_attr(docsrs, feature(doc_cfg))] #![no_std] // Prevents writing new code, in what should be a simple wrapper, which requires std #![doc = include_str!("../README.md")] #![allow(clippy::redundant_closure_call)] use core::{ borrow::Borrow, ops::{Deref, Add, AddAssign, Sub, SubAssign, Neg, Mul, MulAssign}, iter::{Iterator, Sum, Product}, hash::{Hash, Hasher}, }; use zeroize::Zeroize; use subtle::{ConstantTimeEq, ConditionallySelectable}; use rand_core::RngCore; use digest::{consts::U64, Digest, HashMarker}; use subtle::{Choice, CtOption}; pub use curve25519_dalek as dalek; use dalek::{ constants::{self, BASEPOINT_ORDER}, scalar::Scalar as DScalar, edwards::{EdwardsPoint as DEdwardsPoint, EdwardsBasepointTable, CompressedEdwardsY}, ristretto::{RistrettoPoint as DRistrettoPoint, RistrettoBasepointTable, CompressedRistretto}, }; pub use constants::{ED25519_BASEPOINT_TABLE, RISTRETTO_BASEPOINT_TABLE}; use group::{ ff::{Field, PrimeField, FieldBits, PrimeFieldBits, FromUniformBytes}, Group, GroupEncoding, prime::PrimeGroup, }; mod field; pub use 
field::FieldElement;

mod ciphersuite;
pub use crate::ciphersuite::{Ed25519, Ristretto};

// Use black_box when possible
#[rustversion::since(1.66)]
mod black_box {
  pub(crate) fn black_box<T>(val: T) -> T {
    #[allow(clippy::incompatible_msrv)]
    core::hint::black_box(val)
  }
}
#[rustversion::before(1.66)]
mod black_box {
  pub(crate) fn black_box<T>(val: T) -> T {
    val
  }
}
use black_box::black_box;

// Convert a bool to a u8, zeroizing the bool, in a *presumably* constant-time manner
fn u8_from_bool(bit_ref: &mut bool) -> u8 {
  let bit_ref = black_box(bit_ref);

  let mut bit = black_box(*bit_ref);
  #[allow(clippy::cast_lossless)]
  let res = black_box(bit as u8);
  bit.zeroize();
  debug_assert!((res | 1) == 1);

  bit_ref.zeroize();
  res
}

// Convert a boolean to a Choice in a *presumably* constant time manner
fn choice(mut value: bool) -> Choice {
  Choice::from(u8_from_bool(&mut value))
}

macro_rules! deref_borrow {
  ($Source: ident, $Target: ident) => {
    impl Deref for $Source {
      type Target = $Target;

      fn deref(&self) -> &Self::Target {
        &self.0
      }
    }

    impl Borrow<$Target> for $Source {
      fn borrow(&self) -> &$Target {
        &self.0
      }
    }

    impl Borrow<$Target> for &$Source {
      fn borrow(&self) -> &$Target {
        &self.0
      }
    }
  };
}

macro_rules! constant_time {
  ($Value: ident, $Inner: ident) => {
    impl ConstantTimeEq for $Value {
      fn ct_eq(&self, other: &Self) -> Choice {
        self.0.ct_eq(&other.0)
      }
    }

    impl ConditionallySelectable for $Value {
      fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
        $Value($Inner::conditional_select(&a.0, &b.0, choice))
      }
    }
  };
}
pub(crate) use constant_time;

macro_rules! math_op {
  (
    $Value: ident,
    $Other: ident,
    $Op: ident,
    $op_fn: ident,
    $Assign: ident,
    $assign_fn: ident,
    $function: expr
  ) => {
    impl $Op<$Other> for $Value {
      type Output = $Value;
      fn $op_fn(self, other: $Other) -> Self::Output {
        Self($function(self.0, other.0))
      }
    }
    impl $Assign<$Other> for $Value {
      fn $assign_fn(&mut self, other: $Other) {
        self.0 = $function(self.0, other.0);
      }
    }
    impl<'a> $Op<&'a $Other> for $Value {
      type Output = $Value;
      fn $op_fn(self, other: &'a $Other) -> Self::Output {
        Self($function(self.0, other.0))
      }
    }
    impl<'a> $Assign<&'a $Other> for $Value {
      fn $assign_fn(&mut self, other: &'a $Other) {
        self.0 = $function(self.0, other.0);
      }
    }
  };
}
pub(crate) use math_op;

macro_rules! math {
  ($Value: ident, $Factor: ident, $add: expr, $sub: expr, $mul: expr) => {
    math_op!($Value, $Value, Add, add, AddAssign, add_assign, $add);
    math_op!($Value, $Value, Sub, sub, SubAssign, sub_assign, $sub);
    math_op!($Value, $Factor, Mul, mul, MulAssign, mul_assign, $mul);
  };
}
pub(crate) use math;

macro_rules! math_neg {
  ($Value: ident, $Factor: ident, $add: expr, $sub: expr, $mul: expr) => {
    math!($Value, $Factor, $add, $sub, $mul);

    impl Neg for $Value {
      type Output = Self;
      fn neg(self) -> Self::Output {
        Self(-self.0)
      }
    }
  };
}

/// Wrapper around the dalek Scalar type.
#[derive(Clone, Copy, PartialEq, Eq, Default, Debug, Zeroize)]
pub struct Scalar(pub DScalar);
deref_borrow!(Scalar, DScalar);
constant_time!(Scalar, DScalar);
math_neg!(Scalar, Scalar, DScalar::add, DScalar::sub, DScalar::mul);

macro_rules! from_wrapper {
  ($uint: ident) => {
    impl From<$uint> for Scalar {
      fn from(a: $uint) -> Scalar {
        Scalar(DScalar::from(a))
      }
    }
  };
}

from_wrapper!(u8);
from_wrapper!(u16);
from_wrapper!(u32);
from_wrapper!(u64);
from_wrapper!(u128);

impl Scalar {
  /// Perform an exponentiation.
  // Constant-time 4-bit windowed exponentiation
  pub fn pow(&self, other: Scalar) -> Scalar {
    let mut table = [Scalar::ONE; 16];
    table[1] = *self;
    for i in 2 .. 16 {
      table[i] = table[i - 1] * self;
    }

    let mut res = Scalar::ONE;
    let mut bits = 0;
    for (i, mut bit) in other.to_le_bits().iter_mut().rev().enumerate() {
      bits <<= 1;
      let mut bit = u8_from_bool(&mut bit);
      bits |= bit;
      bit.zeroize();

      if ((i + 1) % 4) == 0 {
        if i != 3 {
          for _ in 0 .. 4 {
            res *= res;
          }
        }

        let mut scale_by = Scalar::ONE;
        #[allow(clippy::needless_range_loop)]
        for i in 0 .. 16 {
          #[allow(clippy::cast_possible_truncation)] // Safe since 0 .. 16
          {
            scale_by = <_>::conditional_select(&scale_by, &table[i], bits.ct_eq(&(i as u8)));
          }
        }
        res *= scale_by;
        bits = 0;
      }
    }
    res
  }

  /// Perform wide reduction on a 64-byte array to create a Scalar without bias.
  pub fn from_bytes_mod_order_wide(bytes: &[u8; 64]) -> Scalar {
    Self(DScalar::from_bytes_mod_order_wide(bytes))
  }

  /// Derive a Scalar without bias from a digest via wide reduction.
  pub fn from_hash<D: Digest<OutputSize = U64> + HashMarker>(hash: D) -> Scalar {
    let mut output = [0u8; 64];
    output.copy_from_slice(&hash.finalize());
    let res = Scalar(DScalar::from_bytes_mod_order_wide(&output));
    output.zeroize();
    res
  }
}

impl Field for Scalar {
  const ZERO: Scalar = Scalar(DScalar::ZERO);
  const ONE: Scalar = Scalar(DScalar::ONE);

  fn random(rng: impl RngCore) -> Self {
    Self(<DScalar as Field>::random(rng))
  }

  fn square(&self) -> Self {
    Self(self.0.square())
  }
  fn double(&self) -> Self {
    Self(self.0.double())
  }

  fn invert(&self) -> CtOption<Self> {
    <DScalar as Field>::invert(&self.0).map(Self)
  }

  fn sqrt(&self) -> CtOption<Self> {
    self.0.sqrt().map(Self)
  }

  fn sqrt_ratio(num: &Self, div: &Self) -> (Choice, Self) {
    let (choice, res) = DScalar::sqrt_ratio(num, div);
    (choice, Self(res))
  }
}

impl PrimeField for Scalar {
  type Repr = [u8; 32];

  const MODULUS: &'static str = <DScalar as PrimeField>::MODULUS;

  const NUM_BITS: u32 = <DScalar as PrimeField>::NUM_BITS;
  const CAPACITY: u32 = <DScalar as PrimeField>::CAPACITY;

  const TWO_INV: Scalar = Scalar(<DScalar as PrimeField>::TWO_INV);

  const MULTIPLICATIVE_GENERATOR: Scalar =
    Scalar(<DScalar as PrimeField>::MULTIPLICATIVE_GENERATOR);
  const S: u32 = <DScalar as PrimeField>::S;

  const ROOT_OF_UNITY: Scalar = Scalar(<DScalar as PrimeField>::ROOT_OF_UNITY);
  const ROOT_OF_UNITY_INV: Scalar = Scalar(<DScalar as PrimeField>::ROOT_OF_UNITY_INV);

  const
DELTA: Scalar = Scalar(<DScalar as PrimeField>::DELTA);

  fn from_repr(bytes: [u8; 32]) -> CtOption<Self> {
    <DScalar as PrimeField>::from_repr(bytes).map(Scalar)
  }
  fn to_repr(&self) -> [u8; 32] {
    self.0.to_repr()
  }

  fn is_odd(&self) -> Choice {
    self.0.is_odd()
  }

  fn from_u128(num: u128) -> Self {
    Scalar(DScalar::from_u128(num))
  }
}

impl PrimeFieldBits for Scalar {
  type ReprBits = [u8; 32];

  fn to_le_bits(&self) -> FieldBits<Self::ReprBits> {
    self.to_repr().into()
  }

  fn char_le_bits() -> FieldBits<Self::ReprBits> {
    BASEPOINT_ORDER.to_bytes().into()
  }
}

impl FromUniformBytes<64> for Scalar {
  fn from_uniform_bytes(bytes: &[u8; 64]) -> Self {
    Self::from_bytes_mod_order_wide(bytes)
  }
}

impl Sum<Scalar> for Scalar {
  fn sum<I: Iterator<Item = Scalar>>(iter: I) -> Scalar {
    Self(DScalar::sum(iter))
  }
}
impl<'a> Sum<&'a Scalar> for Scalar {
  fn sum<I: Iterator<Item = &'a Scalar>>(iter: I) -> Scalar {
    Self(DScalar::sum(iter))
  }
}

impl Product<Scalar> for Scalar {
  fn product<I: Iterator<Item = Scalar>>(iter: I) -> Scalar {
    Self(DScalar::product(iter))
  }
}
impl<'a> Product<&'a Scalar> for Scalar {
  fn product<I: Iterator<Item = &'a Scalar>>(iter: I) -> Scalar {
    Self(DScalar::product(iter))
  }
}

macro_rules! dalek_group {
  (
    $Point: ident,
    $DPoint: ident,
    $torsion_free: expr,

    $Table: ident,
    $DCompressed: ident,

    $BASEPOINT_POINT: ident,
    $BASEPOINT_TABLE: ident
  ) => {
    /// Wrapper around the dalek Point type.
    ///
    /// All operations will be restricted to a prime-order subgroup (equivalent to the group
    /// itself in the case of Ristretto). The exposure of the internal element does allow
    /// bypassing this however, which may lead to undefined/computationally-unsafe behavior, and
    /// is entirely at the user's risk.
    #[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
    pub struct $Point(pub $DPoint);
    deref_borrow!($Point, $DPoint);
    constant_time!($Point, $DPoint);
    math_neg!($Point, Scalar, $DPoint::add, $DPoint::sub, $DPoint::mul);

    /// The basepoint for this curve.
    pub const $BASEPOINT_POINT: $Point = $Point(constants::$BASEPOINT_POINT);

    impl Sum<$Point> for $Point {
      fn sum<I: Iterator<Item = $Point>>(iter: I) -> $Point {
        Self($DPoint::sum(iter))
      }
    }
    impl<'a> Sum<&'a $Point> for $Point {
      fn sum<I: Iterator<Item = &'a $Point>>(iter: I) -> $Point {
        Self($DPoint::sum(iter))
      }
    }

    impl Group for $Point {
      type Scalar = Scalar;
      fn random(mut rng: impl RngCore) -> Self {
        loop {
          let mut bytes = [0; 32];
          rng.fill_bytes(&mut bytes);
          let Some(point) = Option::<$Point>::from($Point::from_bytes(&bytes)) else {
            continue;
          };
          // Ban identity, per the trait specification
          if !bool::from(point.is_identity()) {
            return point;
          }
        }
      }
      fn identity() -> Self {
        Self($DPoint::identity())
      }
      fn generator() -> Self {
        $BASEPOINT_POINT
      }
      fn is_identity(&self) -> Choice {
        self.0.ct_eq(&$DPoint::identity())
      }
      fn double(&self) -> Self {
        Self(self.0.double())
      }
    }

    impl GroupEncoding for $Point {
      type Repr = [u8; 32];

      fn from_bytes(bytes: &Self::Repr) -> CtOption<Self> {
        let decompressed = $DCompressed(*bytes).decompress();
        // TODO: Same note on unwrap_or as above
        let point = decompressed.unwrap_or($DPoint::identity());
        CtOption::new(
          $Point(point),
          choice(black_box(decompressed).is_some()) & choice($torsion_free(point)),
        )
      }

      fn from_bytes_unchecked(bytes: &Self::Repr) -> CtOption<Self> {
        $Point::from_bytes(bytes)
      }

      fn to_bytes(&self) -> Self::Repr {
        self.0.to_bytes()
      }
    }

    impl PrimeGroup for $Point {}

    impl Mul<Scalar> for &$Table {
      type Output = $Point;
      fn mul(self, b: Scalar) -> $Point {
        $Point(&b.0 * self)
      }
    }

    // Support being used as a key in a table
    // While it is expensive as a key, due to the field operations required, there's frequently
    // use cases for public key -> value lookups
    #[allow(unknown_lints, renamed_and_removed_lints)]
    #[allow(clippy::derived_hash_with_manual_eq, clippy::derive_hash_xor_eq)]
    impl Hash for $Point {
      fn hash<H: Hasher>(&self, state: &mut H) {
        self.to_bytes().hash(state);
      }
    }
  };
}

dalek_group!(
  EdwardsPoint,
  DEdwardsPoint,
  |point: DEdwardsPoint| point.is_torsion_free(),
  EdwardsBasepointTable,
  CompressedEdwardsY,
  ED25519_BASEPOINT_POINT,
  ED25519_BASEPOINT_TABLE
);

impl EdwardsPoint {
  /// Multiply this point by the Ed25519 curve's cofactor (8).
  pub fn mul_by_cofactor(&self) -> EdwardsPoint {
    EdwardsPoint(self.0.mul_by_cofactor())
  }
}

dalek_group!(
  RistrettoPoint,
  DRistrettoPoint,
  |_| true,
  RistrettoBasepointTable,
  CompressedRistretto,
  RISTRETTO_BASEPOINT_POINT,
  RISTRETTO_BASEPOINT_TABLE
);

#[test]
fn test_ed25519_group() {
  ff_group_tests::group::test_prime_group_bits::<_, EdwardsPoint>(&mut rand_core::OsRng);
}

#[test]
fn test_ristretto_group() {
  ff_group_tests::group::test_prime_group_bits::<_, RistrettoPoint>(&mut rand_core::OsRng);
}

================================================
FILE: crypto/dkg/Cargo.toml
================================================
[package]
name = "dkg"
version = "0.6.1"
description = "Distributed key generation over ff/group"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["dkg", "multisig", "threshold", "ff", "group"]
edition = "2021"
rust-version = "1.66"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive", "alloc"] }

thiserror = { version = "2", default-features = false }

std-shims = { version = "0.1", path = "../../common/std-shims", default-features = false }

borsh = { version = "1", default-features = false, features = ["derive", "de_strict_order"], optional = true }

ciphersuite = { path = "../ciphersuite", version = "^0.4.1", default-features = false, features = ["alloc"] }

[features]
std = [
  "thiserror/std",
  "std-shims/std",
  "borsh?/std",
  "ciphersuite/std",
]
borsh = ["dep:borsh"]
default = ["std"]

================================================
FILE: crypto/dkg/LICENSE
================================================
MIT License

Copyright (c) 2021-2025 Luke Parker

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: crypto/dkg/README.md ================================================ # Distributed Key Generation A crate implementing a type for keys, presumably the result of a distributed key generation protocol, and utilities from there. This crate used to host implementations of distributed key generation protocols as well (hence the name). Those have been smashed into their own crates, such as [`dkg-musig`](https://docs.rs/dkg-musig) and [`dkg-pedpop`](https://docs.rs/dkg-pedpop). Before being smashed, this crate was [audited by Cypher Stack in March 2023]( https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf ), culminating in commit [669d2dbffc1dafb82a09d9419ea182667115df06]( https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06 ). Any subsequent changes have not undergone auditing. 
================================================ FILE: crypto/dkg/dealer/Cargo.toml ================================================ [package] name = "dkg-dealer" version = "0.6.0" description = "Produce dkg::ThresholdKeys with a dealer key generation" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg/dealer" authors = ["Luke Parker "] keywords = ["dkg", "multisig", "threshold", "ff", "group"] edition = "2021" rust-version = "1.66" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] zeroize = { version = "^1.5", default-features = false } rand_core = { version = "0.6", default-features = false } std-shims = { version = "0.1", path = "../../../common/std-shims", default-features = false } ciphersuite = { path = "../../ciphersuite", version = "^0.4.1", default-features = false } dkg = { path = "../", version = "0.6", default-features = false } [features] std = [ "zeroize/std", "rand_core/std", "std-shims/std", "ciphersuite/std", "dkg/std", ] default = ["std"] ================================================ FILE: crypto/dkg/dealer/LICENSE ================================================ MIT License Copyright (c) 2021-2025 Luke Parker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: crypto/dkg/dealer/README.md ================================================ # Distributed Key Generation - Dealer This crate implements a dealer key generation protocol for the [`dkg`](https://docs.rs/dkg) crate's types. This provides a single point of failure when the key is being generated and is NOT recommended for use outside of tests. This crate was originally part of (in some form) the `dkg` crate, which was [audited by Cypher Stack in March 2023]( https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf ), culminating in commit [669d2dbffc1dafb82a09d9419ea182667115df06]( https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06 ). Any subsequent changes have not undergone auditing. ================================================ FILE: crypto/dkg/dealer/src/lib.rs ================================================ #![cfg_attr(docsrs, feature(doc_cfg))] #![doc = include_str!("../README.md")] #![no_std] use core::ops::Deref; use std_shims::{vec::Vec, collections::HashMap}; use zeroize::{Zeroize, Zeroizing}; use rand_core::{RngCore, CryptoRng}; use ciphersuite::{ group::ff::{Field, PrimeField}, Ciphersuite, }; pub use dkg::*; /// Create a key via a dealer key generation protocol. 
// NOTE(review): the text extraction stripped all `<...>` spans from this region; the generic
// parameter lists below are reconstructed and should be confirmed against the upstream repository.
//
// Samples a degree-(threshold - 1) polynomial, evaluates it at each participant's index to form
// the secret shares (Shamir secret sharing), and wraps each share in `ThresholdKeys`. As a dealer
// protocol, this is a single point of failure and is intended for tests only (per the README).
pub fn key_gen<R: RngCore + CryptoRng, C: Ciphersuite>(
  rng: &mut R,
  threshold: u16,
  participants: u16,
) -> Result<HashMap<Participant, ThresholdKeys<C>>, DkgError> {
  let mut coefficients = Vec::with_capacity(usize::from(participants));
  // `.max(1)` so we always generate the 0th coefficient which we'll share
  for _ in 0 .. threshold.max(1) {
    coefficients.push(Zeroizing::new(C::F::random(&mut *rng)));
  }

  // Evaluate the polynomial at `l` via Horner's method, accumulating into a zeroizing buffer so
  // intermediate share material is wiped on drop.
  fn polynomial<F: PrimeField + Zeroize>(
    coefficients: &[Zeroizing<F>],
    l: Participant,
  ) -> Zeroizing<F> {
    let l = F::from(u64::from(u16::from(l)));
    // This should never be reached since Participant is explicitly non-zero
    assert!(l != F::ZERO, "zero participant passed to polynomial");
    let mut share = Zeroizing::new(F::ZERO);
    for (idx, coefficient) in coefficients.iter().rev().enumerate() {
      *share += coefficient.deref();
      if idx != (coefficients.len() - 1) {
        *share *= l;
      }
    }
    share
  }

  // The group key is the commitment to the 0th coefficient (the shared secret)
  let group_key = C::generator() * coefficients[0].deref();

  let mut secret_shares = HashMap::with_capacity(participants as usize);
  let mut verification_shares = HashMap::with_capacity(participants as usize);
  for i in 1 ..= participants {
    let i = Participant::new(i).expect("non-zero u16 wasn't a valid Participant index");
    let secret_share = polynomial(&coefficients, i);
    secret_shares.insert(i, secret_share.clone());
    verification_shares.insert(i, C::generator() * *secret_share);
  }

  let mut res = HashMap::with_capacity(participants as usize);
  for (i, secret_share) in secret_shares {
    let keys = ThresholdKeys::new(
      ThresholdParams::new(threshold, participants, i)?,
      Interpolation::Lagrange,
      secret_share,
      verification_shares.clone(),
    )?;
    // Every participant's keys must agree on the group key
    debug_assert_eq!(keys.group_key(), group_key);
    res.insert(i, keys);
  }
  Ok(res)
}

================================================
FILE: crypto/dkg/musig/Cargo.toml
================================================
[package]
name = "dkg-musig"
version = "0.6.0"
description = "The MuSig key aggregation protocol"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg/musig"
authors = ["Luke Parker "]
keywords = ["dkg", "multisig", "threshold",
"ff", "group"] edition = "2021" rust-version = "1.79" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] thiserror = { version = "2", default-features = false } rand_core = { version = "0.6", default-features = false } zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] } std-shims = { version = "0.1", path = "../../../common/std-shims", default-features = false } multiexp = { path = "../../multiexp", version = "0.4", default-features = false } ciphersuite = { path = "../../ciphersuite", version = "^0.4.1", default-features = false } dkg = { path = "../", version = "0.6", default-features = false } [dev-dependencies] rand_core = { version = "0.6", default-features = false, features = ["getrandom"] } dalek-ff-group = { path = "../../dalek-ff-group" } dkg-recovery = { path = "../recovery", default-features = false, features = ["std"] } [features] std = [ "thiserror/std", "rand_core/std", "std-shims/std", "multiexp/std", "ciphersuite/std", "dkg/std", ] default = ["std"] ================================================ FILE: crypto/dkg/musig/LICENSE ================================================ MIT License Copyright (c) 2021-2025 Luke Parker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: crypto/dkg/musig/README.md ================================================ # Distributed Key Generation - MuSig This implements the MuSig key aggregation protocol for the [`dkg`](https://docs.rs/dkg) crate's types. This crate was originally part of (in some form) the `dkg` crate, which was [audited by Cypher Stack in March 2023]( https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf ), culminating in commit [669d2dbffc1dafb82a09d9419ea182667115df06]( https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06 ). Any subsequent changes have not undergone auditing. ================================================ FILE: crypto/dkg/musig/src/lib.rs ================================================ #![cfg_attr(docsrs, feature(doc_cfg))] #![doc = include_str!("../README.md")] #![cfg_attr(not(feature = "std"), no_std)] use core::ops::Deref; use std_shims::{ vec, vec::Vec, collections::{HashSet, HashMap}, }; use zeroize::Zeroizing; use ciphersuite::{group::GroupEncoding, Ciphersuite}; pub use dkg::*; #[cfg(test)] mod tests; /// Errors encountered when working with threshold keys. #[derive(Clone, PartialEq, Eq, Debug, thiserror::Error)] pub enum MusigError { /// No keys were provided. #[error("no keys provided")] NoKeysProvided, /// Too many keys were provided. 
#[error("too many keys (allowed {max}, provided {provided})")] TooManyKeysProvided { /// The maximum amount of keys allowed. max: u16, /// The amount of keys provided. provided: usize, }, /// A participant was duplicated. #[error("a participant was duplicated")] DuplicatedParticipant(C::G), /// Participating, yet our public key wasn't found in the list of keys. #[error("private key's public key wasn't present in the list of public keys")] NotPresent, /// An error propagated from the underlying `dkg` crate. #[error("error from dkg ({0})")] DkgError(DkgError), } fn check_keys(keys: &[C::G]) -> Result> { if keys.is_empty() { Err(MusigError::NoKeysProvided)?; } let keys_len = u16::try_from(keys.len()) .map_err(|_| MusigError::TooManyKeysProvided { max: u16::MAX, provided: keys.len() })?; let mut set = HashSet::with_capacity(keys.len()); for key in keys { let bytes = key.to_bytes().as_ref().to_vec(); if !set.insert(bytes) { Err(MusigError::DuplicatedParticipant(*key))?; } } Ok(keys_len) } fn binding_factor_transcript( context: [u8; 32], keys_len: u16, keys: &[C::G], ) -> Vec { debug_assert_eq!(usize::from(keys_len), keys.len()); let mut transcript = vec![]; transcript.extend(&context); transcript.extend(keys_len.to_le_bytes()); for key in keys { transcript.extend(key.to_bytes().as_ref()); } transcript } fn binding_factor(mut transcript: Vec, i: u16) -> C::F { transcript.extend(i.to_le_bytes()); C::hash_to_F(b"dkg-musig", &transcript) } #[allow(clippy::type_complexity)] fn musig_key_multiexp( context: [u8; 32], keys: &[C::G], ) -> Result, MusigError> { let keys_len = check_keys::(keys)?; let transcript = binding_factor_transcript::(context, keys_len, keys); let mut multiexp = Vec::with_capacity(keys.len()); for i in 1 ..= keys_len { multiexp.push((binding_factor::(transcript.clone(), i), keys[usize::from(i - 1)])); } Ok(multiexp) } /// The group key resulting from using this library's MuSig key aggregation. 
/// /// This function executes in variable time and MUST NOT be used with secret data. pub fn musig_key_vartime( context: [u8; 32], keys: &[C::G], ) -> Result> { Ok(multiexp::multiexp_vartime(&musig_key_multiexp(context, keys)?)) } /// The group key resulting from using this library's MuSig key aggregation. pub fn musig_key(context: [u8; 32], keys: &[C::G]) -> Result> { Ok(multiexp::multiexp(&musig_key_multiexp(context, keys)?)) } /// A n-of-n non-interactive DKG which does not guarantee the usability of the resulting key. pub fn musig( context: [u8; 32], private_key: Zeroizing, keys: &[C::G], ) -> Result, MusigError> { let our_pub_key = C::generator() * private_key.deref(); let Some(our_i) = keys.iter().position(|key| *key == our_pub_key) else { Err(MusigError::DkgError(DkgError::NotParticipating))? }; let keys_len: u16 = check_keys::(keys)?; let params = ThresholdParams::new( keys_len, keys_len, // The `+ 1` won't fail as `keys.len() <= u16::MAX`, so any index is `< u16::MAX` Participant::new( u16::try_from(our_i).expect("keys.len() <= u16::MAX yet index of keys > u16::MAX?") + 1, ) .expect("i + 1 != 0"), ) .map_err(MusigError::DkgError)?; let transcript = binding_factor_transcript::(context, keys_len, keys); let mut binding_factors = Vec::with_capacity(keys.len()); let mut multiexp = Vec::with_capacity(keys.len()); let mut verification_shares = HashMap::with_capacity(keys.len()); for (i, key) in (1 ..= keys_len).zip(keys.iter().copied()) { let binding_factor = binding_factor::(transcript.clone(), i); binding_factors.push(binding_factor); multiexp.push((binding_factor, key)); let i = Participant::new(i).expect("non-zero u16 wasn't a valid Participant index?"); verification_shares.insert(i, key); } let group_key = multiexp::multiexp(&multiexp); debug_assert_eq!(our_pub_key, verification_shares[¶ms.i()]); debug_assert_eq!(musig_key_vartime::(context, keys), Ok(group_key)); ThresholdKeys::new( params, Interpolation::Constant(binding_factors), private_key, 
verification_shares, ) .map_err(MusigError::DkgError) } ================================================ FILE: crypto/dkg/musig/src/tests.rs ================================================ use std::collections::HashMap; use zeroize::Zeroizing; use rand_core::OsRng; use dalek_ff_group::Ristretto; use ciphersuite::{group::ff::Field, Ciphersuite}; use dkg_recovery::recover_key; use crate::*; /// Tests MuSig key generation. #[test] pub fn test_musig() { const PARTICIPANTS: u16 = 5; let mut keys = vec![]; let mut pub_keys = vec![]; for _ in 0 .. PARTICIPANTS { let key = Zeroizing::new(::F::random(&mut OsRng)); pub_keys.push(::generator() * *key); keys.push(key); } const CONTEXT: [u8; 32] = *b"MuSig Test "; // Empty signing set musig::(CONTEXT, Zeroizing::new(::F::ZERO), &[]) .unwrap_err(); // Signing set we're not part of musig::( CONTEXT, Zeroizing::new(::F::ZERO), &[::generator()], ) .unwrap_err(); // Test with n keys { let mut created_keys = HashMap::new(); let mut verification_shares = HashMap::new(); let group_key = musig_key::(CONTEXT, &pub_keys).unwrap(); for (i, key) in keys.iter().enumerate() { let these_keys = musig::(CONTEXT, key.clone(), &pub_keys).unwrap(); assert_eq!(these_keys.params().t(), PARTICIPANTS); assert_eq!(these_keys.params().n(), PARTICIPANTS); assert_eq!(usize::from(u16::from(these_keys.params().i())), i + 1); verification_shares.insert( these_keys.params().i(), ::generator() * **these_keys.original_secret_share(), ); assert_eq!(these_keys.group_key(), group_key); created_keys.insert(these_keys.params().i(), these_keys); } for keys in created_keys.values() { for (l, verification_share) in &verification_shares { assert_eq!(keys.original_verification_share(*l), *verification_share); } } assert_eq!( ::generator() * *recover_key(&created_keys.values().cloned().collect::>()).unwrap(), group_key ); } } ================================================ FILE: crypto/dkg/pedpop/Cargo.toml ================================================ [package] name 
= "dkg-pedpop" version = "0.6.0" description = "The PedPoP distributed key generation protocol" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg/pedpop" authors = ["Luke Parker "] keywords = ["dkg", "multisig", "threshold", "ff", "group"] edition = "2021" rust-version = "1.80" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] thiserror = { version = "2", default-features = false, features = ["std"] } zeroize = { version = "^1.5", default-features = false, features = ["std", "zeroize_derive"] } rand_core = { version = "0.6", default-features = false, features = ["std"] } transcript = { package = "flexible-transcript", path = "../../transcript", version = "^0.3.3", default-features = false, features = ["std", "recommended"] } chacha20 = { version = "0.9", default-features = false, features = ["std", "zeroize"] } multiexp = { path = "../../multiexp", version = "0.4", default-features = false, features = ["std"] } ciphersuite = { path = "../../ciphersuite", version = "^0.4.1", default-features = false, features = ["std"] } schnorr = { package = "schnorr-signatures", path = "../../schnorr", version = "^0.5.1", default-features = false, features = ["std"] } dleq = { path = "../../dleq", version = "^0.4.1", default-features = false, features = ["std", "serialize"] } dkg = { path = "../", version = "0.6", default-features = false, features = ["std"] } [dev-dependencies] rand_core = { version = "0.6", default-features = false, features = ["getrandom"] } dalek-ff-group = { path = "../../dalek-ff-group", default-features = false } ================================================ FILE: crypto/dkg/pedpop/LICENSE ================================================ MIT License Copyright (c) 2021-2025 Luke Parker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the 
Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: crypto/dkg/pedpop/README.md ================================================ # Distributed Key Generation - PedPoP This implements the PedPoP distributed key generation protocol for the [`dkg`](https://docs.rs/dkg) crate's types. This crate was originally part of the `dkg` crate, which was [audited by Cypher Stack in March 2023]( https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf ), culminating in commit [669d2dbffc1dafb82a09d9419ea182667115df06]( https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06 ). Any subsequent changes have not undergone auditing. 
================================================ FILE: crypto/dkg/pedpop/src/encryption.rs ================================================ use core::{ops::Deref, fmt}; use std::{io, collections::HashMap}; use thiserror::Error; use zeroize::{Zeroize, Zeroizing}; use rand_core::{RngCore, CryptoRng}; use chacha20::{ cipher::{crypto_common::KeyIvInit, StreamCipher}, Key as Cc20Key, Nonce as Cc20Iv, ChaCha20, }; use transcript::{Transcript, RecommendedTranscript}; #[cfg(test)] use ciphersuite::group::ff::Field; use ciphersuite::{group::GroupEncoding, Ciphersuite}; use multiexp::BatchVerifier; use schnorr::SchnorrSignature; use dleq::DLEqProof; use dkg::{Participant, ThresholdParams}; mod sealed { use super::*; pub trait ReadWrite: Sized { fn read(reader: &mut R, params: ThresholdParams) -> io::Result; fn write(&self, writer: &mut W) -> io::Result<()>; fn serialize(&self) -> Vec { let mut buf = vec![]; self.write(&mut buf).unwrap(); buf } } pub trait Message: Clone + PartialEq + Eq + fmt::Debug + Zeroize + ReadWrite {} impl Message for M {} pub trait Encryptable: Clone + AsRef<[u8]> + AsMut<[u8]> + Zeroize + ReadWrite {} impl + AsMut<[u8]> + Zeroize + ReadWrite> Encryptable for E {} } pub(crate) use sealed::*; /// Wraps a message with a key to use for encryption in the future. #[derive(Clone, PartialEq, Eq, Debug, Zeroize)] pub struct EncryptionKeyMessage { msg: M, enc_key: C::G, } // Doesn't impl ReadWrite so that doesn't need to be imported impl EncryptionKeyMessage { pub fn read(reader: &mut R, params: ThresholdParams) -> io::Result { Ok(Self { msg: M::read(reader, params)?, enc_key: C::read_G(reader)? 
}) } pub fn write(&self, writer: &mut W) -> io::Result<()> { self.msg.write(writer)?; writer.write_all(self.enc_key.to_bytes().as_ref()) } pub fn serialize(&self) -> Vec { let mut buf = vec![]; self.write(&mut buf).unwrap(); buf } #[cfg(test)] pub(crate) fn enc_key(&self) -> C::G { self.enc_key } } /// An encrypted message, with a per-message encryption key enabling revealing specific messages /// without side effects. #[derive(Clone, Zeroize)] pub struct EncryptedMessage { key: C::G, // Also include a proof-of-possession for the key. // If this proof-of-possession wasn't here, Eve could observe Alice encrypt to Bob with key X, // then send Bob a message also claiming to use X. // While Eve's message would fail to meaningfully decrypt, Bob would then use this to create a // blame argument against Eve. When they do, they'd reveal bX, revealing Alice's message to Bob. // This is a massive side effect which could break some protocols, in the worst case. // While Eve can still reuse their own keys, causing Bob to leak all messages by revealing for // any single one, that's effectively Eve revealing themselves, and not considered relevant. pop: SchnorrSignature, msg: Zeroizing, } fn ecdh(private: &Zeroizing, public: C::G) -> Zeroizing { Zeroizing::new(public * private.deref()) } // Each ecdh must be distinct. Reuse of an ecdh for multiple ciphers will cause the messages to be // leaked. 
fn cipher(context: [u8; 32], ecdh: &Zeroizing) -> ChaCha20 { // Ideally, we'd box this transcript with ZAlloc, yet that's only possible on nightly // TODO: https://github.com/serai-dex/serai/issues/151 let mut transcript = RecommendedTranscript::new(b"DKG Encryption v0.2"); transcript.append_message(b"context", context); transcript.domain_separate(b"encryption_key"); let mut ecdh = ecdh.to_bytes(); transcript.append_message(b"shared_key", ecdh.as_ref()); ecdh.as_mut().zeroize(); let zeroize = |buf: &mut [u8]| buf.zeroize(); let mut key = Cc20Key::default(); let mut challenge = transcript.challenge(b"key"); key.copy_from_slice(&challenge[.. 32]); zeroize(challenge.as_mut()); // Since the key is single-use, it doesn't matter what we use for the IV // The issue is key + IV reuse. If we never reuse the key, we can't have the opportunity to // reuse a nonce // Use a static IV in acknowledgement of this let mut iv = Cc20Iv::default(); // The \0 is to satisfy the length requirement (12), not to be null terminated iv.copy_from_slice(b"DKG IV v0.2\0"); // ChaCha20 has the same commentary as the transcript regarding ZAlloc // TODO: https://github.com/serai-dex/serai/issues/151 let res = ChaCha20::new(&key, &iv); zeroize(key.as_mut()); res } fn encrypt( rng: &mut R, context: [u8; 32], from: Participant, to: C::G, mut msg: Zeroizing, ) -> EncryptedMessage { /* The following code could be used to replace the requirement on an RNG here. It's just currently not an issue to require taking in an RNG here. 
// NOTE(review): this physical line begins INSIDE the `/* ... */` comment opened on the
// previous line (the RNG-free sketch), closed by the `*/` below. Generics stripped by
// extraction (`cipher::(...)` was presumably `cipher::<C>(...)` etc. — TODO confirm).
// Visible here: the body of `encrypt` (fresh per-message key, keystream application,
// Schnorr PoP over the pop_challenge) and the `impl EncryptedMessage` block with
// read/write/serialize plus test-only `invalidate_pop` / `invalidate_msg` helpers.
let last = self.last_enc_key.to_bytes(); self.last_enc_key = C::hash_to_F(b"encryption_base", last.as_ref()); let key = C::hash_to_F(b"encryption_key", last.as_ref()); last.as_mut().zeroize(); */ // Generate a new key for this message, satisfying cipher's requirement of distinct keys per // message, and enabling revealing this message without revealing any others let key = Zeroizing::new(C::random_nonzero_F(rng)); cipher::(context, &ecdh::(&key, to)).apply_keystream(msg.as_mut().as_mut()); let pub_key = C::generator() * key.deref(); let nonce = Zeroizing::new(C::random_nonzero_F(rng)); let pub_nonce = C::generator() * nonce.deref(); EncryptedMessage { key: pub_key, pop: SchnorrSignature::sign( &key, nonce, pop_challenge::(context, pub_nonce, pub_key, from, msg.deref().as_ref()), ), msg, } } impl EncryptedMessage { pub fn read(reader: &mut R, params: ThresholdParams) -> io::Result { Ok(Self { key: C::read_G(reader)?, pop: SchnorrSignature::::read(reader)?, msg: Zeroizing::new(E::read(reader, params)?), }) } pub fn write(&self, writer: &mut W) -> io::Result<()> { writer.write_all(self.key.to_bytes().as_ref())?; self.pop.write(writer)?; self.msg.write(writer) } pub fn serialize(&self) -> Vec { let mut buf = vec![]; self.write(&mut buf).unwrap(); buf } #[cfg(test)] pub(crate) fn invalidate_pop(&mut self) { self.pop.s += C::F::ONE; } #[cfg(test)] pub(crate) fn invalidate_msg( &mut self, rng: &mut R, context: [u8; 32], from: Participant, ) { // Invalidate the message by specifying a new key/Schnorr PoP // This will cause all initial checks to pass, yet a decrypt to gibberish let key = Zeroizing::new(C::random_nonzero_F(rng)); let pub_key = C::generator() * key.deref(); let nonce = Zeroizing::new(C::random_nonzero_F(rng)); let pub_nonce = C::generator() * nonce.deref(); self.key = pub_key; self.pop = SchnorrSignature::sign( &key, nonce, pop_challenge::(context, pub_nonce, pub_key, from, self.msg.deref().as_ref()), ); } // Assumes the encrypted message is a secret share.
// NOTE(review): collapsed line; generics stripped (`::Repr` was presumably
// `<C::F as PrimeField>::Repr` — TODO confirm). Kept byte-identical.
// Visible here: two test-only mutators that corrupt an encrypted secret share —
// `invalidate_share_serialization` writes an all-0xFF repr (asserted non-canonical for
// the field) and `invalidate_share_value` writes the repr of 1 — then re-encrypt; plus
// the `EncryptionKeyProof` struct (revealed ECDH key + DLEq proof) and the start of its
// `read` deserializer.
#[cfg(test)] pub(crate) fn invalidate_share_serialization( &mut self, rng: &mut R, context: [u8; 32], from: Participant, to: C::G, ) { use ciphersuite::group::ff::PrimeField; let mut repr = ::Repr::default(); for b in repr.as_mut() { *b = 255; } // Tries to guarantee the above assumption. assert_eq!(repr.as_ref().len(), self.msg.as_ref().len()); // Checks that this isn't over a field where this is somehow valid assert!(!bool::from(C::F::from_repr(repr).is_some())); self.msg.as_mut().as_mut().copy_from_slice(repr.as_ref()); *self = encrypt(rng, context, from, to, self.msg.clone()); } // Assumes the encrypted message is a secret share. #[cfg(test)] pub(crate) fn invalidate_share_value( &mut self, rng: &mut R, context: [u8; 32], from: Participant, to: C::G, ) { use ciphersuite::group::ff::PrimeField; // Assumes the share isn't randomly 1 let repr = C::F::ONE.to_repr(); self.msg.as_mut().as_mut().copy_from_slice(repr.as_ref()); *self = encrypt(rng, context, from, to, self.msg.clone()); } } /// A proof that the provided encryption key is a legitimately derived shared key for some message. #[derive(Clone, PartialEq, Eq, Debug, Zeroize)] pub struct EncryptionKeyProof { key: Zeroizing, dleq: DLEqProof, } impl EncryptionKeyProof { pub fn read(reader: &mut R) -> io::Result { Ok(Self { key: Zeroizing::new(C::read_G(reader)?), dleq: DLEqProof::read(reader)?
// NOTE(review): collapsed line; generics stripped by extraction. Kept byte-identical.
// Visible here: the remainder of the `EncryptionKeyProof` impl (write/serialize, plus
// test-only `invalidate_key` / `invalidate_dleq` which perturbs the serialized `c` byte),
// then `pop_challenge` — the transcript-based Schnorr challenge binding context, nonce,
// key, sender, and message — and the opening of `encryption_key_transcript` (its body
// continues on the next physical line).
}) } pub fn write(&self, writer: &mut W) -> io::Result<()> { writer.write_all(self.key.to_bytes().as_ref())?; self.dleq.write(writer) } pub fn serialize(&self) -> Vec { let mut buf = vec![]; self.write(&mut buf).unwrap(); buf } #[cfg(test)] pub(crate) fn invalidate_key(&mut self) { *self.key += C::generator(); } #[cfg(test)] pub(crate) fn invalidate_dleq(&mut self) { let mut buf = vec![]; self.dleq.write(&mut buf).unwrap(); // Adds one to c since this is serialized c, s // Adding one to c will leave a validly serialized c // Adding one to s may leave an invalidly serialized s buf[0] = buf[0].wrapping_add(1); self.dleq = DLEqProof::read::<&[u8]>(&mut buf.as_ref()).unwrap(); } } // This doesn't need to take the msg. It just doesn't hurt as an extra layer. // This still doesn't mean the DKG offers an authenticated channel. The per-message keys have no // root of trust other than their existence in the assumed-to-exist external authenticated channel. fn pop_challenge( context: [u8; 32], nonce: C::G, key: C::G, sender: Participant, msg: &[u8], ) -> C::F { let mut transcript = RecommendedTranscript::new(b"DKG Encryption Key Proof of Possession v0.2"); transcript.append_message(b"context", context); transcript.domain_separate(b"proof_of_possession"); transcript.append_message(b"nonce", nonce.to_bytes()); transcript.append_message(b"key", key.to_bytes()); // This is sufficient to prevent the attack this is meant to stop transcript.append_message(b"sender", sender.to_bytes()); // This, as written above, doesn't hurt transcript.append_message(b"message", msg); // While this is a PoK and a PoP, it's called a PoP here since the important part is its owner // Elsewhere, where we use the term PoK, the important part is that it isn't some inverse, with // an unknown to anyone discrete log, breaking the system C::hash_to_F(b"DKG-encryption-proof_of_possession", &transcript.challenge(b"schnorr")) } fn encryption_key_transcript(context: [u8; 32]) -> RecommendedTranscript { let mut
// NOTE(review): this line CONTINUES `encryption_key_transcript` from the previous line
// (`let mut` / `transcript = ...` were split by extraction); generics stripped
// (`Decryption` is presumably `Decryption<C: Ciphersuite>` — TODO confirm).
// Visible here: `DecryptionError` (invalid PoP signature vs invalid DLEq proof),
// and the `Decryption` box: `register` asserts one encryption key per participant;
// `decrypt_with_proof` first checks the sender's PoP, then (given a proof) verifies the
// DLEq ties the revealed ECDH key to the registered decryptor key before decrypting.
transcript = RecommendedTranscript::new(b"DKG Encryption Key Correctness Proof v0.2"); transcript.append_message(b"context", context); transcript } #[derive(Clone, Copy, PartialEq, Eq, Debug, Error)] pub(crate) enum DecryptionError { #[error("accused provided an invalid signature")] InvalidSignature, #[error("accuser provided an invalid decryption key")] InvalidProof, } // A simple box for managing decryption. #[derive(Clone, Debug)] pub(crate) struct Decryption { context: [u8; 32], enc_keys: HashMap, } impl Decryption { pub(crate) fn new(context: [u8; 32]) -> Self { Self { context, enc_keys: HashMap::new() } } pub(crate) fn register( &mut self, participant: Participant, msg: EncryptionKeyMessage, ) -> M { assert!( !self.enc_keys.contains_key(&participant), "Re-registering encryption key for a participant" ); self.enc_keys.insert(participant, msg.enc_key); msg.msg } // Given a message, and the intended decryptor, and a proof for its key, decrypt the message. // Returns None if the key was wrong. pub(crate) fn decrypt_with_proof( &self, from: Participant, decryptor: Participant, mut msg: EncryptedMessage, // There's no encryption key proof if the accusation is of an invalid signature proof: Option>, ) -> Result, DecryptionError> { if !msg.pop.verify( msg.key, pop_challenge::(self.context, msg.pop.R, msg.key, from, msg.msg.deref().as_ref()), ) { Err(DecryptionError::InvalidSignature)?; } if let Some(proof) = proof { // Verify this is the decryption key for this message proof .dleq .verify( &mut encryption_key_transcript(self.context), &[C::generator(), msg.key], &[self.enc_keys[&decryptor], *proof.key], ) .map_err(|_| DecryptionError::InvalidProof)?; cipher::(self.context, &proof.key).apply_keystream(msg.msg.as_mut().as_mut()); Ok(msg.msg) } else { Err(DecryptionError::InvalidProof) } } } // A simple box for managing encryption.
// NOTE(review): collapsed line; generics stripped. Kept byte-identical.
// Visible here: the `Encryption` box — our own ephemeral encryption keypair plus a
// nested `Decryption`. `Debug` deliberately omits `enc_key` via finish_non_exhaustive;
// `Zeroize` wipes the secret key, the public key, and drains registered peer keys.
// `decrypt` batch-verifies the sender's PoP (under a caller-supplied batch id), applies
// the keystream with our ECDH, and returns the plaintext together with an
// `EncryptionKeyProof` (revealed ECDH key + DLEq) usable for later blame. The DLEq
// `prove(...)` call is split across the line break into the next physical line.
#[derive(Clone)] pub(crate) struct Encryption { context: [u8; 32], i: Participant, enc_key: Zeroizing, enc_pub_key: C::G, decryption: Decryption, } impl fmt::Debug for Encryption { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt .debug_struct("Encryption") .field("context", &self.context) .field("i", &self.i) .field("enc_pub_key", &self.enc_pub_key) .field("decryption", &self.decryption) .finish_non_exhaustive() } } impl Zeroize for Encryption { fn zeroize(&mut self) { self.enc_key.zeroize(); self.enc_pub_key.zeroize(); for (_, mut value) in self.decryption.enc_keys.drain() { value.zeroize(); } } } impl Encryption { pub(crate) fn new( context: [u8; 32], i: Participant, rng: &mut R, ) -> Self { let enc_key = Zeroizing::new(C::random_nonzero_F(rng)); Self { context, i, enc_pub_key: C::generator() * enc_key.deref(), enc_key, decryption: Decryption::new(context), } } pub(crate) fn registration(&self, msg: M) -> EncryptionKeyMessage { EncryptionKeyMessage { msg, enc_key: self.enc_pub_key } } pub(crate) fn register( &mut self, participant: Participant, msg: EncryptionKeyMessage, ) -> M { self.decryption.register(participant, msg) } pub(crate) fn encrypt( &self, rng: &mut R, participant: Participant, msg: Zeroizing, ) -> EncryptedMessage { encrypt(rng, self.context, self.i, self.decryption.enc_keys[&participant], msg) } pub(crate) fn decrypt( &self, rng: &mut R, batch: &mut BatchVerifier, // Uses a distinct batch ID so if this batch verifier is reused, we know its the PoP aspect // which failed, and therefore to use None for the blame batch_id: I, from: Participant, mut msg: EncryptedMessage, ) -> (Zeroizing, EncryptionKeyProof) { msg.pop.batch_verify( rng, batch, batch_id, msg.key, pop_challenge::(self.context, msg.pop.R, msg.key, from, msg.msg.deref().as_ref()), ); let key = ecdh::(&self.enc_key, msg.key); cipher::(self.context, &key).apply_keystream(msg.msg.as_mut().as_mut()); ( msg.msg, EncryptionKeyProof { key, dleq: DLEqProof::prove( rng, &mut
// NOTE(review): this line finishes `Encryption::decrypt` (the DLEq `prove` arguments)
// and `into_decryption`, then an extractor `==== FILE: ... ====` separator marks the
// start of crypto/dkg/pedpop/src/lib.rs: crate attributes, imports, module decls, and
// the first variants of `PedPoPError` (thiserror-derived). The enum is completed on the
// next physical line. Generics stripped throughout; kept byte-identical.
encryption_key_transcript(self.context), &[C::generator(), msg.key], &self.enc_key, ), }, ) } pub(crate) fn into_decryption(self) -> Decryption { self.decryption } } ================================================ FILE: crypto/dkg/pedpop/src/lib.rs ================================================ #![cfg_attr(docsrs, feature(doc_cfg))] #![doc = include_str!("../README.md")] // This crate requires `dleq` which doesn't support no-std via std-shims // #![cfg_attr(not(feature = "std"), no_std)] use core::{marker::PhantomData, ops::Deref, fmt}; use std::{ io::{self, Read, Write}, collections::HashMap, }; use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing}; use rand_core::{RngCore, CryptoRng}; use transcript::{Transcript, RecommendedTranscript}; use multiexp::{multiexp_vartime, BatchVerifier}; use ciphersuite::{ group::{ ff::{Field, PrimeField}, Group, GroupEncoding, }, Ciphersuite, }; use schnorr::SchnorrSignature; pub use dkg::*; mod encryption; pub use encryption::*; #[cfg(test)] mod tests; /// Errors possible during key generation. #[derive(Clone, PartialEq, Eq, Debug, thiserror::Error)] pub enum PedPoPError { /// An incorrect amount of participants was provided. #[error("incorrect amount of participants (expected {expected}, found {found})")] IncorrectAmountOfParticipants { expected: usize, found: usize }, /// An invalid proof of knowledge was provided. #[error("invalid proof of knowledge (participant {0})")] InvalidCommitments(Participant), /// An invalid DKG share was provided. #[error("invalid share (participant {participant}, blame {blame})")] InvalidShare { participant: Participant, blame: Option> }, /// A participant was missing. #[error("missing participant {0}")] MissingParticipant(Participant), /// An error propagated from the underlying `dkg` crate.
// NOTE(review): starts with the final `PedPoPError` variant (the enum opened on the
// previous physical line). Then `validate_map` — checks a received map holds exactly the
// included participants minus ourselves, flagging duplicates of our own index and
// missing peers — and `challenge`, the transcript challenge for the PedPoP proof of
// knowledge (context, participant, nonce, commitment bytes). Generics stripped by
// extraction; kept byte-identical.
#[error("error from dkg ({0})")] DkgError(DkgError), } // Validate a map of values to have the expected included participants fn validate_map( map: &HashMap, included: &[Participant], ours: Participant, ) -> Result<(), PedPoPError> { if (map.len() + 1) != included.len() { Err(PedPoPError::IncorrectAmountOfParticipants { expected: included.len(), found: map.len() + 1, })?; } for included in included { if *included == ours { if map.contains_key(included) { Err(PedPoPError::DkgError(DkgError::DuplicatedParticipant(*included)))?; } continue; } if !map.contains_key(included) { Err(PedPoPError::MissingParticipant(*included))?; } } Ok(()) } #[allow(non_snake_case)] fn challenge(context: [u8; 32], l: Participant, R: &[u8], Am: &[u8]) -> C::F { let mut transcript = RecommendedTranscript::new(b"DKG PedPoP v0.2"); transcript.domain_separate(b"schnorr_proof_of_knowledge"); transcript.append_message(b"context", context); transcript.append_message(b"participant", l.to_bytes()); transcript.append_message(b"nonce", R); transcript.append_message(b"commitments", Am); C::hash_to_F(b"DKG-PedPoP-proof_of_knowledge-0", &transcript.challenge(b"schnorr")) } /// The commitments message, intended to be broadcast to all other parties. /// /// Every participant should only provide one set of commitments to all parties. If any /// participant sends multiple sets of commitments, they are faulty and should be presumed /// malicious. As this library does not handle networking, it is unable to detect if any /// participant is so faulty. That responsibility lies with the caller.
// NOTE(review): collapsed line; generics stripped. Kept byte-identical.
// Visible here: the `Commitments` message (t commitment points + the cached serialized
// bytes they were read from + a Schnorr PoK signature); its `ReadWrite` impl re-reads
// each point while accumulating the raw bytes into `cached_msg` so the signed bytes are
// exactly what was received; the `KeyGenMachine` entry-point struct and `new`; and the
// head of `generate_coefficients` (the coefficient loop body continues on the next
// physical line).
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)] pub struct Commitments { commitments: Vec, cached_msg: Vec, sig: SchnorrSignature, } impl ReadWrite for Commitments { fn read(reader: &mut R, params: ThresholdParams) -> io::Result { let mut commitments = Vec::with_capacity(params.t().into()); let mut cached_msg = vec![]; #[allow(non_snake_case)] let mut read_G = || -> io::Result { let mut buf = ::Repr::default(); reader.read_exact(buf.as_mut())?; let point = C::read_G(&mut buf.as_ref())?; cached_msg.extend(buf.as_ref()); Ok(point) }; for _ in 0 .. params.t() { commitments.push(read_G()?); } Ok(Commitments { commitments, cached_msg, sig: SchnorrSignature::read(reader)? }) } fn write(&self, writer: &mut W) -> io::Result<()> { writer.write_all(&self.cached_msg)?; self.sig.write(writer) } } /// State machine to begin the key generation protocol. #[derive(Debug, Zeroize)] pub struct KeyGenMachine { params: ThresholdParams, context: [u8; 32], _curve: PhantomData, } impl KeyGenMachine { /// Create a new machine to generate a key. /// /// The context should be unique among multisigs. pub fn new(params: ThresholdParams, context: [u8; 32]) -> KeyGenMachine { KeyGenMachine { params, context, _curve: PhantomData } } /// Start generating a key according to the PedPoP DKG specification present in the FROST paper. /// /// Returns a commitments message to be sent to all parties over an authenticated channel. If any /// party submits multiple sets of commitments, they MUST be treated as malicious. pub fn generate_coefficients( self, rng: &mut R, ) -> (SecretShareMachine, EncryptionKeyMessage>) { let t = usize::from(self.params.t()); let mut coefficients = Vec::with_capacity(t); let mut commitments = Vec::with_capacity(t); let mut cached_msg = vec![]; for i in 0 ..
// NOTE(review): this line continues the `for i in 0 .. t` loop opened on the previous
// physical line. Visible here: the rest of `generate_coefficients` (random polynomial
// coefficients, public commitments, Schnorr PoK over coefficient 0, Encryption setup,
// broadcast message assembly) and `polynomial`, a Horner evaluation of the committed
// polynomial at participant index l (asserted non-zero). Generics stripped by
// extraction; kept byte-identical.
t { // Step 1: Generate t random values to form a polynomial with coefficients.push(Zeroizing::new(C::random_nonzero_F(&mut *rng))); // Step 3: Generate public commitments commitments.push(C::generator() * coefficients[i].deref()); cached_msg.extend(commitments[i].to_bytes().as_ref()); } // Step 2: Provide a proof of knowledge let r = Zeroizing::new(C::random_nonzero_F(rng)); let nonce = C::generator() * r.deref(); let sig = SchnorrSignature::::sign( &coefficients[0], // This could be deterministic as the PoK is a singleton never opened up to cooperative // discussion // There's no reason to spend the time and effort to make this deterministic besides a // general obsession with canonicity and determinism though r, challenge::(self.context, self.params.i(), nonce.to_bytes().as_ref(), &cached_msg), ); // Additionally create an encryption mechanism to protect the secret shares let encryption = Encryption::new(self.context, self.params.i(), rng); // Step 4: Broadcast let msg = encryption.registration(Commitments { commitments: commitments.clone(), cached_msg, sig }); ( SecretShareMachine { params: self.params, context: self.context, coefficients, our_commitments: commitments, encryption, }, msg, ) } } fn polynomial( coefficients: &[Zeroizing], l: Participant, ) -> Zeroizing { let l = F::from(u64::from(u16::from(l))); // This should never be reached since Participant is explicitly non-zero assert!(l != F::ZERO, "zero participant passed to polynomial"); let mut share = Zeroizing::new(F::ZERO); for (idx, coefficient) in coefficients.iter().rev().enumerate() { *share += coefficient.deref(); if idx != (coefficients.len() - 1) { *share *= l; } } share } /// The secret share message, to be sent to the party it's intended for over an authenticated /// channel. /// /// If any participant sends multiple secret shares to another participant, they are faulty. // This should presumably be written as SecretShare(Zeroizing).
// NOTE(review): begins with the continuation of the doc commentary opened on the
// previous line (explaining why the field repr isn't wrapped in Zeroizing). Visible
// here: the `SecretShare` newtype over `F::Repr` with AsRef/AsMut byte access, a
// redacted `Debug`, manual `Zeroize` + `Drop`-based `ZeroizeOnDrop`, and a fixed-size
// `ReadWrite` impl. Generics stripped by extraction; kept byte-identical.
// It's unfortunately not possible as F::Repr doesn't have Zeroize as a bound. // The encryption system also explicitly uses Zeroizing so it can ensure anything being // encrypted is within Zeroizing. Accordingly, internally having Zeroizing would be redundant. #[derive(Clone, PartialEq, Eq)] pub struct SecretShare(F::Repr); impl AsRef<[u8]> for SecretShare { fn as_ref(&self) -> &[u8] { self.0.as_ref() } } impl AsMut<[u8]> for SecretShare { fn as_mut(&mut self) -> &mut [u8] { self.0.as_mut() } } impl fmt::Debug for SecretShare { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("SecretShare").finish_non_exhaustive() } } impl Zeroize for SecretShare { fn zeroize(&mut self) { self.0.as_mut().zeroize() } } // Still manually implement ZeroizeOnDrop to ensure these don't stick around. // We could replace Zeroizing with a bound M: ZeroizeOnDrop. // Doing so would potentially fail to highlight the expected behavior with these and remove a layer // of depth. impl Drop for SecretShare { fn drop(&mut self) { self.zeroize(); } } impl ZeroizeOnDrop for SecretShare {} impl ReadWrite for SecretShare { fn read(reader: &mut R, _: ThresholdParams) -> io::Result { let mut repr = F::Repr::default(); reader.read_exact(repr.as_mut())?; Ok(SecretShare(repr)) } fn write(&self, writer: &mut W) -> io::Result<()> { writer.write_all(self.0.as_ref()) } } /// Advancement of the key generation state machine.
// NOTE(review): collapsed line; generics stripped. Kept byte-identical.
// Visible here: `SecretShareMachine` (coefficients, our commitments, encryption box)
// with a `Debug` that omits `coefficients`, and `verify_r1`: validates the commitment
// map, registers each peer's encryption key, checks commitment counts, batch-verifies
// every PoK (blaming the first failure), and inserts our own commitments via
// `drain(..)` — which empties `self.our_commitments` as a side effect.
#[derive(Zeroize)] pub struct SecretShareMachine { params: ThresholdParams, context: [u8; 32], coefficients: Vec>, our_commitments: Vec, encryption: Encryption, } impl fmt::Debug for SecretShareMachine { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt .debug_struct("SecretShareMachine") .field("params", &self.params) .field("context", &self.context) .field("our_commitments", &self.our_commitments) .field("encryption", &self.encryption) .finish_non_exhaustive() } } impl SecretShareMachine { /// Verify the data from the previous round (canonicity, PoKs, message authenticity) #[allow(clippy::type_complexity)] fn verify_r1( &mut self, rng: &mut R, mut commitment_msgs: HashMap>>, ) -> Result>, PedPoPError> { validate_map( &commitment_msgs, &self.params.all_participant_indexes().collect::>(), self.params.i(), )?; let mut batch = BatchVerifier::::new(commitment_msgs.len()); let mut commitments = HashMap::new(); for l in self.params.all_participant_indexes() { let Some(msg) = commitment_msgs.remove(&l) else { continue }; let mut msg = self.encryption.register(l, msg); if msg.commitments.len() != self.params.t().into() { Err(PedPoPError::InvalidCommitments(l))?; } // Step 5: Validate each proof of knowledge // This is solely the prep step for the latter batch verification msg.sig.batch_verify( rng, &mut batch, l, msg.commitments[0], challenge::(self.context, l, msg.sig.R.to_bytes().as_ref(), &msg.cached_msg), ); commitments.insert(l, msg.commitments.drain(..).collect::>()); } batch.verify_vartime_with_vartime_blame().map_err(PedPoPError::InvalidCommitments)?; commitments.insert(self.params.i(), self.our_commitments.drain(..).collect()); Ok(commitments) } /// Continue generating a key. /// /// Takes in everyone else's commitments. Returns a HashMap of encrypted secret shares to be sent /// over authenticated channels to their relevant counterparties. /// /// If any participant sends multiple secret shares to another participant, they are faulty.
// NOTE(review): collapsed line; generics stripped. Kept byte-identical.
// `generate_secret_shares` verifies round-1 commitments, evaluates the polynomial at
// every other participant's index (zeroizing the scalar once serialized), encrypts
// each share to its recipient, deliberately withholds our own share from the outbound
// map, computes our own share, zeroizes the coefficients, and transitions to
// `KeyMachine`. The trailing doc comment for `KeyMachine` wraps onto the next line.
#[allow(clippy::type_complexity)] pub fn generate_secret_shares( mut self, rng: &mut R, commitments: HashMap>>, ) -> Result< (KeyMachine, HashMap>>), PedPoPError, > { let commitments = self.verify_r1(&mut *rng, commitments)?; // Step 1: Generate secret shares for all other parties let mut res = HashMap::new(); for l in self.params.all_participant_indexes() { // Don't insert our own shares to the byte buffer which is meant to be sent around // An app developer could accidentally send it. Best to keep this black boxed if l == self.params.i() { continue; } let mut share = polynomial(&self.coefficients, l); let share_bytes = Zeroizing::new(SecretShare::(share.to_repr())); share.zeroize(); res.insert(l, self.encryption.encrypt(rng, l, share_bytes)); } // Calculate our own share let share = polynomial(&self.coefficients, self.params.i()); self.coefficients.zeroize(); Ok(( KeyMachine { params: self.params, secret: share, commitments, encryption: self.encryption }, res, )) } } /// Advancement of the the secret share state machine. /// /// This machine will 'complete' the protocol, by a local perspective. In order to be secure, /// the parties must confirm having successfully completed the protocol (an effort out of scope to /// this library), yet this is modeled by one more state transition (BlameMachine).
// NOTE(review): collapsed line; generics stripped. Kept byte-identical.
// Visible here: the `KeyMachine` state (our running secret, everyone's commitments,
// encryption box) with a secret-omitting `Debug` and manual `Zeroize`; `exponential`,
// which pairs each commitment with successive powers of the participant index for
// multiexp; and the head of `share_verification_statements` (its commentary is split
// mid-sentence onto the next physical line by the extraction).
pub struct KeyMachine { params: ThresholdParams, secret: Zeroizing, commitments: HashMap>, encryption: Encryption, } impl fmt::Debug for KeyMachine { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt .debug_struct("KeyMachine") .field("params", &self.params) .field("commitments", &self.commitments) .field("encryption", &self.encryption) .finish_non_exhaustive() } } impl Zeroize for KeyMachine { fn zeroize(&mut self) { self.params.zeroize(); self.secret.zeroize(); for commitments in self.commitments.values_mut() { commitments.zeroize(); } self.encryption.zeroize(); } } // Calculate the exponent for a given participant and apply it to a series of commitments // Initially used with the actual commitments to verify the secret share, later used with // stripes to generate the verification shares fn exponential(i: Participant, values: &[C::G]) -> Vec<(C::F, C::G)> { let i = C::F::from(u16::from(i).into()); let mut res = Vec::with_capacity(values.len()); (0 .. values.len()).fold(C::F::ONE, |exp, l| { res.push((exp, values[l])); exp * i }); res } fn share_verification_statements( target: Participant, commitments: &[C::G], mut share: Zeroizing, ) -> Vec<(C::F, C::G)> { // This can be insecurely linearized from n * t to just n using the below sums for a given // stripe. Doing so uses naive addition which is subject to malleability.
// NOTE(review): this physical line starts MID-COMMENT — `The only way to` continues the
// sentence wrapped from the previous line; the extraction broke the original `//`
// line-prefixes. Visible here: the body of `share_verification_statements` (appends
// `-share * G` so a valid share sums to identity under multiexp), the `BatchId` enum
// distinguishing decryption vs share failures, and most of `calculate_share`: decrypt
// each share, reject non-canonical scalars with blame, accumulate into our secret, and
// queue the multiexp verification. The blame `match` spills onto the next line.
The only way to // ensure that malleability isn't present is to use this n * t algorithm, which runs // per sender and not as an aggregate of all senders, which also enables blame let mut values = exponential::(target, commitments); // Perform the share multiplication outside of the multiexp to minimize stack copying // While the multiexp BatchVerifier does zeroize its flattened multiexp, and itself, it still // converts whatever we give to an iterator and then builds a Vec internally, welcoming copies let neg_share_pub = C::generator() * -*share; share.zeroize(); values.push((C::F::ONE, neg_share_pub)); values } #[derive(Clone, Copy, Hash, Debug, Zeroize)] enum BatchId { Decryption(Participant), Share(Participant), } impl KeyMachine { /// Calculate our share given the shares sent to us. /// /// Returns a BlameMachine usable to determine if faults in the protocol occurred. /// /// This will error on, and return a blame proof for, the first-observed case of faulty behavior. pub fn calculate_share( mut self, rng: &mut R, mut shares: HashMap>>, ) -> Result, PedPoPError> { validate_map( &shares, &self.params.all_participant_indexes().collect::>(), self.params.i(), )?; let mut batch = BatchVerifier::new(shares.len()); let mut blames = HashMap::new(); for (l, share_bytes) in shares.drain() { let (mut share_bytes, blame) = self.encryption.decrypt(rng, &mut batch, BatchId::Decryption(l), l, share_bytes); let share = Zeroizing::new(Option::::from(C::F::from_repr(share_bytes.0)).ok_or_else(|| { PedPoPError::InvalidShare { participant: l, blame: Some(blame.clone()) } })?); share_bytes.zeroize(); *self.secret += share.deref(); blames.insert(l, blame); batch.queue( rng, BatchId::Share(l), share_verification_statements::(self.params.i(), &self.commitments[&l], share), ); } batch.verify_with_vartime_blame().map_err(|id| { let (l, blame) = match id { BatchId::Decryption(l) => (l, None), BatchId::Share(l) => (l, Some(blames.remove(&l).unwrap())), }; PedPoPError::InvalidShare {
// NOTE(review): opens with the closing fields of the `InvalidShare` error started on
// the previous line. Visible here: the remainder of `calculate_share` — striping the
// commitments per degree-t coefficient, summing them for cheap verification-share
// computation, building each participant's verification share (ours directly from the
// secret), and constructing `BlameMachine` with ThresholdKeys — plus the `BlameMachine`
// struct, `Debug`, and `Zeroize`. Generics stripped by extraction; kept byte-identical.
participant: l, blame } })?; // Stripe commitments per t and sum them in advance. Calculating verification shares relies on // these sums so preprocessing them is a massive speedup // If these weren't just sums, yet the tables used in multiexp, this would be further optimized // As of right now, each multiexp will regenerate them let mut stripes = Vec::with_capacity(usize::from(self.params.t())); for t in 0 .. usize::from(self.params.t()) { stripes.push(self.commitments.values().map(|commitments| commitments[t]).sum()); } // Calculate each user's verification share let mut verification_shares = HashMap::new(); for i in self.params.all_participant_indexes() { verification_shares.insert( i, if i == self.params.i() { C::generator() * self.secret.deref() } else { multiexp_vartime(&exponential::(i, &stripes)) }, ); } let KeyMachine { commitments, encryption, params, secret } = self; Ok(BlameMachine { commitments, encryption: encryption.into_decryption(), result: Some( ThresholdKeys::new(params, Interpolation::Lagrange, secret, verification_shares) .map_err(PedPoPError::DkgError)?, ), }) } } /// A machine capable of handling blame proofs. pub struct BlameMachine { commitments: HashMap>, encryption: Decryption, result: Option>, } impl fmt::Debug for BlameMachine { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt .debug_struct("BlameMachine") .field("commitments", &self.commitments) .field("encryption", &self.encryption) .finish_non_exhaustive() } } impl Zeroize for BlameMachine { fn zeroize(&mut self) { for commitments in self.commitments.values_mut() { commitments.zeroize(); } self.result.zeroize(); } } impl BlameMachine { /// Mark the protocol as having been successfully completed, returning the generated keys. /// This should only be called after having confirmed, with all participants, successful /// completion. /// /// Confirming successful completion is not necessarily as simple as everyone reporting their /// completion.
// NOTE(review): starts mid-doc-comment (sentence wrapped from the previous line; the
// original `///` prefixes were lost in extraction). Visible here: `complete` (unwraps
// the stored ThresholdKeys) and `blame_internal`, which adjudicates fault: invalid
// signature → sender; invalid decryption proof → recipient (accuser); non-canonical
// scalar → sender; share failing the commitment multiexp → sender; otherwise the
// accusing recipient is at fault. The `blame` doc comment wraps onto the next line.
Everyone must also receive everyone's report of completion, entering into the /// territory of consensus protocols. This library does not handle that nor does it provide any /// tooling to do so. This function is solely intended to force users to acknowledge they're /// completing the protocol, not processing any blame. pub fn complete(self) -> ThresholdKeys { self.result.unwrap() } fn blame_internal( &self, sender: Participant, recipient: Participant, msg: EncryptedMessage>, proof: Option>, ) -> Participant { let share_bytes = match self.encryption.decrypt_with_proof(sender, recipient, msg, proof) { Ok(share_bytes) => share_bytes, // If there's an invalid signature, the sender did not send a properly formed message Err(DecryptionError::InvalidSignature) => return sender, // Decryption will fail if the provided ECDH key wasn't correct for the given message Err(DecryptionError::InvalidProof) => return recipient, }; let Some(share) = Option::::from(C::F::from_repr(share_bytes.0)) else { // If this isn't a valid scalar, the sender is faulty return sender; }; // If this isn't a valid share, the sender is faulty if !bool::from( multiexp_vartime(&share_verification_statements::( recipient, &self.commitments[&sender], Zeroizing::new(share), )) .is_identity(), ) { return sender; } // The share was canonical and valid recipient } /// Given an accusation of fault, determine the faulty party (either the sender, who sent an /// invalid secret share, or the receiver, who claimed a valid secret share was invalid). No /// matter which, prevent completion of the machine, forcing an abort of the protocol. /// /// The message should be a copy of the encrypted secret share from the accused sender to the /// accusing recipient. This message must have been authenticated as actually having come from /// the sender in question. /// /// In order to enable detecting multiple faults, an `AdditionalBlameMachine` is returned, which /// can be used to determine further blame.
// NOTE(review): starts mid-doc-comment (wrapped from the previous line). Visible here:
// `BlameMachine::blame`, which consumes the machine, delegates to `blame_internal`, and
// returns an `AdditionalBlameMachine` for further accusations; the newtype itself; and
// `AdditionalBlameMachine::new`, which rebuilds blame-evaluation state from all n
// commitment messages (assumed pre-authenticated per its docs), registering each with a
// fresh `Decryption`. Its `blame` doc comment wraps onto the next physical line.
These machines will process the same blame statements /// multiple times, always identifying blame. It is the caller's job to ensure they're unique in /// order to prevent multiple instances of blame over a single incident. pub fn blame( self, sender: Participant, recipient: Participant, msg: EncryptedMessage>, proof: Option>, ) -> (AdditionalBlameMachine, Participant) { let faulty = self.blame_internal(sender, recipient, msg, proof); (AdditionalBlameMachine(self), faulty) } } /// A machine capable of handling an arbitrary amount of additional blame proofs. #[derive(Debug, Zeroize)] pub struct AdditionalBlameMachine(BlameMachine); impl AdditionalBlameMachine { /// Create an AdditionalBlameMachine capable of evaluating Blame regardless of if the caller was /// a member in the DKG protocol. /// /// Takes in the parameters for the DKG protocol and all of the participant's commitment /// messages. /// /// This constructor assumes the full validity of the commitment messages. They must be fully /// authenticated as having come from the supposed party and verified as valid. Usage of invalid /// commitments is considered undefined behavior, and may cause everything from inaccurate blame /// to panics. pub fn new( context: [u8; 32], n: u16, mut commitment_msgs: HashMap>>, ) -> Result> { let mut commitments = HashMap::new(); let mut encryption = Decryption::new(context); for i in 1 ..= n { let i = Participant::new(i).unwrap(); let Some(msg) = commitment_msgs.remove(&i) else { Err(PedPoPError::MissingParticipant(i))? }; commitments.insert(i, encryption.register(i, msg).commitments); } Ok(AdditionalBlameMachine(BlameMachine { commitments, encryption, result: None })) } /// Given an accusation of fault, determine the faulty party (either the sender, who sent an /// invalid secret share, or the receiver, who claimed a valid secret share was invalid). /// /// The message should be a copy of the encrypted secret share from the accused sender to the /// accusing recipient.
// NOTE(review): starts mid-doc-comment (wrapped from the previous line). Visible here:
// `AdditionalBlameMachine::blame` (non-consuming delegate to `blame_internal`), then an
// extractor `==== FILE: ... ====` separator opening crypto/dkg/pedpop/src/tests.rs:
// imports, THRESHOLD/PARTICIPANTS constants, `clone_without`, test type aliases, the
// CONTEXT constant, and the head of `commit_enc_keys_and_shares` (machine setup and
// commitment round); the `.map` over drained machines continues on the next line.
This message must have been authenticated as actually having come from /// the sender in question. /// /// This will process the same blame statement multiple times, always identifying blame. It is /// the caller's job to ensure they're unique in order to prevent multiple instances of blame /// over a single incident. pub fn blame( &self, sender: Participant, recipient: Participant, msg: EncryptedMessage>, proof: Option>, ) -> Participant { self.0.blame_internal(sender, recipient, msg, proof) } } ================================================ FILE: crypto/dkg/pedpop/src/tests.rs ================================================ use std::collections::HashMap; use rand_core::{RngCore, CryptoRng, OsRng}; use dalek_ff_group::Ristretto; use ciphersuite::Ciphersuite; use crate::*; const THRESHOLD: u16 = 3; const PARTICIPANTS: u16 = 5; /// Clone a map without a specific value. fn clone_without( map: &HashMap, without: &K, ) -> HashMap { let mut res = map.clone(); res.remove(without).unwrap(); res } type PedPoPEncryptedMessage = EncryptedMessage::F>>; type PedPoPSecretShares = HashMap>; const CONTEXT: [u8; 32] = *b"DKG Test Key Generation "; // Commit, then return commitment messages, enc keys, and shares #[allow(clippy::type_complexity)] fn commit_enc_keys_and_shares( rng: &mut R, ) -> ( HashMap>, HashMap>>, HashMap, HashMap>, ) { let mut machines = HashMap::new(); let mut commitments = HashMap::new(); let mut enc_keys = HashMap::new(); for i in (1 ..= PARTICIPANTS).map(|i| Participant::new(i).unwrap()) { let params = ThresholdParams::new(THRESHOLD, PARTICIPANTS, i).unwrap(); let machine = KeyGenMachine::::new(params, CONTEXT); let (machine, these_commitments) = machine.generate_coefficients(rng); machines.insert(i, machine); commitments.insert( i, EncryptionKeyMessage::read::<&[u8]>(&mut these_commitments.serialize().as_ref(), params) .unwrap(), ); enc_keys.insert(i, commitments[&i].enc_key()); } let mut secret_shares = HashMap::new(); let machines = machines .drain()
.map(|(l, machine)| { let (machine, mut shares) = machine.generate_secret_shares(rng, clone_without(&commitments, &l)).unwrap(); let shares = shares .drain() .map(|(l, share)| { ( l, EncryptedMessage::read::<&[u8]>( &mut share.serialize().as_ref(), // Only t/n actually matters, so hardcode i to 1 here ThresholdParams::new(THRESHOLD, PARTICIPANTS, Participant::new(1).unwrap()).unwrap(), ) .unwrap(), ) }) .collect::>(); secret_shares.insert(l, shares); (l, machine) }) .collect::>(); (machines, commitments, enc_keys, secret_shares) } fn generate_secret_shares( shares: &HashMap>, recipient: Participant, ) -> PedPoPSecretShares { let mut our_secret_shares = HashMap::new(); for (i, shares) in shares { if recipient == *i { continue; } our_secret_shares.insert(*i, shares[&recipient].clone()); } our_secret_shares } /// Fully perform the PedPoP key generation algorithm. fn pedpop_gen( rng: &mut R, ) -> HashMap> { let (mut machines, _, _, secret_shares) = commit_enc_keys_and_shares::<_, C>(rng); let mut verification_shares = None; let mut group_key = None; machines .drain() .map(|(i, machine)| { let our_secret_shares = generate_secret_shares(&secret_shares, i); let these_keys = machine.calculate_share(rng, our_secret_shares).unwrap().complete(); // Verify the verification_shares are agreed upon if verification_shares.is_none() { verification_shares = Some( these_keys .params() .all_participant_indexes() .map(|i| (i, these_keys.original_verification_share(i))) .collect::>(), ); } assert_eq!( verification_shares.as_ref().unwrap(), &these_keys .params() .all_participant_indexes() .map(|i| (i, these_keys.original_verification_share(i))) .collect::>() ); // Verify the group keys are agreed upon if group_key.is_none() { group_key = Some(these_keys.group_key()); } assert_eq!(group_key.unwrap(), these_keys.group_key()); (i, these_keys) }) .collect::>() } const ONE: Participant = Participant::new(1).unwrap(); const TWO: Participant = Participant::new(2).unwrap(); #[test] fn 
test_pedpop() { let _ = core::hint::black_box(pedpop_gen::<_, Ristretto>(&mut OsRng)); } fn test_blame( commitment_msgs: &HashMap>>, machines: Vec>, msg: &PedPoPEncryptedMessage, blame: &Option>, ) { for machine in machines { let (additional, blamed) = machine.blame(ONE, TWO, msg.clone(), blame.clone()); assert_eq!(blamed, ONE); // Verify additional blame also works assert_eq!(additional.blame(ONE, TWO, msg.clone(), blame.clone()), ONE); // Verify machines constructed with AdditionalBlameMachine::new work assert_eq!( AdditionalBlameMachine::new(CONTEXT, PARTICIPANTS, commitment_msgs.clone()).unwrap().blame( ONE, TWO, msg.clone(), blame.clone() ), ONE, ); } } // TODO: Write a macro which expands to the following #[test] fn invalid_encryption_pop_blame() { let (mut machines, commitment_msgs, _, mut secret_shares) = commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng); // Mutate the PoP of the encrypted message from 1 to 2 secret_shares.get_mut(&ONE).unwrap().get_mut(&TWO).unwrap().invalidate_pop(); let mut blame = None; let machines = machines .drain() .filter_map(|(i, machine)| { let our_secret_shares = generate_secret_shares(&secret_shares, i); let machine = machine.calculate_share(&mut OsRng, our_secret_shares); if i == TWO { assert_eq!( machine.err(), Some(PedPoPError::InvalidShare { participant: ONE, blame: None }) ); // Explicitly declare we have a blame object, which happens to be None since invalid PoP // is self-explainable blame = Some(None); None } else { Some(machine.unwrap()) } }) .collect::>(); test_blame(&commitment_msgs, machines, &secret_shares[&ONE][&TWO].clone(), &blame.unwrap()); } #[test] fn invalid_ecdh_blame() { let (mut machines, commitment_msgs, _, mut secret_shares) = commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng); // Mutate the share to trigger a blame event // Mutates from 2 to 1, as 1 is expected to end up malicious for test_blame to pass // While here, 2 is malicious, this is so 1 creates the blame proof // We then malleate 1's 
blame proof, so 1 ends up malicious // Doesn't simply invalidate the PoP as that won't have a blame statement // By mutating the encrypted data, we do ensure a blame statement is created secret_shares .get_mut(&TWO) .unwrap() .get_mut(&ONE) .unwrap() .invalidate_msg(&mut OsRng, CONTEXT, TWO); let mut blame = None; let machines = machines .drain() .filter_map(|(i, machine)| { let our_secret_shares = generate_secret_shares(&secret_shares, i); let machine = machine.calculate_share(&mut OsRng, our_secret_shares); if i == ONE { blame = Some(match machine.err() { Some(PedPoPError::InvalidShare { participant: TWO, blame: Some(blame) }) => Some(blame), _ => panic!(), }); None } else { Some(machine.unwrap()) } }) .collect::>(); blame.as_mut().unwrap().as_mut().unwrap().invalidate_key(); test_blame(&commitment_msgs, machines, &secret_shares[&TWO][&ONE].clone(), &blame.unwrap()); } // This should be largely equivalent to the prior test #[test] fn invalid_dleq_blame() { let (mut machines, commitment_msgs, _, mut secret_shares) = commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng); secret_shares .get_mut(&TWO) .unwrap() .get_mut(&ONE) .unwrap() .invalidate_msg(&mut OsRng, CONTEXT, TWO); let mut blame = None; let machines = machines .drain() .filter_map(|(i, machine)| { let our_secret_shares = generate_secret_shares(&secret_shares, i); let machine = machine.calculate_share(&mut OsRng, our_secret_shares); if i == ONE { blame = Some(match machine.err() { Some(PedPoPError::InvalidShare { participant: TWO, blame: Some(blame) }) => Some(blame), _ => panic!(), }); None } else { Some(machine.unwrap()) } }) .collect::>(); blame.as_mut().unwrap().as_mut().unwrap().invalidate_dleq(); test_blame(&commitment_msgs, machines, &secret_shares[&TWO][&ONE].clone(), &blame.unwrap()); } #[test] fn invalid_share_serialization_blame() { let (mut machines, commitment_msgs, enc_keys, mut secret_shares) = commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng); 
secret_shares.get_mut(&ONE).unwrap().get_mut(&TWO).unwrap().invalidate_share_serialization( &mut OsRng, CONTEXT, ONE, enc_keys[&TWO], ); let mut blame = None; let machines = machines .drain() .filter_map(|(i, machine)| { let our_secret_shares = generate_secret_shares(&secret_shares, i); let machine = machine.calculate_share(&mut OsRng, our_secret_shares); if i == TWO { blame = Some(match machine.err() { Some(PedPoPError::InvalidShare { participant: ONE, blame: Some(blame) }) => Some(blame), _ => panic!(), }); None } else { Some(machine.unwrap()) } }) .collect::>(); test_blame(&commitment_msgs, machines, &secret_shares[&ONE][&TWO].clone(), &blame.unwrap()); } #[test] fn invalid_share_value_blame() { let (mut machines, commitment_msgs, enc_keys, mut secret_shares) = commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng); secret_shares.get_mut(&ONE).unwrap().get_mut(&TWO).unwrap().invalidate_share_value( &mut OsRng, CONTEXT, ONE, enc_keys[&TWO], ); let mut blame = None; let machines = machines .drain() .filter_map(|(i, machine)| { let our_secret_shares = generate_secret_shares(&secret_shares, i); let machine = machine.calculate_share(&mut OsRng, our_secret_shares); if i == TWO { blame = Some(match machine.err() { Some(PedPoPError::InvalidShare { participant: ONE, blame: Some(blame) }) => Some(blame), _ => panic!(), }); None } else { Some(machine.unwrap()) } }) .collect::>(); test_blame(&commitment_msgs, machines, &secret_shares[&ONE][&TWO].clone(), &blame.unwrap()); } ================================================ FILE: crypto/dkg/promote/Cargo.toml ================================================ [package] name = "dkg-promote" version = "0.6.1" description = "Promotions for keys from the dkg crate" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg/promote" authors = ["Luke Parker "] keywords = ["dkg", "multisig", "threshold", "ff", "group"] edition = "2021" rust-version = "1.80" [package.metadata.docs.rs] all-features = true 
rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] thiserror = { version = "2", default-features = false, features = ["std"] } rand_core = { version = "0.6", default-features = false, features = ["std"] } transcript = { package = "flexible-transcript", path = "../../transcript", version = "^0.3.2", default-features = false, features = ["std", "recommended"] } ciphersuite = { path = "../../ciphersuite", version = "^0.4.1", default-features = false, features = ["std"] } dleq = { path = "../../dleq", version = "^0.4.1", default-features = false, features = ["std", "serialize"] } dkg = { path = "../", version = "0.6.1", default-features = false, features = ["std"] } [dev-dependencies] zeroize = { version = "^1.5", default-features = false, features = ["std", "zeroize_derive"] } rand_core = { version = "0.6", default-features = false, features = ["getrandom"] } dalek-ff-group = { path = "../../dalek-ff-group" } dkg-recovery = { path = "../recovery", default-features = false, features = ["std"] } ================================================ FILE: crypto/dkg/promote/LICENSE ================================================ MIT License Copyright (c) 2021-2025 Luke Parker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: crypto/dkg/promote/README.md ================================================ # Distributed Key Generation - Promote This crate implements 'promotions' for keys from the [`dkg`](https://docs.rs/dkg) crate. A promotion takes a set of keys and maps it to a different `Ciphersuite`. This crate was originally part of the `dkg` crate, which was [audited by Cypher Stack in March 2023]( https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf ), culminating in commit [669d2dbffc1dafb82a09d9419ea182667115df06]( https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06 ). Any subsequent changes have not undergone auditing. ================================================ FILE: crypto/dkg/promote/src/lib.rs ================================================ #![cfg_attr(docsrs, feature(doc_cfg))] #![doc = include_str!("../README.md")] // This crate requires `dleq` which doesn't support no-std via std-shims // #![cfg_attr(not(feature = "std"), no_std)] use core::{marker::PhantomData, ops::Deref}; use std::{ io::{self, Read, Write}, collections::HashMap, }; use rand_core::{RngCore, CryptoRng}; use ciphersuite::{group::GroupEncoding, Ciphersuite}; use transcript::{Transcript, RecommendedTranscript}; use dleq::DLEqProof; pub use dkg::*; #[cfg(test)] mod tests; /// Errors encountered when promoting keys. #[derive(Clone, PartialEq, Eq, Debug, thiserror::Error)] pub enum PromotionError { /// Invalid participant identifier. #[error("invalid participant (1 <= participant <= {n}, yet participant is {participant})")] InvalidParticipant { /// The total amount of participants. 
n: u16, /// The specified participant. participant: Participant, }, /// An incorrect amount of participants was specified. #[error("incorrect amount of participants. {t} <= amount <= {n}, yet amount is {amount}")] IncorrectAmountOfParticipants { /// The threshold required. t: u16, /// The total amount of participants. n: u16, /// The amount of participants specified. amount: usize, }, /// Participant provided an invalid proof. #[error("invalid proof {0}")] InvalidProof(Participant), } fn transcript(key: &G, i: Participant) -> RecommendedTranscript { let mut transcript = RecommendedTranscript::new(b"DKG Generator Promotion v0.2"); transcript.append_message(b"group_key", key.to_bytes()); transcript.append_message(b"participant", i.to_bytes()); transcript } /// Proof of valid promotion to another generator. #[derive(Clone, Copy)] pub struct GeneratorProof { share: C::G, proof: DLEqProof, } impl GeneratorProof { pub fn write(&self, writer: &mut W) -> io::Result<()> { writer.write_all(self.share.to_bytes().as_ref())?; self.proof.write(writer) } pub fn read(reader: &mut R) -> io::Result> { Ok(GeneratorProof { share: ::read_G(reader)?, proof: DLEqProof::read(reader)?, }) } pub fn serialize(&self) -> Vec { let mut buf = vec![]; self.write(&mut buf).unwrap(); buf } } /// Promote a set of keys from one generator to another, where the elliptic curve is the same. /// /// Since the Ciphersuite trait additionally specifies a generator, this provides an O(n) way to /// update the generator used with keys. This outperforms the key generation protocol which is /// exponential. pub struct GeneratorPromotion { base: ThresholdKeys, proof: GeneratorProof, _c2: PhantomData, } impl> GeneratorPromotion { /// Begin promoting keys from one generator to another. /// /// Returns a proof this share was properly promoted. 
pub fn promote( rng: &mut R, base: ThresholdKeys, ) -> (GeneratorPromotion, GeneratorProof) { // Do a DLEqProof for the new generator let proof = GeneratorProof { share: C2::generator() * base.original_secret_share().deref(), proof: DLEqProof::prove( rng, &mut transcript(&base.original_group_key(), base.params().i()), &[C1::generator(), C2::generator()], base.original_secret_share(), ), }; (GeneratorPromotion { base, proof, _c2: PhantomData:: }, proof) } /// Complete promotion by taking in the proofs from all other participants. pub fn complete( self, proofs: &HashMap>, ) -> Result, PromotionError> { let params = self.base.params(); if proofs.len() != (usize::from(params.n()) - 1) { Err(PromotionError::IncorrectAmountOfParticipants { t: params.n(), n: params.n(), amount: proofs.len() + 1, })?; } for i in proofs.keys().copied() { if u16::from(i) > params.n() { Err(PromotionError::InvalidParticipant { n: params.n(), participant: i })?; } } let mut verification_shares = HashMap::new(); verification_shares.insert(params.i(), self.proof.share); for i in 1 ..= params.n() { let i = Participant::new(i).unwrap(); if i == params.i() { continue; } let proof = proofs.get(&i).unwrap(); proof .proof .verify( &mut transcript(&self.base.original_group_key(), i), &[C1::generator(), C2::generator()], &[self.base.original_verification_share(i), proof.share], ) .map_err(|_| PromotionError::InvalidProof(i))?; verification_shares.insert(i, proof.share); } Ok( ThresholdKeys::new( params, self.base.interpolation().clone(), self.base.original_secret_share().clone(), verification_shares, ) .unwrap(), ) } } ================================================ FILE: crypto/dkg/promote/src/tests.rs ================================================ use core::marker::PhantomData; use std::collections::HashMap; use zeroize::{Zeroize, Zeroizing}; use rand_core::OsRng; use dalek_ff_group::Ristretto; use ciphersuite::{ group::{ff::Field, Group}, Ciphersuite, }; use dkg::*; use dkg_recovery::recover_key; 
use crate::{GeneratorPromotion, GeneratorProof}; #[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)] struct AltGenerator { _curve: PhantomData, } impl Ciphersuite for AltGenerator { type F = C::F; type G = C::G; type H = C::H; const ID: &'static [u8] = b"Alternate Ciphersuite"; fn generator() -> Self::G { C::G::generator() * ::hash_to_F(b"DKG Promotion Test", b"generator") } fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F { ::hash_to_F(dst, data) } } /// Clone a map without a specific value. pub fn clone_without( map: &HashMap, without: &K, ) -> HashMap { let mut res = map.clone(); res.remove(without).unwrap(); res } // Test promotion of threshold keys to another generator #[test] fn test_generator_promotion() { // Generate a set of `ThresholdKeys` const PARTICIPANTS: u16 = 5; let keys: [ThresholdKeys<_>; PARTICIPANTS as usize] = { let shares: [::F; PARTICIPANTS as usize] = core::array::from_fn(|_| ::F::random(&mut OsRng)); let verification_shares = (0 .. PARTICIPANTS) .map(|i| { ( Participant::new(i + 1).unwrap(), ::generator() * shares[usize::from(i)], ) }) .collect::>(); core::array::from_fn(|i| { ThresholdKeys::new( ThresholdParams::new( PARTICIPANTS, PARTICIPANTS, Participant::new(u16::try_from(i + 1).unwrap()).unwrap(), ) .unwrap(), Interpolation::Constant(vec![::F::ONE; PARTICIPANTS as usize]), Zeroizing::new(shares[i]), verification_shares.clone(), ) .unwrap() }) }; // Perform the promotion let mut promotions = HashMap::new(); let mut proofs = HashMap::new(); for keys in &keys { let i = keys.params().i(); let (promotion, proof) = GeneratorPromotion::<_, AltGenerator>::promote(&mut OsRng, keys.clone()); promotions.insert(i, promotion); proofs.insert( i, GeneratorProof::::read::<&[u8]>(&mut proof.serialize().as_ref()).unwrap(), ); } // Complete the promotion, and verify it worked let new_group_key = AltGenerator::::generator() * *recover_key(&keys).unwrap(); for (i, promoting) in promotions.drain() { let promoted = 
promoting.complete(&clone_without(&proofs, &i)).unwrap(); assert_eq!(keys[usize::from(u16::from(i) - 1)].params(), promoted.params()); assert_eq!( keys[usize::from(u16::from(i) - 1)].original_secret_share(), promoted.original_secret_share() ); assert_eq!(new_group_key, promoted.group_key()); for l in 0 .. PARTICIPANTS { let verification_share = promoted.original_verification_share(Participant::new(l + 1).unwrap()); assert_eq!( AltGenerator::::generator() * **keys[usize::from(l)].original_secret_share(), verification_share ); } } } ================================================ FILE: crypto/dkg/recovery/Cargo.toml ================================================ [package] name = "dkg-recovery" version = "0.6.0" description = "Recover a secret-shared key from a collection of dkg::ThresholdKeys" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg/recovery" authors = ["Luke Parker "] keywords = ["dkg", "multisig", "threshold", "ff", "group"] edition = "2021" rust-version = "1.66" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] zeroize = { version = "^1.5", default-features = false } thiserror = { version = "2", default-features = false } ciphersuite = { path = "../../ciphersuite", version = "^0.4.1", default-features = false } dkg = { path = "../", version = "0.6", default-features = false } [features] std = [ "zeroize/std", "thiserror/std", "ciphersuite/std", "dkg/std", ] default = ["std"] ================================================ FILE: crypto/dkg/recovery/LICENSE ================================================ MIT License Copyright (c) 2021-2025 Luke Parker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, 
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: crypto/dkg/recovery/README.md ================================================ # Distributed Key Generation - Recovery A utility function to recover a key from its secret shares. Keys likely SHOULD NOT ever be recovered, making this primarily intended for testing purposes. Instead, the shares of the key should be used to produce shares for the desired action, allowing using the key while never reconstructing it. Before being smashed, this crate was [audited by Cypher Stack in March 2023]( https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf ), culminating in commit [669d2dbffc1dafb82a09d9419ea182667115df06]( https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06 ). Any subsequent changes have not undergone auditing. 
================================================
FILE: crypto/dkg/recovery/src/lib.rs
================================================
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
#![no_std]

use core::ops::{Deref, DerefMut};

extern crate alloc;
use alloc::vec::Vec;

use zeroize::Zeroizing;

use ciphersuite::Ciphersuite;

pub use dkg::*;

/// Errors encountered when recovering a secret-shared key from a collection of
/// `dkg::ThresholdKeys`.
#[derive(Clone, PartialEq, Eq, Debug, thiserror::Error)]
pub enum RecoveryError {
  /// No keys were provided.
  #[error("no keys provided")]
  NoKeysProvided,
  /// Not enough keys were provided.
  #[error("not enough keys provided (threshold required {required}, provided {provided})")]
  NotEnoughKeysProvided { required: u16, provided: usize },
  /// The keys had inconsistent parameters.
  #[error("keys had inconsistent parameters")]
  InconsistentParameters,
  /// The keys are from distinct secret-sharing sessions or otherwise corrupt.
  #[error("recovery failed")]
  Failure,
  /// An error propagated from the underlying `dkg` crate.
  #[error("error from dkg ({0})")]
  DkgError(DkgError),
}

/// Recover a shared secret from a collection of `dkg::ThresholdKeys`.
pub fn recover_key( keys: &[ThresholdKeys], ) -> Result, RecoveryError> { let included = keys.iter().map(|keys| keys.params().i()).collect::>(); let keys_len = keys.len(); let mut keys = keys.iter(); let first_keys = keys.next().ok_or(RecoveryError::NoKeysProvided)?; { let t = first_keys.params().t(); if keys_len < usize::from(t) { Err(RecoveryError::NotEnoughKeysProvided { required: t, provided: keys_len })?; } } { let first_params = ( first_keys.params().t(), first_keys.params().n(), first_keys.group_key(), first_keys.current_scalar(), first_keys.current_offset(), ); for keys in keys.clone() { let params = ( keys.params().t(), keys.params().n(), keys.group_key(), keys.current_scalar(), keys.current_offset(), ); if params != first_params { Err(RecoveryError::InconsistentParameters)?; } } } let mut res: Zeroizing<_> = first_keys.view(included.clone()).map_err(RecoveryError::DkgError)?.secret_share().clone(); for keys in keys { *res.deref_mut() += keys.view(included.clone()).map_err(RecoveryError::DkgError)?.secret_share().deref(); } if (C::generator() * res.deref()) != first_keys.group_key() { Err(RecoveryError::Failure)?; } Ok(res) } ================================================ FILE: crypto/dkg/src/lib.rs ================================================ #![cfg_attr(docsrs, feature(doc_cfg))] #![doc = include_str!("../README.md")] #![cfg_attr(not(feature = "std"), no_std)] use core::{ ops::Deref, fmt::{self, Debug}, }; #[allow(unused_imports)] use std_shims::prelude::*; use std_shims::{sync::Arc, vec, vec::Vec, collections::HashMap, io}; use zeroize::{Zeroize, Zeroizing}; use ciphersuite::{ group::{ ff::{Field, PrimeField}, GroupEncoding, }, Ciphersuite, }; /// The ID of a participant, defined as a non-zero u16. #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Zeroize)] #[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize))] pub struct Participant(u16); impl Participant { /// Create a new Participant identifier from a u16. 
pub const fn new(i: u16) -> Option { if i == 0 { None } else { Some(Participant(i)) } } /// Convert a Participant identifier to bytes. #[allow(clippy::wrong_self_convention)] pub const fn to_bytes(&self) -> [u8; 2] { self.0.to_le_bytes() } } impl From for u16 { fn from(participant: Participant) -> u16 { participant.0 } } impl fmt::Display for Participant { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.0) } } /// Errors encountered when working with threshold keys. #[derive(Clone, PartialEq, Eq, Debug, thiserror::Error)] pub enum DkgError { /// A parameter was zero. #[error("a parameter was 0 (threshold {t}, participants {n})")] ZeroParameter { /// The specified threshold. t: u16, /// The specified total amount of participants. n: u16, }, /// The threshold exceeded the amount of participants. #[error("invalid threshold (max {n}, got {t})")] InvalidThreshold { /// The specified threshold. t: u16, /// The specified total amount of participants. n: u16, }, /// Invalid participant identifier. #[error("invalid participant (1 <= participant <= {n}, yet participant is {participant})")] InvalidParticipant { /// The total amount of participants. n: u16, /// The specified participant. participant: Participant, }, /// An incorrect amount of participants was specified. #[error("incorrect amount of verification shares (n = {n} yet {shares} provided)")] IncorrectAmountOfVerificationShares { /// The amount of participants. n: u16, /// The amount of shares provided. shares: usize, }, /// An inapplicable method of interpolation was specified. #[error("inapplicable method of interpolation ({0})")] InapplicableInterpolation(&'static str), /// An incorrect amount of participants was specified. #[error("incorrect amount of participants. {t} <= amount <= {n}, yet amount is {amount}")] IncorrectAmountOfParticipants { /// The threshold required. t: u16, /// The total amount of participants. n: u16, /// The amount of participants specified. 
amount: usize, }, /// A participant was duplicated. #[error("a participant ({0}) was duplicated")] DuplicatedParticipant(Participant), /// Not participating in declared signing set. #[error("not participating in declared signing set")] NotParticipating, } // Manually implements BorshDeserialize so we can enforce it's a valid index #[cfg(feature = "borsh")] impl borsh::BorshDeserialize for Participant { fn deserialize_reader(reader: &mut R) -> io::Result { Participant::new(u16::deserialize_reader(reader)?) .ok_or_else(|| io::Error::other("invalid participant")) } } /// Parameters for a multisig. #[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)] #[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize))] pub struct ThresholdParams { /// Participants needed to sign on behalf of the group. t: u16, /// Amount of participants. n: u16, /// Index of the participant being acted for. i: Participant, } /// An iterator over all participant indexes. struct AllParticipantIndexes { i: u16, n: u16, } impl Iterator for AllParticipantIndexes { type Item = Participant; fn next(&mut self) -> Option { if self.i > self.n { None?; } let res = Participant::new(self.i).unwrap(); // If i == n == u16::MAX, we cause `i > n` by setting `n` to `0` so the iterator becomes empty if self.i == u16::MAX { self.n = 0; } else { self.i += 1; } Some(res) } } impl ThresholdParams { /// Create a new set of parameters. pub const fn new(t: u16, n: u16, i: Participant) -> Result { if (t == 0) || (n == 0) { return Err(DkgError::ZeroParameter { t, n }); } if t > n { return Err(DkgError::InvalidThreshold { t, n }); } if i.0 > n { return Err(DkgError::InvalidParticipant { n, participant: i }); } Ok(ThresholdParams { t, n, i }) } /// The threshold for a multisig with these parameters. pub const fn t(&self) -> u16 { self.t } /// The amount of participants for a multisig with these parameters. pub const fn n(&self) -> u16 { self.n } /// The participant index of the share with these parameters. 
pub const fn i(&self) -> Participant { self.i } /// An iterator over all participant indexes. pub fn all_participant_indexes(&self) -> impl Iterator { AllParticipantIndexes { i: 1, n: self.n } } } #[cfg(feature = "borsh")] impl borsh::BorshDeserialize for ThresholdParams { fn deserialize_reader(reader: &mut R) -> io::Result { let t = u16::deserialize_reader(reader)?; let n = u16::deserialize_reader(reader)?; let i = Participant::deserialize_reader(reader)?; ThresholdParams::new(t, n, i).map_err(|e| io::Error::other(format!("{e:?}"))) } } /// A method of interpolation. #[derive(Clone, PartialEq, Eq, Debug, Zeroize)] pub enum Interpolation { /// A list of constant coefficients, one for each of the secret key shares. /* There's no benefit to using a full linear combination here, as the additive term would have an entirely known evaluation with a fixed, public coefficient of `1`. Accordingly, the entire key can simply be offset with the additive term to achieve the same effect. */ Constant(Vec), /// Lagrange interpolation. Lagrange, } impl Interpolation { /// The interpolation factor for this participant, within this signing set. fn interpolation_factor(&self, i: Participant, included: &[Participant]) -> F { match self { Interpolation::Constant(c) => c[usize::from(u16::from(i) - 1)], Interpolation::Lagrange => { let i_f = F::from(u64::from(u16::from(i))); let mut num = F::ONE; let mut denom = F::ONE; for l in included { if i == *l { continue; } let share = F::from(u64::from(u16::from(*l))); num *= share; denom *= share - i_f; } // Safe as this will only be 0 if we're part of the above loop // (which we have an if case to avoid) num * denom.invert().unwrap() } } } } /// A key share for a thresholdized secret key. /// /// This is the 'core' structure containing all relevant data, expected to be wrapped into an /// heap-allocated pointer to minimize copies on the stack (`ThresholdKeys`, the publicly exposed /// type). 
#[derive(Clone, PartialEq, Eq)] struct ThresholdCore { params: ThresholdParams, group_key: C::G, verification_shares: HashMap, interpolation: Interpolation, secret_share: Zeroizing, } impl fmt::Debug for ThresholdCore { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt .debug_struct("ThresholdCore") .field("params", &self.params) .field("group_key", &self.group_key) .field("verification_shares", &self.verification_shares) .field("interpolation", &self.interpolation) .finish_non_exhaustive() } } impl Zeroize for ThresholdCore { fn zeroize(&mut self) { self.params.zeroize(); self.group_key.zeroize(); for share in self.verification_shares.values_mut() { share.zeroize(); } self.interpolation.zeroize(); self.secret_share.zeroize(); } } /// Threshold keys usable for signing. #[derive(Clone, Debug, Zeroize)] pub struct ThresholdKeys { // Core keys. #[zeroize(skip)] core: Arc>>, // Scalar applied to these keys. scalar: C::F, // Offset applied to these keys. offset: C::F, } /// View of keys, interpolated and with the expected linear combination taken for usage. 
#[derive(Clone)] pub struct ThresholdView { interpolation: Interpolation, scalar: C::F, offset: C::F, group_key: C::G, included: Vec, secret_share: Zeroizing, original_verification_shares: HashMap, verification_shares: HashMap, } impl fmt::Debug for ThresholdView { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt .debug_struct("ThresholdView") .field("interpolation", &self.interpolation) .field("scalar", &self.scalar) .field("offset", &self.offset) .field("group_key", &self.group_key) .field("included", &self.included) .field("original_verification_shares", &self.original_verification_shares) .field("verification_shares", &self.verification_shares) .finish_non_exhaustive() } } impl Zeroize for ThresholdView { fn zeroize(&mut self) { self.scalar.zeroize(); self.offset.zeroize(); self.group_key.zeroize(); self.included.zeroize(); self.secret_share.zeroize(); for share in self.original_verification_shares.values_mut() { share.zeroize(); } for share in self.verification_shares.values_mut() { share.zeroize(); } } } impl ThresholdKeys { /// Create a new set of ThresholdKeys. 
pub fn new(
  params: ThresholdParams,
  interpolation: Interpolation,
  secret_share: Zeroizing,
  verification_shares: HashMap,
) -> Result, DkgError> {
  // Every one of the n participants must have exactly one verification share.
  if verification_shares.len() != usize::from(params.n()) {
    Err(DkgError::IncorrectAmountOfVerificationShares {
      n: params.n(),
      shares: verification_shares.len(),
    })?;
  }
  for participant in verification_shares.keys().copied() {
    if u16::from(participant) > params.n() {
      Err(DkgError::InvalidParticipant { n: params.n(), participant })?;
    }
  }

  // Constant interpolation is only applicable when every participant must sign (t == n).
  match &interpolation {
    Interpolation::Constant(_) => {
      if params.t() != params.n() {
        Err(DkgError::InapplicableInterpolation("constant interpolation for keys where t != n"))?;
      }
    }
    Interpolation::Lagrange => {}
  }

  // Derive the group key by interpolating the verification shares of the first t participants.
  let t = (1 ..= params.t()).map(Participant).collect::>();
  let group_key =
    t.iter().map(|i| verification_shares[i] * interpolation.interpolation_factor(*i, &t)).sum();

  Ok(ThresholdKeys {
    core: Arc::new(Zeroizing::new(ThresholdCore {
      params,
      interpolation,
      secret_share,
      group_key,
      verification_shares,
    })),
    // No tweaks initially: multiplicative identity for the scalar, zero for the offset.
    scalar: C::F::ONE,
    offset: C::F::ZERO,
  })
}

/// Scale the keys by a given scalar to allow for various account and privacy schemes.
///
/// This scalar is ephemeral and will not be included when these keys are serialized. The
/// scalar is applied on top of any already-existing scalar/offset.
///
/// Returns `None` if the scalar is equal to `0`.
#[must_use]
pub fn scale(mut self, scalar: C::F) -> Option> {
  if bool::from(scalar.is_zero()) {
    None?;
  }
  // The existing offset is also scaled, keeping the tweaked key's linear combination intact.
  self.scalar *= scalar;
  self.offset *= scalar;
  Some(self)
}

/// Offset the keys by a given scalar to allow for various account and privacy schemes.
///
/// This offset is ephemeral and will not be included when these keys are serialized. The
/// offset is applied on top of any already-existing scalar/offset.
#[must_use]
pub fn offset(mut self, offset: C::F) -> ThresholdKeys {
  self.offset += offset;
  self
}

/// Return the current scalar in-use for these keys.
pub fn current_scalar(&self) -> C::F {
  self.scalar
}

/// Return the current offset in-use for these keys.
pub fn current_offset(&self) -> C::F {
  self.offset
}

/// Return the parameters for these keys.
pub fn params(&self) -> ThresholdParams {
  self.core.params
}

/// Return the original group key, without any tweaks applied.
pub fn original_group_key(&self) -> C::G {
  self.core.group_key
}

/// Return the interpolation method for these keys.
pub fn interpolation(&self) -> &Interpolation {
  &self.core.interpolation
}

/// Return the group key, with the expected linear combination taken.
pub fn group_key(&self) -> C::G {
  // (key * scalar) + (G * offset)
  (self.core.group_key * self.scalar) + (C::generator() * self.offset)
}

/// Return the underlying secret share for these keys, without any tweaks applied.
pub fn original_secret_share(&self) -> &Zeroizing {
  &self.core.secret_share
}

/// Return the original (untweaked) verification share for the specified participant.
///
/// This will panic if the participant index is invalid for these keys.
pub fn original_verification_share(&self, l: Participant) -> C::G {
  self.core.verification_shares[&l]
}

/// Obtain a view of these keys, interpolated for the specified signing set, with the specified
/// linear combination taken.
pub fn view(&self, mut included: Vec) -> Result, DkgError> {
  // The signing set must be of size t ..= n.
  if (included.len() < self.params().t.into()) ||
    (usize::from(self.params().n()) < included.len())
  {
    Err(DkgError::IncorrectAmountOfParticipants {
      t: self.params().t,
      n: self.params().n,
      amount: included.len(),
    })?;
  }
  included.sort();

  // Check the set has no duplicates and includes ourselves.
  {
    let mut found = included[0] == self.params().i();
    for i in 1 .. included.len() {
      if included[i - 1] == included[i] {
        Err(DkgError::DuplicatedParticipant(included[i]))?;
      }
      found |= included[i] == self.params().i();
    }
    if !found {
      Err(DkgError::NotParticipating)?;
    }
  }

  // Since the set is sorted, checking the last member bounds every member.
  {
    let last = *included.last().unwrap();
    if u16::from(last) > self.params().n() {
      Err(DkgError::InvalidParticipant { n: self.params().n(), participant: last })?;
    }
  }

  // The interpolation occurs multiplicatively, letting us scale by the scalar now
  let secret_share_scaled = Zeroizing::new(self.scalar * self.original_secret_share().deref());
  let mut secret_share = Zeroizing::new(
    self.core.interpolation.interpolation_factor(self.params().i(), &included) *
      secret_share_scaled.deref(),
  );

  // Scale and interpolate every included participant's verification share identically.
  let mut verification_shares = HashMap::with_capacity(included.len());
  for i in &included {
    let verification_share = self.core.verification_shares[i];
    let verification_share =
      verification_share * self.scalar * self.core.interpolation.interpolation_factor(*i, &included);
    verification_shares.insert(*i, verification_share);
  }

  /*
    The offset is included by adding it to the participant with the lowest ID. This is done
    after interpolating to ensure, regardless of the method of interpolation, that the method of
    interpolation does not scale the offset. For Lagrange interpolation, we could add the offset
    to every key share before interpolating, yet for Constant interpolation, we _have_ to add it
    as we do here (which also works even when we intend to perform Lagrange interpolation).
  */
  if included[0] == self.params().i() {
    *secret_share += self.offset;
  }
  *verification_shares.get_mut(&included[0]).unwrap() += C::generator() * self.offset;

  Ok(ThresholdView {
    interpolation: self.core.interpolation.clone(),
    scalar: self.scalar,
    offset: self.offset,
    group_key: self.group_key(),
    secret_share,
    original_verification_shares: self.core.verification_shares.clone(),
    verification_shares,
    included,
  })
}

/// Write these keys to a type satisfying `std::io::Write`.
///
/// This will not include the ephemeral scalar/offset.
pub fn write(&self, writer: &mut W) -> io::Result<()> {
  // Layout: curve ID length, curve ID, t, n, i, interpolation tag (+ Constant coefficients),
  // secret share, then every verification share in participant order.
  writer.write_all(&u32::try_from(C::ID.len()).unwrap().to_le_bytes())?;
  writer.write_all(C::ID)?;
  writer.write_all(&self.core.params.t.to_le_bytes())?;
  writer.write_all(&self.core.params.n.to_le_bytes())?;
  writer.write_all(&self.core.params.i.to_bytes())?;
  match &self.core.interpolation {
    Interpolation::Constant(c) => {
      writer.write_all(&[0])?;
      for c in c {
        writer.write_all(c.to_repr().as_ref())?;
      }
    }
    Interpolation::Lagrange => writer.write_all(&[1])?,
  };
  // Zeroize the stack copy of the secret share's encoding once written.
  let mut share_bytes = self.core.secret_share.to_repr();
  writer.write_all(share_bytes.as_ref())?;
  share_bytes.as_mut().zeroize();
  for l in 1 ..= self.core.params.n {
    writer.write_all(
      self.core.verification_shares[&Participant::new(l).unwrap()].to_bytes().as_ref(),
    )?;
  }
  Ok(())
}

/// Serialize these keys to a `Vec`.
///
/// This will not include the ephemeral scalar/offset.
pub fn serialize(&self) -> Zeroizing> {
  let mut serialized = Zeroizing::new(vec![]);
  // Writing to a Vec is infallible, hence the unwrap.
  self.write::>(serialized.as_mut()).unwrap();
  serialized
}

/// Read keys from a type satisfying `std::io::Read`.
pub fn read(reader: &mut R) -> io::Result> {
  // Confirm the serialization is for this curve before reading anything further.
  {
    let different = || io::Error::other("deserializing ThresholdKeys for another curve");

    let mut id_len = [0; 4];
    reader.read_exact(&mut id_len)?;
    if u32::try_from(C::ID.len()).unwrap().to_le_bytes() != id_len {
      Err(different())?;
    }

    let mut id = vec![0; C::ID.len()];
    reader.read_exact(&mut id)?;
    if id != C::ID {
      Err(different())?;
    }
  }

  let (t, n, i) = {
    let mut read_u16 = || -> io::Result {
      let mut value = [0; 2];
      reader.read_exact(&mut value)?;
      Ok(u16::from_le_bytes(value))
    };
    (
      read_u16()?,
      read_u16()?,
      Participant::new(read_u16()?).ok_or(io::Error::other("invalid participant index"))?,
    )
  };

  let mut interpolation = [0];
  reader.read_exact(&mut interpolation)?;
  let interpolation = match interpolation[0] {
    // Constant interpolation serializes one coefficient per participant.
    0 => Interpolation::Constant({
      let mut res = Vec::with_capacity(usize::from(n));
      for _ in 0 .. n {
        res.push(C::read_F(reader)?);
      }
      res
    }),
    1 => Interpolation::Lagrange,
    _ => Err(io::Error::other("invalid interpolation method"))?,
  };

  let secret_share = Zeroizing::new(C::read_F(reader)?);

  let mut verification_shares = HashMap::new();
  for l in (1 ..= n).map(Participant) {
    verification_shares.insert(l, ::read_G(reader)?);
  }

  // Delegate to `new` so its validation also applies to deserialized keys.
  ThresholdKeys::new(
    ThresholdParams::new(t, n, i).map_err(io::Error::other)?,
    interpolation,
    secret_share,
    verification_shares,
  )
  .map_err(io::Error::other)
}
}

impl ThresholdView {
  /// Return the scalar applied to this view.
  pub fn scalar(&self) -> C::F {
    self.scalar
  }

  /// Return the offset applied to this view.
  pub fn offset(&self) -> C::F {
    self.offset
  }

  /// Return the group key.
  pub fn group_key(&self) -> C::G {
    self.group_key
  }

  /// Return the included signers.
  pub fn included(&self) -> &[Participant] {
    &self.included
  }

  /// Return the interpolation factor for a signer.
  ///
  /// Returns `None` if the signer isn't part of this view's signing set.
  pub fn interpolation_factor(&self, participant: Participant) -> Option {
    if !self.included.contains(&participant) {
      None?
    }
    Some(self.interpolation.interpolation_factor(participant, &self.included))
  }

  /// Return the interpolated secret share, with the expected linear combination taken.
  pub fn secret_share(&self) -> &Zeroizing {
    &self.secret_share
  }

  /// Return the original (untweaked) verification share for the specified participant.
  ///
  /// This will panic if the participant index is invalid for these keys.
  pub fn original_verification_share(&self, l: Participant) -> C::G {
    self.original_verification_shares[&l]
  }

  /// Return the interpolated verification share, with the expected linear combination taken,
  /// for the specified participant.
  ///
  /// This will panic if the participant was not included in the signing set.
pub fn verification_share(&self, l: Participant) -> C::G { self.verification_shares[&l] } } ================================================ FILE: crypto/dleq/Cargo.toml ================================================ [package] name = "dleq" version = "0.4.1" description = "Implementation of single and cross-curve Discrete Log Equality proofs" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dleq" authors = ["Luke Parker "] edition = "2021" rust-version = "1.79" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] rustversion = "1" thiserror = { version = "2", default-features = false, optional = true } rand_core = { version = "0.6", default-features = false } zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] } digest = { version = "0.10", default-features = false } transcript = { package = "flexible-transcript", path = "../transcript", version = "^0.3.2", default-features = false } ff = { version = "0.13", default-features = false } group = { version = "0.13", default-features = false } multiexp = { path = "../multiexp", version = "0.4", default-features = false, features = ["batch"], optional = true } [dev-dependencies] hex-literal = "0.4" rand_core = { version = "0.6", features = ["getrandom"] } blake2 = "0.10" k256 = { version = "^0.13.1", default-features = false, features = ["std", "arithmetic", "bits"] } dalek-ff-group = { path = "../dalek-ff-group" } transcript = { package = "flexible-transcript", path = "../transcript", features = ["recommended"] } [features] std = ["thiserror?/std", "rand_core/std", "zeroize/std", "digest/std", "transcript/std", "ff/std", "multiexp?/std"] serialize = ["std"] # Needed for cross-group DLEqs secure_capacity_difference = [] experimental = ["std", "thiserror", "multiexp"] default = [ "std", # Only applies to experimental, yet is default to ensure security # experimental doesn't mandate 
it itself in case two curves with extreme # capacity differences are desired to be used together, in which case the user # must specify experimental without default features "secure_capacity_difference" ] ================================================ FILE: crypto/dleq/LICENSE ================================================ MIT License Copyright (c) 2020-2023 Luke Parker, Lee Bousfield Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: crypto/dleq/README.md ================================================ # Discrete Log Equality Implementation of discrete log equality proofs for curves implementing `ff`/`group`. There is also a highly experimental cross-group DLEq proof, under the `experimental` feature, which has no formal proofs available yet is available here regardless. 
This library, except for the `experimental` feature, was [audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf), culminating in commit [669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06). Any subsequent changes have not undergone auditing. ### Cross-Group DLEq The present cross-group DLEq is based off [MRL-0010](https://web.getmonero.org/resources/research-lab/pubs/MRL-0010.pdf), which isn't computationally correct as while it proves both keys have the same discrete logarithm for their `G'`/`H'` component, it doesn't prove a lack of a `G`/`H` component. Accordingly, it was augmented with a pair of Schnorr Proof of Knowledges, proving a known `G'`/`H'` component, guaranteeing a lack of a `G`/`H` component (assuming an unknown relation between `G`/`H` and `G'`/`H'`). The challenges for the ring signatures were also merged, removing one-element from each bit's proof with only a slight reduction to challenge security (as instead of being uniform over each scalar field, they're uniform over the mutual bit capacity of each scalar field). This reduction is identical to the one applied to the proved-for scalar, and accordingly should not reduce overall security. It does create a lack of domain separation, yet that shouldn't be an issue. The following variants are available: - `ClassicLinear`. This is only for reference purposes, being the above described proof, with no further optimizations. - `ConciseLinear`. This proves for 2 bits at a time, not increasing the signature size for both bits yet decreasing the amount of commitments/challenges in total. - `EfficientLinear`. This provides ring signatures in the form `((R_G, R_H), s)`, instead of `(e, s)`, and accordingly enables a batch verification of their final step. It is the most performant, and also the largest, option. 
- `CompromiseLinear`. This provides signatures in the form `((R_G, R_H), s)` AND proves for 2-bits at a time. While this increases the amount of steps in verifying the ring signatures, which aren't batch verified, and decreases the amount of items batched (an operation which grows in efficiency with quantity), it strikes a balance between speed and size.

The following numbers are from benchmarks performed with k256/curve25519_dalek on an Intel i7-1185G7:

| Algorithm          | Size                    | Verification Time |
|--------------------|-------------------------|-------------------|
| `ClassicLinear`    | 56829 bytes (+27%)      | 157ms (0%)        |
| `ConciseLinear`    | 44607 bytes (Reference) | 156ms (Reference) |
| `EfficientLinear`  | 65145 bytes (+46%)      | 122ms (-22%)      |
| `CompromiseLinear` | 48765 bytes (+9%)       | 137ms (-12%)      |

`CompromiseLinear` is the best choice by only being marginally sub-optimal regarding size, yet still achieving most of the desired performance improvements.

That said, neither the original postulation (which had flaws) nor any construction here has been proven nor audited. Accordingly, they are solely experimental, and none are recommended.

All proofs are suffixed "Linear" in the hope a logarithmic proof makes itself available, which would likely immediately become the most efficient option.
================================================ FILE: crypto/dleq/src/cross_group/aos.rs ================================================
use rand_core::{RngCore, CryptoRng};

use zeroize::Zeroize;

use transcript::Transcript;

use group::{
  ff::{Field, PrimeFieldBits},
  prime::PrimeGroup,
};
use multiexp::BatchVerifier;

use crate::cross_group::{
  Generators, DLEqError,
  scalar::{scalar_convert, mutual_scalar_from_bytes},
};

#[cfg(feature = "serialize")]
use std::io::{Read, Write};
#[cfg(feature = "serialize")]
use ff::PrimeField;
#[cfg(feature = "serialize")]
use crate::{read_scalar, cross_group::read_point};

// The form the ring signature's initial element is carried in: either the pair of nonce
// commitments (R) or the initial challenge (e).
#[allow(non_camel_case_types)]
#[derive(Clone, PartialEq, Eq, Debug)]
pub(crate) enum Re {
  R(G0, G1),
  // Merged challenges have a slight security reduction, yet one already applied to the scalar
  // being proven for, and this saves ~8kb. Alternatively, challenges could be redefined as a
  // seed, present here, which is then hashed for each of the two challenges, remaining
  // unbiased/unique while maintaining the bandwidth savings, yet also while adding 252 hashes
  // for Secp256k1/Ed25519
  e(G0::Scalar),
}

impl Re {
  // Identity-valued R form, used as a placeholder to be filled in while proving/reading.
  #[allow(non_snake_case)]
  pub(crate) fn R_default() -> Re {
    Re::R(G0::identity(), G1::identity())
  }

  // Zero-valued e form placeholder.
  pub(crate) fn e_default() -> Re {
    Re::e(G0::Scalar::ZERO)
  }
}

// An AOS ring signature over a pair of groups, with responses for every ring member.
#[allow(non_snake_case)]
#[derive(Clone, PartialEq, Eq, Debug)]
pub(crate) struct Aos {
  Re_0: Re,
  s: [(G0::Scalar, G1::Scalar); RING_LEN],
}

impl<
  G0: PrimeGroup + Zeroize,
  G1: PrimeGroup + Zeroize,
  const RING_LEN: usize,
> Aos
{
  // Derive the pair of challenges for the next ring position from the transcript and the
  // current nonce commitments.
  #[allow(non_snake_case)]
  fn nonces(mut transcript: T, nonces: (G0, G1)) -> (G0::Scalar, G1::Scalar) {
    transcript.domain_separate(b"aos_membership_proof");
    transcript.append_message(b"ring_len", u8::try_from(RING_LEN).unwrap().to_le_bytes());
    transcript.append_message(b"nonce_0", nonces.0.to_bytes());
    transcript.append_message(b"nonce_1", nonces.1.to_bytes());
    mutual_scalar_from_bytes(transcript.challenge(b"challenge").as_ref())
  }

  // Recover the nonce commitments, R = (alt * s) - (A * e), on both curves.
  #[allow(non_snake_case)]
  fn R(
    generators: (Generators, Generators),
    s: (G0::Scalar, G1::Scalar),
    A: (G0, G1),
    e: (G0::Scalar, G1::Scalar),
  ) -> (G0, G1) {
    (((generators.0.alt * s.0) - (A.0 * e.0)), ((generators.1.alt * s.1) - (A.1 * e.1)))
  }

  // As R, yet returned as multiexp statements so the caller may batch verify them.
  #[allow(non_snake_case, clippy::type_complexity)]
  fn R_batch(
    generators: (Generators, Generators),
    s: (G0::Scalar, G1::Scalar),
    A: (G0, G1),
    e: (G0::Scalar, G1::Scalar),
  ) -> (Vec<(G0::Scalar, G0)>, Vec<(G1::Scalar, G1)>) {
    (vec![(-s.0, generators.0.alt), (e.0, A.0)], vec![(-s.1, generators.1.alt), (e.1, A.1)])
  }

  // Recover the nonce commitments, then derive the next challenges from them.
  #[allow(non_snake_case)]
  fn R_nonces(
    transcript: T,
    generators: (Generators, Generators),
    s: (G0::Scalar, G1::Scalar),
    A: (G0, G1),
    e: (G0::Scalar, G1::Scalar),
  ) -> (G0::Scalar, G1::Scalar) {
    Self::nonces(transcript, Self::R(generators, s, A, e))
  }

  // Prove ring membership. `actual` is the index whose discrete logarithm (`blinding_key`) is
  // known; the secret index, key, and nonces are zeroized once used.
  #[allow(non_snake_case)]
  pub(crate) fn prove(
    rng: &mut R,
    transcript: &T,
    generators: (Generators, Generators),
    ring: &[(G0, G1)],
    mut actual: usize,
    blinding_key: &mut (G0::Scalar, G1::Scalar),
    mut Re_0: Re,
  ) -> Self {
    // While it is possible to use larger values, it's not efficient to do so
    // 2 + 2 == 2^2, yet 2 + 2 + 2 < 2^3
    debug_assert!((RING_LEN == 2) || (RING_LEN == 4));
    debug_assert_eq!(RING_LEN, ring.len());

    let mut s = [(G0::Scalar::ZERO, G1::Scalar::ZERO); RING_LEN];

    let mut r = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng));
    #[allow(non_snake_case)]
    let original_R = (generators.0.alt * r.0, generators.1.alt * r.1);
    #[allow(non_snake_case)]
    let mut R = original_R;

    // Walk the ring starting immediately after the real index, wrapping around.
    for i in ((actual + 1) ..= (actual + RING_LEN)).map(|i| i % RING_LEN) {
      let e = Self::nonces(transcript.clone(), R);
      if i == 0 {
        match Re_0 {
          Re::R(ref mut R0_0, ref mut R1_0) => {
            *R0_0 = R.0;
            *R1_0 = R.1
          }
          Re::e(ref mut e_0) => *e_0 = e.0,
        }
      }

      // Solve for the real index
      if i == actual {
        s[i] = (r.0 + (e.0 * blinding_key.0), r.1 + (e.1 * blinding_key.1));
        debug_assert_eq!(Self::R(generators, s[i], ring[actual], e), original_R);
        actual.zeroize();
        blinding_key.0.zeroize();
        blinding_key.1.zeroize();
        r.0.zeroize();
        r.1.zeroize();
        break;
      }

      // Generate a decoy response
      s[i] = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng));
      R = Self::R(generators, s[i], ring[i], e);
    }

    Aos { Re_0, s }
  }

  // Assumes the ring has already been transcripted in some form. Critically insecure if it
  // hasn't
  pub(crate) fn verify(
    &self,
    rng: &mut R,
    transcript: &T,
    generators: (Generators, Generators),
    batch: &mut (BatchVerifier<(), G0>, BatchVerifier<(), G1>),
    ring: &[(G0, G1)],
  ) -> Result<(), DLEqError> {
    debug_assert!((RING_LEN == 2) || (RING_LEN == 4));
    debug_assert_eq!(RING_LEN, ring.len());

    #[allow(non_snake_case)]
    match self.Re_0 {
      // R form: replay all but the final ring step, then queue the final step's statements for
      // batch verification against the claimed initial commitments.
      Re::R(R0_0, R1_0) => {
        let mut e = Self::nonces(transcript.clone(), (R0_0, R1_0));
        #[allow(clippy::needless_range_loop)]
        for i in 0 .. (RING_LEN - 1) {
          e = Self::R_nonces(transcript.clone(), generators, self.s[i], ring[i], e);
        }

        let mut statements =
          Self::R_batch(generators, *self.s.last().unwrap(), *ring.last().unwrap(), e);
        statements.0.push((G0::Scalar::ONE, R0_0));
        statements.1.push((G1::Scalar::ONE, R1_0));
        batch.0.queue(&mut *rng, (), statements.0);
        batch.1.queue(&mut *rng, (), statements.1);
      }

      // e form: replay the entire ring and check it closes back to the initial challenge.
      Re::e(e_0) => {
        let e_0 = (e_0, scalar_convert(e_0).ok_or(DLEqError::InvalidChallenge)?);
        let mut e = None;
        #[allow(clippy::needless_range_loop)]
        for i in 0 .. RING_LEN {
          e = Some(Self::R_nonces(
            transcript.clone(),
            generators,
            self.s[i],
            ring[i],
            e.unwrap_or(e_0),
          ));
        }

        // Will panic if the above loop is never run somehow
        // If e wasn't an Option, and instead initially set to e_0, it'd always pass
        if e_0 != e.unwrap() {
          Err(DLEqError::InvalidProof)?;
        }
      }
    }

    Ok(())
  }

  // Serialize the proof: Re_0 followed by every response pair.
  #[cfg(feature = "serialize")]
  pub(crate) fn write(&self, w: &mut W) -> std::io::Result<()> {
    #[allow(non_snake_case)]
    match self.Re_0 {
      Re::R(R0, R1) => {
        w.write_all(R0.to_bytes().as_ref())?;
        w.write_all(R1.to_bytes().as_ref())?;
      }
      Re::e(e) => w.write_all(e.to_repr().as_ref())?,
    }

    for i in 0 .. RING_LEN {
      w.write_all(self.s[i].0.to_repr().as_ref())?;
      w.write_all(self.s[i].1.to_repr().as_ref())?;
    }
    Ok(())
  }

  // Deserialize a proof. `Re_0` selects the expected form and is overwritten with the read
  // values.
  #[allow(non_snake_case)]
  #[cfg(feature = "serialize")]
  pub(crate) fn read(r: &mut R, mut Re_0: Re) -> std::io::Result {
    match Re_0 {
      Re::R(ref mut R0, ref mut R1) => {
        *R0 = read_point(r)?;
        *R1 = read_point(r)?
      }
      Re::e(ref mut e) => *e = read_scalar(r)?,
    }

    let mut s = [(G0::Scalar::ZERO, G1::Scalar::ZERO); RING_LEN];
    for s in &mut s {
      *s = (read_scalar(r)?, read_scalar(r)?);
    }

    Ok(Aos { Re_0, s })
  }
}

================================================ FILE: crypto/dleq/src/cross_group/bits.rs ================================================
use rand_core::{RngCore, CryptoRng};

use zeroize::Zeroize;

use transcript::Transcript;

use group::{ff::PrimeFieldBits, prime::PrimeGroup};

use multiexp::BatchVerifier;

use crate::cross_group::{
  Generators, DLEqError,
  aos::{Re, Aos},
};

#[cfg(feature = "serialize")]
use std::io::{Read, Write};
#[cfg(feature = "serialize")]
use crate::cross_group::read_point;

// The supported DLEq proof variants, distinguished by signature form and bits-per-group.
#[allow(clippy::enum_variant_names)]
pub(crate) enum BitSignature {
  ClassicLinear,
  ConciseLinear,
  EfficientLinear,
  CompromiseLinear,
}

impl BitSignature {
  // Stable numeric tag for each variant, usable as a const generic.
  pub(crate) const fn to_u8(&self) -> u8 {
    match self {
      BitSignature::ClassicLinear => 0,
      BitSignature::ConciseLinear => 1,
      BitSignature::EfficientLinear => 2,
      BitSignature::CompromiseLinear => 3,
    }
  }

  // Inverse of to_u8. Panics on an unknown tag.
  pub(crate) const fn from(algorithm: u8) -> BitSignature {
    match algorithm {
      0 => BitSignature::ClassicLinear,
      1 => BitSignature::ConciseLinear,
      2 => BitSignature::EfficientLinear,
      3 => BitSignature::CompromiseLinear,
      _ => panic!("Unknown algorithm"),
    }
  }

  // The amount of bits proven for per commitment.
  pub(crate) const fn bits(&self) -> u8 {
    match self {
      BitSignature::ClassicLinear | BitSignature::EfficientLinear => 1,
      BitSignature::ConciseLinear | BitSignature::CompromiseLinear => 2,
    }
  }

  // Ring length is 2^bits.
  pub(crate) const fn ring_len(&self) -> usize {
    2_usize.pow(self.bits() as u32)
  }

  // Which AOS signature form this variant uses.
  fn aos_form(&self) -> Re {
    match self {
      BitSignature::ClassicLinear | BitSignature::ConciseLinear =>
        Re::e_default(),
      BitSignature::EfficientLinear | BitSignature::CompromiseLinear => Re::R_default(),
    }
  }
}

// A proof for a single group of bits: a commitment on each curve, plus an AOS ring signature
// that the committed value is one of the ring's members.
#[derive(Clone, PartialEq, Eq, Debug)]
pub(crate) struct Bits<
  G0: PrimeGroup + Zeroize,
  G1: PrimeGroup + Zeroize,
  const SIGNATURE: u8,
  const RING_LEN: usize,
> {
  pub(crate) commitments: (G0, G1),
  signature: Aos,
}

impl<
  G0: PrimeGroup + Zeroize,
  G1: PrimeGroup + Zeroize,
  const SIGNATURE: u8,
  const RING_LEN: usize,
> Bits
{
  // Transcript this bit group's index and commitments.
  fn transcript(transcript: &mut T, i: usize, commitments: (G0, G1)) {
    transcript.domain_separate(b"bits");
    transcript.append_message(b"group", u16::try_from(i).unwrap().to_le_bytes());
    transcript.append_message(b"commitment_0", commitments.0.to_bytes());
    transcript.append_message(b"commitment_1", commitments.1.to_bytes());
  }

  // Build the ring: each successive member subtracts another pow_2 from the commitments.
  fn ring(pow_2: (G0, G1), commitments: (G0, G1)) -> Vec<(G0, G1)> {
    let mut res = vec![commitments; RING_LEN];
    for i in 1 .. RING_LEN {
      res[i] = (res[i - 1].0 - pow_2.0, res[i - 1].1 - pow_2.1);
    }
    res
  }

  // Advance pow_2 past this bit group, doubling once per bit proven for.
  fn shift(pow_2: &mut (G0, G1)) {
    for _ in 0 .. BitSignature::from(SIGNATURE).bits() {
      pow_2.0 = pow_2.0.double();
      pow_2.1 = pow_2.1.double();
    }
  }

  // Prove for this bit group's value, zeroizing the bits once they've been used.
  pub(crate) fn prove(
    rng: &mut R,
    transcript: &mut T,
    generators: (Generators, Generators),
    i: usize,
    pow_2: &mut (G0, G1),
    mut bits: u8,
    blinding_key: &mut (G0::Scalar, G1::Scalar),
  ) -> Self {
    // Commitments = (alt * blinding_key) + (pow_2 * bits) on each curve.
    let mut commitments =
      ((generators.0.alt * blinding_key.0), (generators.1.alt * blinding_key.1));
    commitments.0 += pow_2.0 * G0::Scalar::from(bits.into());
    commitments.1 += pow_2.1 * G1::Scalar::from(bits.into());

    Self::transcript(transcript, i, commitments);

    let signature = Aos::prove(
      rng,
      transcript,
      generators,
      &Self::ring(*pow_2, commitments),
      usize::from(bits),
      blinding_key,
      BitSignature::from(SIGNATURE).aos_form(),
    );
    bits.zeroize();

    Self::shift(pow_2);
    Bits { commitments, signature }
  }

  // Verify this bit group's signature, queueing batchable statements into `batch`.
  pub(crate) fn verify(
    &self,
    rng: &mut R,
    transcript: &mut T,
    generators: (Generators, Generators),
    batch: &mut (BatchVerifier<(), G0>, BatchVerifier<(), G1>),
    i: usize,
    pow_2: &mut (G0, G1),
  ) -> Result<(), DLEqError> {
    Self::transcript(transcript, i, self.commitments);

    self.signature.verify(
      rng,
      transcript,
      generators,
      batch,
      &Self::ring(*pow_2, self.commitments),
    )?;

    Self::shift(pow_2);
    Ok(())
  }

  // Serialize: both commitments, then the AOS signature.
  #[cfg(feature = "serialize")]
  pub(crate) fn write(&self, w: &mut W) -> std::io::Result<()> {
    w.write_all(self.commitments.0.to_bytes().as_ref())?;
    w.write_all(self.commitments.1.to_bytes().as_ref())?;
    self.signature.write(w)
  }

  // Deserialize, using this variant's AOS form for the signature.
  #[cfg(feature = "serialize")]
  pub(crate) fn read(r: &mut R) -> std::io::Result {
    Ok(Bits {
      commitments: (read_point(r)?, read_point(r)?),
      signature: Aos::read(r, BitSignature::from(SIGNATURE).aos_form())?,
    })
  }
}

================================================ FILE: crypto/dleq/src/cross_group/mod.rs ================================================
use core::ops::{Deref, DerefMut};
#[cfg(feature = "serialize")]
use std::io::{self, Read, Write};

use thiserror::Error;

use rand_core::{RngCore, CryptoRng};

use zeroize::{Zeroize, Zeroizing};

use digest::{Digest,
HashMarker};

use transcript::Transcript;

use group::{
  ff::{Field, PrimeField, PrimeFieldBits},
  prime::PrimeGroup,
};
use multiexp::BatchVerifier;

/// Scalar utilities.
pub mod scalar;
use scalar::{scalar_convert, mutual_scalar_from_bytes};

pub(crate) mod schnorr;
use self::schnorr::SchnorrPoK;

pub(crate) mod aos;
mod bits;
use bits::{BitSignature, Bits};

// Use black_box when possible
#[rustversion::since(1.66)]
use core::hint::black_box;
#[rustversion::before(1.66)]
fn black_box(val: T) -> T {
  val
}

// Convert a bool to a 0/1 byte via black_box (attempting to avoid compiler optimizations around
// the secret bit), zeroizing the source bool once converted.
fn u8_from_bool(bit_ref: &mut bool) -> u8 {
  let bit_ref = black_box(bit_ref);

  let mut bit = black_box(*bit_ref);
  #[allow(clippy::cast_lossless)]
  let res = black_box(bit as u8);
  bit.zeroize();
  debug_assert!((res | 1) == 1);

  bit_ref.zeroize();
  res
}

// Read a group element, rejecting both invalid and non-canonical encodings.
#[cfg(feature = "serialize")]
pub(crate) fn read_point(r: &mut R) -> io::Result {
  let mut repr = G::Repr::default();
  r.read_exact(repr.as_mut())?;
  let point = G::from_bytes(&repr);
  let Some(point) = Option::::from(point) else { Err(io::Error::other("invalid point"))? };
  // Re-serialize and compare to reject non-canonical encodings.
  if point.to_bytes().as_ref() != repr.as_ref() {
    Err(io::Error::other("non-canonical point"))?;
  }
  Ok(point)
}

/// A pair of generators, one committing to values (primary), one blinding (alt), for an elliptic
/// curve.
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct Generators {
  /// The generator used to commit to values.
  ///
  /// This should likely be the curve's traditional 'basepoint'.
  pub primary: G,
  /// The generator used to blind values. This must be distinct from the primary generator.
  pub alt: G,
}

impl Generators {
  /// Create a new set of generators.
  ///
  /// Returns `None` if the two generators are equal.
  pub fn new(primary: G, alt: G) -> Option> {
    if primary == alt {
      None?;
    }
    Some(Generators { primary, alt })
  }

  // Commit both generators to the transcript.
  fn transcript(&self, transcript: &mut T) {
    transcript.domain_separate(b"generators");
    transcript.append_message(b"primary", self.primary.to_bytes());
    transcript.append_message(b"alternate", self.alt.to_bytes());
  }
}

/// Error for cross-group DLEq proofs.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Error)]
pub enum DLEqError {
  /// Invalid proof length.
  #[error("invalid proof length")]
  InvalidProofLength,
  /// Invalid challenge.
  #[error("invalid challenge")]
  InvalidChallenge,
  /// Invalid proof.
  #[error("invalid proof")]
  InvalidProof,
}

// This should never be directly instantiated and uses a u8 to represent internal values
// Any external usage is likely invalid
#[doc(hidden)]
// Debug would be such a dump of data this likely isn't helpful, but at least it's available to
// anyone who wants it
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct __DLEqProof<
  G0: PrimeGroup + Zeroize,
  G1: PrimeGroup + Zeroize,
  const SIGNATURE: u8,
  const RING_LEN: usize,
  const REMAINDER_RING_LEN: usize,
> {
  bits: Vec>,
  remainder: Option>,
  poks: (SchnorrPoK, SchnorrPoK),
}

// Declare a documented public type alias for one proof variant, wiring the variant's tag, ring
// length, and remainder ring length into __DLEqProof's const generics.
macro_rules! dleq {
  ($doc_str: expr, $name: ident, $signature: expr, $remainder: literal,) => {
    #[doc = $doc_str]
    pub type $name = __DLEqProof<
      G0,
      G1,
      { $signature.to_u8() },
      { $signature.ring_len() },
      // There may not be a remainder, yet if there is one, it'll be just one bit
      // A ring for one bit has a RING_LEN of 2
      { if $remainder { 2 } else { 0 } },
    >;
  };
}

// Proves for 1-bit at a time with the signature form (e, s), as originally described in MRL-0010.
// Uses a merged challenge, unlike MRL-0010, for the ring signature, saving an element from each
// bit and removing a hash while slightly reducing challenge security. This security reduction is
// already applied to the scalar being proven for, a result of the requirement it's mutually valid
// over both scalar fields, hence its application here as well. This is mainly here as a point of
// reference for the following DLEq proofs, all which use merged challenges, and isn't performant
// in comparison to the others
dleq!(
  "The DLEq proof described in MRL-0010.",
  ClassicLinearDLEq,
  BitSignature::ClassicLinear,
  false,
);

// Proves for 2-bits at a time to save 3/7 elements of every other bit
// <9% smaller than CompromiseLinear, yet ~12% slower
dleq!(
  "A DLEq proof modified from MRL-0010, proving for two bits at a time to save on space.",
  ConciseLinearDLEq,
  BitSignature::ConciseLinear,
  true,
);

// Uses AOS signatures of the form R, s, to enable the final step of the ring signature to be
// batch verified, at the cost of adding an additional element per bit
dleq!(
  " A DLEq proof modified from MRL-0010, using R, s forms instead of c, s forms to enable batch verification at the cost of space usage. ",
  EfficientLinearDLEq,
  BitSignature::EfficientLinear,
  false,
);

// Proves for 2-bits at a time while using the R, s form. This saves 3/7 elements of every other
// bit, while adding 1 element to every bit, and is more efficient than ConciseLinear yet less
// efficient than EfficientLinear due to having more ring signature steps which aren't batched
// >25% smaller than EfficientLinear and just 11% slower, making it the recommended option
dleq!(
  " A DLEq proof modified from MRL-0010, using R, s forms instead of c, s forms, while proving for two bits at a time, to enable batch verification and take advantage of space savings. This isn't quite as efficient as EfficientLinearDLEq, and isn't as compact as ConciseLinearDLEq, yet strikes a strong balance of performance and conciseness.
", CompromiseLinearDLEq, BitSignature::CompromiseLinear, true, ); impl< G0: PrimeGroup + Zeroize, G1: PrimeGroup + Zeroize, const SIGNATURE: u8, const RING_LEN: usize, const REMAINDER_RING_LEN: usize, > __DLEqProof { pub(crate) fn transcript( transcript: &mut T, generators: (Generators, Generators), keys: (G0, G1), ) { transcript.domain_separate(b"cross_group_dleq"); generators.0.transcript(transcript); generators.1.transcript(transcript); transcript.domain_separate(b"points"); transcript.append_message(b"point_0", keys.0.to_bytes()); transcript.append_message(b"point_1", keys.1.to_bytes()); } pub(crate) fn blinding_key( rng: &mut R, total: &mut F, last: bool, ) -> F { let blinding_key = if last { -*total } else { F::random(&mut *rng) }; *total += blinding_key; blinding_key } fn reconstruct_keys(&self) -> (G0, G1) { let mut res = ( self.bits.iter().map(|bit| bit.commitments.0).sum::(), self.bits.iter().map(|bit| bit.commitments.1).sum::(), ); if let Some(bit) = &self.remainder { res.0 += bit.commitments.0; res.1 += bit.commitments.1; } res } #[allow(clippy::type_complexity)] fn prove_internal( rng: &mut R, transcript: &mut T, generators: (Generators, Generators), f: (Zeroizing, Zeroizing), ) -> (Self, (Zeroizing, Zeroizing)) { Self::transcript( transcript, generators, ((generators.0.primary * f.0.deref()), (generators.1.primary * f.1.deref())), ); let poks = ( SchnorrPoK::::prove(rng, transcript, generators.0.primary, &f.0), SchnorrPoK::::prove(rng, transcript, generators.1.primary, &f.1), ); let mut blinding_key_total = (G0::Scalar::ZERO, G1::Scalar::ZERO); let mut blinding_key = |rng: &mut R, last| { let blinding_key = ( Self::blinding_key(&mut *rng, &mut blinding_key_total.0, last), Self::blinding_key(&mut *rng, &mut blinding_key_total.1, last), ); if last { debug_assert_eq!(blinding_key_total.0, G0::Scalar::ZERO); debug_assert_eq!(blinding_key_total.1, G1::Scalar::ZERO); } blinding_key }; let capacity = 
usize::try_from(G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY)).unwrap(); let bits_per_group = usize::from(BitSignature::from(SIGNATURE).bits()); let mut pow_2 = (generators.0.primary, generators.1.primary); let mut raw_bits = f.0.to_le_bits(); let mut bits = Vec::with_capacity(capacity); let mut these_bits: u8 = 0; // Needed to zero out the bits #[allow(unused_assignments)] for (i, mut bit) in raw_bits.iter_mut().enumerate() { if i == capacity { break; } // Accumulate this bit let mut bit = u8_from_bool(bit.deref_mut()); these_bits |= bit << (i % bits_per_group); bit.zeroize(); if (i % bits_per_group) == (bits_per_group - 1) { let last = i == (capacity - 1); let mut blinding_key = blinding_key(&mut *rng, last); bits.push(Bits::prove( &mut *rng, transcript, generators, i / bits_per_group, &mut pow_2, these_bits, &mut blinding_key, )); these_bits.zeroize(); } } debug_assert_eq!(bits.len(), capacity / bits_per_group); let mut remainder = None; if capacity != ((capacity / bits_per_group) * bits_per_group) { let mut blinding_key = blinding_key(&mut *rng, true); remainder = Some(Bits::prove( &mut *rng, transcript, generators, capacity / bits_per_group, &mut pow_2, these_bits, &mut blinding_key, )); } these_bits.zeroize(); let proof = __DLEqProof { bits, remainder, poks }; debug_assert_eq!( proof.reconstruct_keys(), (generators.0.primary * f.0.deref(), generators.1.primary * f.1.deref()) ); (proof, f) } /// Prove the Cross-Group Discrete Log Equality for the points derived from the scalar created as /// the output of the passed in Digest. /// /// Given the non-standard requirements to achieve uniformity, needing to be < 2^x instead of /// less than a prime moduli, this is the simplest way to safely and securely generate a Scalar, /// without risk of failure nor bias. /// /// It also ensures a lack of determinable relation between keys, guaranteeing security in the /// currently expected use case for this, atomic swaps, where each swap leaks the key. 
Knowing /// the relationship between keys would allow breaking all swaps after just one. #[allow(clippy::type_complexity)] pub fn prove( rng: &mut R, transcript: &mut T, generators: (Generators, Generators), digest: D, ) -> (Self, (Zeroizing, Zeroizing)) { // This pattern theoretically prevents the compiler from moving it, so our protection against // a copy remaining un-zeroized is actually what's causing a copy. There's still a feeling of // safety granted by it, even if there's a loss in performance. let (mut f0, mut f1) = mutual_scalar_from_bytes::(digest.finalize().as_ref()); let f = (Zeroizing::new(f0), Zeroizing::new(f1)); f0.zeroize(); f1.zeroize(); Self::prove_internal(rng, transcript, generators, f) } /// Prove the Cross-Group Discrete Log Equality for the points derived from the scalar passed in, /// failing if it's not mutually valid. /// /// This allows for rejection sampling externally derived scalars until they're safely usable, /// as needed. #[allow(clippy::type_complexity)] pub fn prove_without_bias( rng: &mut R, transcript: &mut T, generators: (Generators, Generators), f0: Zeroizing, ) -> Option<(Self, (Zeroizing, Zeroizing))> { scalar_convert(*f0.deref()) // scalar_convert will zeroize it, though this is unfortunate .map(|f1| Self::prove_internal(rng, transcript, generators, (f0, Zeroizing::new(f1)))) } /// Verify a Cross-Group Discrete Log Equality proof, returning the points proven for. 
pub fn verify( &self, rng: &mut R, transcript: &mut T, generators: (Generators, Generators), ) -> Result<(G0, G1), DLEqError> { let capacity = usize::try_from(G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY)).unwrap(); let bits_per_group = usize::from(BitSignature::from(SIGNATURE).bits()); let has_remainder = (capacity % bits_per_group) != 0; // These shouldn't be possible, as locally created and deserialized proofs should be properly // formed in these regards, yet it doesn't hurt to check and would be problematic if true if (self.bits.len() != (capacity / bits_per_group)) || ((self.remainder.is_none() && has_remainder) || (self.remainder.is_some() && !has_remainder)) { return Err(DLEqError::InvalidProofLength); } let keys = self.reconstruct_keys(); Self::transcript(transcript, generators, keys); let batch_capacity = match BitSignature::from(SIGNATURE) { BitSignature::ClassicLinear | BitSignature::ConciseLinear => 3, BitSignature::EfficientLinear | BitSignature::CompromiseLinear => (self.bits.len() + 1) * 3, }; let mut batch = (BatchVerifier::new(batch_capacity), BatchVerifier::new(batch_capacity)); self.poks.0.verify(&mut *rng, transcript, generators.0.primary, keys.0, &mut batch.0); self.poks.1.verify(&mut *rng, transcript, generators.1.primary, keys.1, &mut batch.1); let mut pow_2 = (generators.0.primary, generators.1.primary); for (i, bits) in self.bits.iter().enumerate() { bits.verify(&mut *rng, transcript, generators, &mut batch, i, &mut pow_2)?; } if let Some(bit) = &self.remainder { bit.verify(&mut *rng, transcript, generators, &mut batch, self.bits.len(), &mut pow_2)?; } if (!batch.0.verify_vartime()) || (!batch.1.verify_vartime()) { Err(DLEqError::InvalidProof)?; } Ok(keys) } /// Write a Cross-Group Discrete Log Equality proof to a type satisfying std::io::Write. 
#[cfg(feature = "serialize")] pub fn write(&self, w: &mut W) -> io::Result<()> { for bit in &self.bits { bit.write(w)?; } if let Some(bit) = &self.remainder { bit.write(w)?; } self.poks.0.write(w)?; self.poks.1.write(w) } /// Read a Cross-Group Discrete Log Equality proof from a type satisfying std::io::Read. #[cfg(feature = "serialize")] pub fn read(r: &mut R) -> io::Result { let capacity = usize::try_from(G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY)).unwrap(); let bits_per_group = usize::from(BitSignature::from(SIGNATURE).bits()); let mut bits = Vec::with_capacity(capacity / bits_per_group); for _ in 0 .. (capacity / bits_per_group) { bits.push(Bits::read(r)?); } let mut remainder = None; if (capacity % bits_per_group) != 0 { remainder = Some(Bits::read(r)?); } Ok(__DLEqProof { bits, remainder, poks: (SchnorrPoK::read(r)?, SchnorrPoK::read(r)?) }) } } ================================================ FILE: crypto/dleq/src/cross_group/scalar.rs ================================================ use core::ops::DerefMut; use ff::PrimeFieldBits; use zeroize::Zeroize; use crate::cross_group::u8_from_bool; /// Convert a uniform scalar into one usable on both fields, clearing the top bits as needed. 
pub fn scalar_normalize<F0: PrimeFieldBits + Zeroize, F1: PrimeFieldBits + Zeroize>(
  mut scalar: F0,
) -> (F0, F1) {
  let mutual_capacity = F0::CAPACITY.min(F1::CAPACITY);

  // A mutual key is only as secure as its weakest group
  // Accordingly, this bans a capacity difference of more than 4 bits to prevent a curve generally
  // offering n-bits of security from being forced into a situation with much fewer bits
  #[cfg(feature = "secure_capacity_difference")]
  assert!((F0::CAPACITY.max(F1::CAPACITY) - mutual_capacity) <= 4);

  let mut res1 = F0::ZERO;
  let mut res2 = F1::ZERO;
  // Uses the bits API to ensure a consistent endianness
  let mut bits = scalar.to_le_bits();
  scalar.zeroize();
  // Convert it to big endian
  bits.reverse();

  // Skip the high bits which exceed the mutual capacity, zeroizing them as we go
  let mut skip = bits.len() - usize::try_from(mutual_capacity).unwrap();
  // Needed to zero out the bits
  #[allow(unused_assignments)]
  for mut bit in &mut bits {
    if skip > 0 {
      bit.deref_mut().zeroize();
      skip -= 1;
      continue;
    }

    // Double-and-add the bit into both fields simultaneously so they stay equal
    res1 = res1.double();
    res2 = res2.double();
    let mut bit = u8_from_bool(bit.deref_mut());
    res1 += F0::from(bit.into());
    res2 += F1::from(bit.into());
    bit.zeroize();
  }

  (res1, res2)
}

/// Helper to convert a scalar between fields. Returns None if the scalar isn't mutually valid.
pub fn scalar_convert<F0: PrimeFieldBits + Zeroize, F1: PrimeFieldBits + Zeroize>(
  mut scalar: F0,
) -> Option<F1> {
  // Normalize, then only accept the conversion if normalization was a no-op (the scalar already
  // fit within the mutual capacity)
  let (mut valid, converted) = scalar_normalize(scalar);
  let res = Some(converted).filter(|_| scalar == valid);
  scalar.zeroize();
  valid.zeroize();
  res
}

/// Create a mutually valid scalar from bytes via bit truncation to not introduce bias.
pub fn mutual_scalar_from_bytes<F0: PrimeFieldBits + Zeroize, F1: PrimeFieldBits + Zeroize>(
  bytes: &[u8],
) -> (F0, F1) {
  let capacity = usize::try_from(F0::CAPACITY.min(F1::CAPACITY)).unwrap();
  debug_assert!((bytes.len() * 8) >= capacity);

  let mut accum = F0::ZERO;
  for b in 0 ..
capacity {
    accum = accum.double();
    accum += F0::from(((bytes[b / 8] >> (b % 8)) & 1).into());
  }
  (accum, scalar_convert(accum).unwrap())
}


================================================
FILE: crypto/dleq/src/cross_group/schnorr.rs
================================================
use core::ops::Deref;

use rand_core::{RngCore, CryptoRng};

use zeroize::{Zeroize, Zeroizing};

use transcript::Transcript;

use group::{
  ff::{Field, PrimeFieldBits},
  prime::PrimeGroup,
};
use multiexp::BatchVerifier;

use crate::challenge;

#[cfg(feature = "serialize")]
use std::io::{Read, Write};
#[cfg(feature = "serialize")]
use ff::PrimeField;
#[cfg(feature = "serialize")]
use crate::{read_scalar, cross_group::read_point};

// A Schnorr proof of knowledge of the discrete logarithm of a public key, used to prove knowledge
// of the keys the cross-group DLEq proof is made over.
#[allow(non_snake_case)]
#[derive(Clone, PartialEq, Eq, Debug)]
pub(crate) struct SchnorrPoK<G: PrimeGroup> {
  R: G,
  s: G::Scalar,
}

impl<G: PrimeGroup<Scalar: PrimeFieldBits + Zeroize> + Zeroize> SchnorrPoK<G> {
  // Not HRAm due to the lack of m
  #[allow(non_snake_case)]
  fn hra<T: Transcript>(transcript: &mut T, generator: G, R: G, A: G) -> G::Scalar {
    transcript.domain_separate(b"schnorr_proof_of_knowledge");
    transcript.append_message(b"generator", generator.to_bytes());
    transcript.append_message(b"nonce", R.to_bytes());
    transcript.append_message(b"public_key", A.to_bytes());
    challenge(transcript)
  }

  pub(crate) fn prove<R: RngCore + CryptoRng, T: Transcript>(
    rng: &mut R,
    transcript: &mut T,
    generator: G,
    private_key: &Zeroizing<G::Scalar>,
  ) -> SchnorrPoK<G> {
    let nonce = Zeroizing::new(G::Scalar::random(rng));
    #[allow(non_snake_case)]
    let R = generator * nonce.deref();
    SchnorrPoK {
      R,
      // s = r + cx
      s: (SchnorrPoK::hra(transcript, generator, R, generator * private_key.deref()) *
        private_key.deref()) +
        nonce.deref(),
    }
  }

  pub(crate) fn verify<R: RngCore + CryptoRng, T: Transcript>(
    &self,
    rng: &mut R,
    transcript: &mut T,
    generator: G,
    public_key: G,
    batch: &mut BatchVerifier<(), G>,
  ) {
    // Queues sG - R - cA == identity into the batch verifier
    batch.queue(
      rng,
      (),
      [
        (-self.s, generator),
        (G::Scalar::ONE, self.R),
        (Self::hra(transcript, generator, self.R, public_key), public_key),
      ],
    );
  }

  #[cfg(feature = "serialize")]
  pub fn write<W: Write>(&self, w: &mut W) -> std::io::Result<()> {
    w.write_all(self.R.to_bytes().as_ref())?;
w.write_all(self.s.to_repr().as_ref())
  }

  #[cfg(feature = "serialize")]
  pub fn read<R: Read>(r: &mut R) -> std::io::Result<SchnorrPoK<G>> {
    Ok(SchnorrPoK { R: read_point(r)?, s: read_scalar(r)? })
  }
}


================================================
FILE: crypto/dleq/src/lib.rs
================================================
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
#![doc = include_str!("../README.md")]

use core::ops::Deref;

use rand_core::{RngCore, CryptoRng};

use zeroize::{Zeroize, Zeroizing};

use transcript::Transcript;

use ff::{Field, PrimeField};
use group::prime::PrimeGroup;

#[cfg(feature = "serialize")]
use std::io::{self, Error, Read, Write};

/// A cross-group DLEq proof capable of proving that two public keys, across two different curves,
/// share a private key.
#[cfg(feature = "experimental")]
pub mod cross_group;

#[cfg(test)]
mod tests;

// Produce a non-biased challenge from the transcript in the specified field
pub(crate) fn challenge<T: Transcript, F: PrimeField>(transcript: &mut T) -> F {
  // From here, there are three ways to get a scalar under the ff/group API
  // 1: Scalar::random(ChaCha20Rng::from_seed(self.transcript.rng_seed(b"challenge")))
  // 2: Grabbing a UInt library to perform reduction by the modulus, then determining endianness
  //    and loading it in
  // 3: Iterating over each byte and manually doubling/adding. This is simplest

  let mut challenge = F::ZERO;

  // Get a wide amount of bytes to safely reduce without bias
  // In most cases, <=1.5x bytes is enough. 2x is still standard and there's some theoretical
  // groups which may technically require more than 1.5x bytes for this to work as intended
  let target_bytes = usize::try_from(F::NUM_BITS).unwrap().div_ceil(8) * 2;
  let mut challenge_bytes = transcript.challenge(b"challenge");
  let challenge_bytes_len = challenge_bytes.as_ref().len();
  // If the challenge is 32 bytes, and we need 64, we need two challenges
  let needed_challenges = target_bytes.div_ceil(challenge_bytes_len);

  // The following algorithm should be equivalent to a wide reduction of the challenges,
  // interpreted as concatenated, big-endian byte string
  let mut handled_bytes = 0;
  'outer: for _ in 0 ..= needed_challenges {
    // Cursor of which byte of the challenge to use next
    let mut b = 0;
    while b < challenge_bytes_len {
      // Get the next amount of bytes to attempt
      // Only grabs the needed amount of bytes, up to 8 at a time (u64), so long as they're
      // available in the challenge
      let chunk_bytes = (target_bytes - handled_bytes).min(8).min(challenge_bytes_len - b);

      let mut chunk = 0;
      for _ in 0 .. chunk_bytes {
        chunk <<= 8;
        chunk |= u64::from(challenge_bytes.as_ref()[b]);
        b += 1;
      }
      // Add this chunk
      challenge += F::from(chunk);
      handled_bytes += chunk_bytes;

      // If we've reached the target amount of bytes, break
      if handled_bytes == target_bytes {
        break 'outer;
      }

      // Shift over by however many bits will be in the next chunk
      let next_chunk_bytes = (target_bytes - handled_bytes).min(8).min(challenge_bytes_len);
      for _ in 0 .. (next_chunk_bytes * 8) {
        challenge = challenge.double();
      }
    }

    // Secure thanks to the Transcript trait having a bound of updating on challenge
    challenge_bytes = transcript.challenge(b"challenge_extension");
  }

  challenge
}

// Helper function to read a scalar
#[cfg(feature = "serialize")]
fn read_scalar<R: Read, F: PrimeField>(r: &mut R) -> io::Result<F> {
  let mut repr = F::Repr::default();
  r.read_exact(repr.as_mut())?;
  let scalar = F::from_repr(repr);
  if scalar.is_none().into() {
    Err(Error::other("invalid scalar"))?;
  }
  Ok(scalar.unwrap())
}

/// Error for DLEq proofs.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum DLEqError {
  /// The proof was invalid.
  InvalidProof,
}

/// A proof that points have the same discrete logarithm across generators.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct DLEqProof<G: Zeroize + PrimeGroup<Scalar: Zeroize>> {
  c: G::Scalar,
  s: G::Scalar,
}

#[allow(non_snake_case)]
impl<G: Zeroize + PrimeGroup<Scalar: Zeroize>> DLEqProof<G> {
  // Append a single (generator, nonce, point) statement to the transcript
  fn transcript<T: Transcript>(transcript: &mut T, generator: G, nonce: G, point: G) {
    transcript.append_message(b"generator", generator.to_bytes());
    transcript.append_message(b"nonce", nonce.to_bytes());
    transcript.append_message(b"point", point.to_bytes());
  }

  /// Prove that the points created by `scalar * G`, for each specified generator, share a discrete
  /// logarithm.
  pub fn prove<R: RngCore + CryptoRng, T: Transcript>(
    rng: &mut R,
    transcript: &mut T,
    generators: &[G],
    scalar: &Zeroizing<G::Scalar>,
  ) -> DLEqProof<G> {
    let r = Zeroizing::new(G::Scalar::random(rng));

    transcript.domain_separate(b"dleq");
    for generator in generators {
      // R, A
      Self::transcript(transcript, *generator, *generator * r.deref(), *generator * scalar.deref());
    }

    let c = challenge(transcript);
    // r + ca
    let s = (c * scalar.deref()) + r.deref();

    DLEqProof { c, s }
  }

  // Transcript a specific generator/nonce/point (G/R/A), as used when verifying a proof.
  // This takes in the generator/point, and then the challenge and solution to calculate the nonce.
fn verify_statement( transcript: &mut T, generator: G, point: G, c: G::Scalar, s: G::Scalar, ) { // s = r + ca // sG - cA = R // R, A Self::transcript(transcript, generator, (generator * s) - (point * c), point); } /// Verify the specified points share a discrete logarithm across the specified generators. pub fn verify( &self, transcript: &mut T, generators: &[G], points: &[G], ) -> Result<(), DLEqError> { if generators.len() != points.len() { Err(DLEqError::InvalidProof)?; } transcript.domain_separate(b"dleq"); for (generator, point) in generators.iter().zip(points) { Self::verify_statement(transcript, *generator, *point, self.c, self.s); } if self.c != challenge(transcript) { Err(DLEqError::InvalidProof)?; } Ok(()) } /// Write a DLEq proof to something implementing Write. #[cfg(feature = "serialize")] pub fn write(&self, w: &mut W) -> io::Result<()> { w.write_all(self.c.to_repr().as_ref())?; w.write_all(self.s.to_repr().as_ref()) } /// Read a DLEq proof from something implementing Read. #[cfg(feature = "serialize")] pub fn read(r: &mut R) -> io::Result> { Ok(DLEqProof { c: read_scalar(r)?, s: read_scalar(r)? }) } /// Serialize a DLEq proof to a `Vec`. #[cfg(feature = "serialize")] pub fn serialize(&self) -> Vec { let mut res = vec![]; self.write(&mut res).unwrap(); res } } /// A proof that multiple series of points each have a single discrete logarithm across generators. /// /// This is effectively n distinct DLEq proofs, one for each discrete logarithm and its points /// across some generators, yet with a smaller overall proof size. #[cfg(feature = "std")] #[derive(Clone, PartialEq, Eq, Debug, Zeroize)] pub struct MultiDLEqProof> { c: G::Scalar, s: Vec, } #[cfg(feature = "std")] #[allow(non_snake_case)] impl> MultiDLEqProof { /// Prove for each scalar that the series of points created by multiplying it against its /// matching generators share a discrete logarithm. /// This function panics if `generators.len() != scalars.len()`. 
pub fn prove( rng: &mut R, transcript: &mut T, generators: &[Vec], scalars: &[Zeroizing], ) -> MultiDLEqProof { assert_eq!( generators.len(), scalars.len(), "amount of series of generators doesn't match the amount of scalars" ); transcript.domain_separate(b"multi_dleq"); let mut nonces = vec![]; for (i, (scalar, generators)) in scalars.iter().zip(generators).enumerate() { // Delineate between discrete logarithms transcript.append_message(b"discrete_logarithm", i.to_le_bytes()); let nonce = Zeroizing::new(G::Scalar::random(&mut *rng)); for generator in generators { DLEqProof::transcript( transcript, *generator, *generator * nonce.deref(), *generator * scalar.deref(), ); } nonces.push(nonce); } let c = challenge(transcript); let mut s = vec![]; for (scalar, nonce) in scalars.iter().zip(nonces) { s.push((c * scalar.deref()) + nonce.deref()); } MultiDLEqProof { c, s } } /// Verify each series of points share a discrete logarithm against their matching series of /// generators. pub fn verify( &self, transcript: &mut T, generators: &[Vec], points: &[Vec], ) -> Result<(), DLEqError> { if points.len() != generators.len() { Err(DLEqError::InvalidProof)?; } if self.s.len() != generators.len() { Err(DLEqError::InvalidProof)?; } transcript.domain_separate(b"multi_dleq"); for (i, (generators, points)) in generators.iter().zip(points).enumerate() { if points.len() != generators.len() { Err(DLEqError::InvalidProof)?; } transcript.append_message(b"discrete_logarithm", i.to_le_bytes()); for (generator, point) in generators.iter().zip(points) { DLEqProof::verify_statement(transcript, *generator, *point, self.c, self.s[i]); } } if self.c != challenge(transcript) { Err(DLEqError::InvalidProof)?; } Ok(()) } /// Write a multi-DLEq proof to something implementing Write. 
#[cfg(feature = "serialize")] pub fn write(&self, w: &mut W) -> io::Result<()> { w.write_all(self.c.to_repr().as_ref())?; for s in &self.s { w.write_all(s.to_repr().as_ref())?; } Ok(()) } /// Read a multi-DLEq proof from something implementing Read. #[cfg(feature = "serialize")] pub fn read(r: &mut R, discrete_logs: usize) -> io::Result> { let c = read_scalar(r)?; let mut s = vec![]; for _ in 0 .. discrete_logs { s.push(read_scalar(r)?); } Ok(MultiDLEqProof { c, s }) } /// Serialize a multi-DLEq proof to a `Vec`. #[cfg(feature = "serialize")] pub fn serialize(&self) -> Vec { let mut res = vec![]; self.write(&mut res).unwrap(); res } } ================================================ FILE: crypto/dleq/src/tests/cross_group/aos.rs ================================================ use rand_core::OsRng; use group::{ff::Field, Group}; use multiexp::BatchVerifier; use crate::{ cross_group::aos::{Re, Aos}, tests::cross_group::{G0, G1, transcript, generators}, }; #[allow(non_snake_case)] #[cfg(feature = "serialize")] fn test_aos_serialization(proof: &Aos, Re_0: Re) { let mut buf = vec![]; proof.write(&mut buf).unwrap(); let deserialized = Aos::read::<&[u8]>(&mut buf.as_ref(), Re_0).unwrap(); assert_eq!(proof, &deserialized); } fn test_aos(default: &Re) { let generators = generators(); let mut ring_keys = [(::Scalar::ZERO, ::Scalar::ZERO); RING_LEN]; // Side-effect of G0 being a type-alias with identity() deprecated #[allow(deprecated)] let mut ring = [(G0::identity(), G1::identity()); RING_LEN]; for i in 0 .. 
RING_LEN { ring_keys[i] = (::Scalar::random(&mut OsRng), ::Scalar::random(&mut OsRng)); ring[i] = (generators.0.alt * ring_keys[i].0, generators.1.alt * ring_keys[i].1); } for (actual, key) in ring_keys.iter_mut().enumerate() { let proof = Aos::<_, _, RING_LEN>::prove( &mut OsRng, &transcript(), generators, &ring, actual, key, default.clone(), ); let mut batch = (BatchVerifier::new(0), BatchVerifier::new(0)); proof.verify(&mut OsRng, &transcript(), generators, &mut batch, &ring).unwrap(); // For e, these should have nothing. For R, these should have 6 elements each which sum to 0 assert!(batch.0.verify_vartime()); assert!(batch.1.verify_vartime()); #[cfg(feature = "serialize")] test_aos_serialization(&proof, default.clone()); } } #[test] fn test_aos_e() { test_aos::<2>(&Re::e_default()); test_aos::<4>(&Re::e_default()); } #[allow(non_snake_case)] #[test] fn test_aos_R() { // Batch verification appreciates the longer vectors, which means not batching bits test_aos::<2>(&Re::R_default()); } ================================================ FILE: crypto/dleq/src/tests/cross_group/mod.rs ================================================ use core::ops::Deref; use hex_literal::hex; use zeroize::Zeroizing; use rand_core::{RngCore, OsRng}; use ff::{Field, PrimeField}; use group::{Group, GroupEncoding}; use blake2::{Digest, Blake2b512}; use k256::{Scalar, ProjectivePoint}; use dalek_ff_group::{self as dfg, EdwardsPoint}; use transcript::{Transcript, RecommendedTranscript}; use crate::{ cross_group::{ scalar::mutual_scalar_from_bytes, Generators, ClassicLinearDLEq, EfficientLinearDLEq, ConciseLinearDLEq, CompromiseLinearDLEq, }, }; mod scalar; mod aos; type G0 = ProjectivePoint; type G1 = EdwardsPoint; pub(crate) fn transcript() -> RecommendedTranscript { RecommendedTranscript::new(b"Cross-Group DLEq Proof Test") } pub(crate) fn generators() -> (Generators, Generators) { ( Generators::new( ProjectivePoint::GENERATOR, ProjectivePoint::from_bytes( 
&(hex!("0250929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0").into()), ) .unwrap(), ) .unwrap(), Generators::new( EdwardsPoint::generator(), EdwardsPoint::from_bytes(&hex!( "8b655970153799af2aeadc9ff1add0ea6c7251d54154cfa92c173a0dd39c1f94" )) .unwrap(), ) .unwrap(), ) } macro_rules! verify_and_deserialize { ($type: ty, $proof: ident, $generators: ident, $keys: ident) => { let public_keys = $proof.verify(&mut OsRng, &mut transcript(), $generators).unwrap(); assert_eq!($generators.0.primary * $keys.0.deref(), public_keys.0); assert_eq!($generators.1.primary * $keys.1.deref(), public_keys.1); #[cfg(feature = "serialize")] { let mut buf = vec![]; $proof.write(&mut buf).unwrap(); let deserialized = <$type>::read::<&[u8]>(&mut buf.as_ref()).unwrap(); assert_eq!($proof, deserialized); } }; } macro_rules! test_dleq { ($str: literal, $benchmark: ident, $name: ident, $type: ident) => { #[ignore] #[test] fn $benchmark() { println!("Benchmarking with Secp256k1/Ed25519"); let generators = generators(); let mut seed = [0; 32]; OsRng.fill_bytes(&mut seed); let key = Blake2b512::new().chain_update(seed); let runs = 200; let mut proofs = Vec::with_capacity(usize::try_from(runs).unwrap()); let time = std::time::Instant::now(); for _ in 0 .. runs { proofs.push($type::prove(&mut OsRng, &mut transcript(), generators, key.clone()).0); } println!("{} had a average prove time of {}ms", $str, time.elapsed().as_millis() / runs); let time = std::time::Instant::now(); for proof in &proofs { proof.verify(&mut OsRng, &mut transcript(), generators).unwrap(); } println!("{} had a average verify time of {}ms", $str, time.elapsed().as_millis() / runs); #[cfg(feature = "serialize")] { let mut buf = vec![]; proofs[0].write(&mut buf).unwrap(); println!("{} had a proof size of {} bytes", $str, buf.len()); } } #[test] fn $name() { let generators = generators(); for i in 0 .. 
1 { let (proof, keys) = if i == 0 { let mut seed = [0; 32]; OsRng.fill_bytes(&mut seed); $type::prove( &mut OsRng, &mut transcript(), generators, Blake2b512::new().chain_update(seed), ) } else { let mut key; let mut res; while { key = Zeroizing::new(Scalar::random(&mut OsRng)); res = $type::prove_without_bias(&mut OsRng, &mut transcript(), generators, key.clone()); res.is_none() } {} let res = res.unwrap(); assert_eq!(key, res.1 .0); res }; verify_and_deserialize!($type::, proof, generators, keys); } } }; } test_dleq!("ClassicLinear", benchmark_classic_linear, test_classic_linear, ClassicLinearDLEq); test_dleq!("ConciseLinear", benchmark_concise_linear, test_concise_linear, ConciseLinearDLEq); test_dleq!( "EfficientLinear", benchmark_efficient_linear, test_efficient_linear, EfficientLinearDLEq ); test_dleq!( "CompromiseLinear", benchmark_compromise_linear, test_compromise_linear, CompromiseLinearDLEq ); #[test] fn test_rejection_sampling() { let mut pow_2 = Scalar::ONE; for _ in 0 .. dfg::Scalar::CAPACITY { pow_2 = pow_2.double(); } assert!( // Either would work EfficientLinearDLEq::prove_without_bias( &mut OsRng, &mut transcript(), generators(), Zeroizing::new(pow_2) ) .is_none() ); } #[test] fn test_remainder() { // Uses Secp256k1 for both to achieve an odd capacity of 255 assert_eq!(Scalar::CAPACITY, 255); let generators = (generators().0, generators().0); // This will ignore any unused bits, ensuring every remaining one is set let keys = mutual_scalar_from_bytes::(&[0xFF; 32]); let keys = (Zeroizing::new(keys.0), Zeroizing::new(keys.1)); assert_eq!(Scalar::ONE + keys.0.deref(), Scalar::from(2u64).pow_vartime([255])); assert_eq!(keys.0, keys.1); let (proof, res) = ConciseLinearDLEq::prove_without_bias( &mut OsRng, &mut transcript(), generators, keys.0.clone(), ) .unwrap(); assert_eq!(keys, res); verify_and_deserialize!( ConciseLinearDLEq::, proof, generators, keys ); } ================================================ FILE: 
crypto/dleq/src/tests/cross_group/scalar.rs ================================================ use rand_core::OsRng; use ff::{Field, PrimeField}; use k256::Scalar as K256Scalar; use dalek_ff_group::Scalar as DalekScalar; use crate::cross_group::scalar::{scalar_normalize, scalar_convert}; #[test] fn test_scalar() { assert_eq!( scalar_normalize::<_, DalekScalar>(K256Scalar::ZERO), (K256Scalar::ZERO, DalekScalar::ZERO) ); assert_eq!( scalar_normalize::<_, DalekScalar>(K256Scalar::ONE), (K256Scalar::ONE, DalekScalar::ONE) ); let mut initial; while { initial = K256Scalar::random(&mut OsRng); let (k, ed) = scalar_normalize::<_, DalekScalar>(initial); // The initial scalar should equal the new scalar with Ed25519's capacity let mut initial_bytes = initial.to_repr().to_vec(); // Drop the first 4 bits to hit 252 initial_bytes[0] &= 0b00001111; let k_bytes = k.to_repr().to_vec(); assert_eq!(initial_bytes, k_bytes); let mut ed_bytes = ed.to_repr().as_ref().to_vec(); // Reverse to big endian ed_bytes.reverse(); assert_eq!(k_bytes, ed_bytes); // Verify conversion works as expected assert_eq!(scalar_convert::<_, DalekScalar>(k), Some(ed)); // Run this test again if this secp256k1 scalar didn't have any bits cleared initial == k } {} // Verify conversion returns None when the scalar isn't mutually valid assert!(scalar_convert::<_, DalekScalar>(initial).is_none()); } ================================================ FILE: crypto/dleq/src/tests/cross_group/schnorr.rs ================================================ use core::ops::Deref; use rand_core::OsRng; use zeroize::Zeroize; use group::{ ff::{Field, PrimeFieldBits}, prime::PrimeGroup, }; use multiexp::BatchVerifier; use transcript::{Transcript, RecommendedTranscript}; use crate::cross_group::schnorr::SchnorrPoK; fn test_schnorr + Zeroize>() { let transcript = RecommendedTranscript::new(b"Schnorr Test"); let mut batch = BatchVerifier::new(10); for _ in 0 .. 
10 { let private = Zeroizing::new(G::Scalar::random(&mut OsRng)); SchnorrPoK::prove(&mut OsRng, &mut transcript.clone(), G::generator(), &private).verify( &mut OsRng, &mut transcript.clone(), G::generator(), G::generator() * private.deref(), &mut batch, ); } assert!(batch.verify_vartime()); } #[test] fn test_secp256k1() { test_schnorr::(); } #[test] fn test_ed25519() { test_schnorr::(); } ================================================ FILE: crypto/dleq/src/tests/mod.rs ================================================ use core::ops::Deref; use hex_literal::hex; use rand_core::OsRng; use zeroize::Zeroizing; use ff::Field; use group::GroupEncoding; use k256::{Scalar, ProjectivePoint}; use transcript::{Transcript, RecommendedTranscript}; use crate::{DLEqProof, MultiDLEqProof}; #[cfg(feature = "experimental")] mod cross_group; fn generators() -> [k256::ProjectivePoint; 5] { [ ProjectivePoint::GENERATOR, ProjectivePoint::from_bytes( &(hex!("0250929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0").into()), ) .unwrap(), // Just an increment of the last byte from the previous, where the previous two are valid ProjectivePoint::from_bytes( &(hex!("0250929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac4").into()), ) .unwrap(), ProjectivePoint::from_bytes( &(hex!("0250929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803aca").into()), ) .unwrap(), ProjectivePoint::from_bytes( &(hex!("0250929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803acb").into()), ) .unwrap(), ] } #[test] fn test_dleq() { let generators = generators(); let transcript = || RecommendedTranscript::new(b"DLEq Proof Test"); for i in 0 .. 5 { let key = Zeroizing::new(Scalar::random(&mut OsRng)); let proof = DLEqProof::prove(&mut OsRng, &mut transcript(), &generators[.. i], &key); let mut keys = [ProjectivePoint::GENERATOR; 5]; for k in 0 .. 5 { keys[k] = generators[k] * key.deref(); } proof.verify(&mut transcript(), &generators[.. i], &keys[.. 
i]).unwrap(); // Different challenge assert!(proof .verify( &mut RecommendedTranscript::new(b"different challenge"), &generators[.. i], &keys[.. i] ) .is_err()); // All of these following tests should effectively be a different challenge and accordingly // pointless. They're still nice to have though // We could edit these tests to always test with at least two generators // Then we don't test proofs with zero/one generator(s) // While those are stupid, and pointless, and potentially point to a failure in the caller, // it could also be part of a dynamic system which deals with variable amounts of generators // Not panicking in such use cases, even if they're inefficient, provides seamless behavior if i >= 2 { // Different generators assert!(proof .verify( &mut transcript(), generators[.. i].iter().copied().rev().collect::>().as_ref(), &keys[.. i] ) .is_err()); // Different keys assert!(proof .verify( &mut transcript(), &generators[.. i], keys[.. i].iter().copied().rev().collect::>().as_ref() ) .is_err()); } #[cfg(feature = "serialize")] { let mut buf = vec![]; proof.write(&mut buf).unwrap(); let deserialized = DLEqProof::::read::<&[u8]>(&mut buf.as_ref()).unwrap(); assert_eq!(proof, deserialized); } } } #[test] fn test_multi_dleq() { let generators = generators(); let transcript = || RecommendedTranscript::new(b"MultiDLEq Proof Test"); // Test up to 3 keys for k in 0 ..= 3 { let mut keys = vec![]; let mut these_generators = vec![]; let mut pub_keys = vec![]; for i in 0 .. 
k { let key = Zeroizing::new(Scalar::random(&mut OsRng)); // For each key, test a variable set of generators // 0: 0 // 1: 1, 2 // 2: 2, 3, 4 let key_generators = generators[i ..= (i + i)].to_vec(); let mut these_pub_keys = vec![]; for generator in &key_generators { these_pub_keys.push(generator * key.deref()); } keys.push(key); these_generators.push(key_generators); pub_keys.push(these_pub_keys); } let proof = MultiDLEqProof::prove(&mut OsRng, &mut transcript(), &these_generators, &keys); proof.verify(&mut transcript(), &these_generators, &pub_keys).unwrap(); // Different challenge assert!(proof .verify(&mut RecommendedTranscript::new(b"different challenge"), &these_generators, &pub_keys) .is_err()); // Test verifying for a different amount of keys fail if k > 0 { assert!(proof.verify(&mut transcript(), &these_generators, &pub_keys[.. k - 1]).is_err()); } #[cfg(feature = "serialize")] { let mut buf = vec![]; proof.write(&mut buf).unwrap(); let deserialized = MultiDLEqProof::::read::<&[u8]>(&mut buf.as_ref(), k).unwrap(); assert_eq!(proof, deserialized); } } } ================================================ FILE: crypto/ed448/Cargo.toml ================================================ [package] name = "minimal-ed448" version = "0.4.2" description = "Unaudited, inefficient implementation of Ed448 in Rust" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/crypto/ed448" authors = ["Luke Parker "] keywords = ["ed448", "ff", "group"] edition = "2021" rust-version = "1.65" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] rustversion = "1" rand_core = { version = "0.6", default-features = false } zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] } subtle = { version = "^2.4", default-features = false } sha3 = { version = "0.10", default-features = false } ff = { version = "0.13", default-features = false, features = ["bits"] } group = { 
version = "0.13", default-features = false } ciphersuite = { path = "../ciphersuite", default-features = false } generic-array = { version = "1", default-features = false } crypto-bigint = { version = "0.5", default-features = false, features = ["zeroize"] } [dev-dependencies] hex = { version = "0.4", default-features = false, features = ["std"] } rand_core = { version = "0.6", default-features = false, features = ["std"] } ff-group-tests = { path = "../ff-group-tests" } [features] alloc = ["zeroize/alloc", "ciphersuite/alloc"] std = ["alloc", "rand_core/std", "zeroize/std", "subtle/std", "sha3/std", "ff/std", "ciphersuite/std"] default = ["std"] ================================================ FILE: crypto/ed448/LICENSE ================================================ MIT License Copyright (c) 2022-2023 Luke Parker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
/// Convert a `bool` to a `u8`, zeroizing both the local copy and the referenced bit.
///
/// The value is routed through `black_box` before and after reading so the optimizer is
/// discouraged from specializing on it — a best-effort constant-time measure, not a guarantee
/// (on Rust < 1.66 `black_box` is an identity function; see the module above).
pub(crate) fn u8_from_bool(bit_ref: &mut bool) -> u8 {
  // Hide the reference itself, then take a hidden copy of the bit
  let bit_ref = black_box(bit_ref);
  let mut bit = black_box(*bit_ref);
  #[allow(clippy::cast_lossless)]
  let res = black_box(bit as u8);
  // Clear the local copy now that the u8 has been produced
  bit.zeroize();
  // Sanity-check the cast produced 0 or 1 (debug builds only)
  debug_assert!((res | 1) == 1);
  // Clear the caller's bit as well, so secret bits don't linger at the call site
  bit_ref.zeroize();
  res
}
from_wrapper { ($wrapper: ident, $inner: ident, $uint: ident) => { impl From<$uint> for $wrapper { fn from(a: $uint) -> $wrapper { $wrapper(Residue::new(&$inner::from(a))) } } }; } macro_rules! field { ( $FieldName: ident, $ResidueType: ident, $MODULUS_STR: ident, $MODULUS: ident, $WIDE_MODULUS: ident, $NUM_BITS: literal, $MULTIPLICATIVE_GENERATOR: literal, $DELTA: expr, ) => { use core::{ ops::{Add, AddAssign, Neg, Sub, SubAssign, Mul, MulAssign}, iter::{Sum, Product}, }; use subtle::{Choice, CtOption, ConstantTimeEq, ConstantTimeLess, ConditionallySelectable}; use rand_core::RngCore; use generic_array::{typenum::U57, GenericArray}; use crypto_bigint::{Integer, NonZero, Encoding, impl_modulus}; use ff::{Field, PrimeField, FieldBits, PrimeFieldBits, helpers::sqrt_ratio_generic}; use $crate::backend::u8_from_bool; fn reduce(x: U896) -> U448 { U448::from_le_slice(&x.rem(&NonZero::new($WIDE_MODULUS).unwrap()).to_le_bytes()[.. 56]) } impl ConstantTimeEq for $FieldName { fn ct_eq(&self, other: &Self) -> Choice { self.0.ct_eq(&other.0) } } impl ConditionallySelectable for $FieldName { fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { $FieldName(Residue::conditional_select(&a.0, &b.0, choice)) } } math_op!($FieldName, $FieldName, Add, add, AddAssign, add_assign, |x: $ResidueType, y| x .add(&y)); math_op!($FieldName, $FieldName, Sub, sub, SubAssign, sub_assign, |x: $ResidueType, y| x .sub(&y)); math_op!($FieldName, $FieldName, Mul, mul, MulAssign, mul_assign, |x: $ResidueType, y| x .mul(&y)); from_wrapper!($FieldName, U448, u8); from_wrapper!($FieldName, U448, u16); from_wrapper!($FieldName, U448, u32); from_wrapper!($FieldName, U448, u64); from_wrapper!($FieldName, U448, u128); impl Neg for $FieldName { type Output = $FieldName; fn neg(self) -> $FieldName { $FieldName(self.0.neg()) } } impl<'a> Neg for &'a $FieldName { type Output = $FieldName; fn neg(self) -> Self::Output { (*self).neg() } } impl $FieldName { /// Perform an exponentiation. 
pub fn pow(&self, other: $FieldName) -> $FieldName { let mut table = [$FieldName(Residue::ONE); 16]; table[1] = *self; for i in 2 .. 16 { table[i] = table[i - 1] * self; } let mut res = $FieldName(Residue::ONE); let mut bits = 0; for (i, mut bit) in other.to_le_bits().iter_mut().rev().enumerate() { bits <<= 1; let mut bit = u8_from_bool(&mut bit); bits |= bit; bit.zeroize(); if ((i + 1) % 4) == 0 { if i != 3 { for _ in 0 .. 4 { res *= res; } } let mut scale_by = $FieldName(Residue::ONE); #[allow(clippy::needless_range_loop)] for i in 0 .. 16 { #[allow(clippy::cast_possible_truncation)] // Safe since 0 .. 16 { scale_by = <_>::conditional_select(&scale_by, &table[i], bits.ct_eq(&(i as u8))); } } res *= scale_by; bits = 0; } } res } } impl Field for $FieldName { const ZERO: Self = $FieldName(Residue::ZERO); const ONE: Self = $FieldName(Residue::ONE); fn random(mut rng: impl RngCore) -> Self { let mut bytes = [0; 112]; rng.fill_bytes(&mut bytes); $FieldName(Residue::new(&reduce(U896::from_le_slice(bytes.as_ref())))) } fn square(&self) -> Self { *self * self } fn double(&self) -> Self { *self + self } fn invert(&self) -> CtOption { const NEG_2: $FieldName = $FieldName($ResidueType::sub(&$ResidueType::ZERO, &$ResidueType::new(&U448::from_u8(2)))); CtOption::new(self.pow(NEG_2), !self.is_zero()) } fn sqrt(&self) -> CtOption { const MOD_1_4: $FieldName = $FieldName($ResidueType::new( &$MODULUS.saturating_add(&U448::ONE).wrapping_div(&U448::from_u8(4)), )); let res = self.pow(MOD_1_4); CtOption::new(res, res.square().ct_eq(self)) } fn sqrt_ratio(num: &Self, div: &Self) -> (Choice, Self) { sqrt_ratio_generic(num, div) } } impl PrimeField for $FieldName { type Repr = GenericArray; const MODULUS: &'static str = $MODULUS_STR; const NUM_BITS: u32 = $NUM_BITS; const CAPACITY: u32 = $NUM_BITS - 1; const TWO_INV: Self = $FieldName($ResidueType::new(&U448::from_u8(2)).invert().0); const MULTIPLICATIVE_GENERATOR: Self = 
$FieldName(Residue::new(&U448::from_u8($MULTIPLICATIVE_GENERATOR))); // True for both the Ed448 Scalar field and FieldElement field const S: u32 = 1; // Both fields have their root of unity as -1 const ROOT_OF_UNITY: Self = $FieldName($ResidueType::sub(&$ResidueType::ZERO, &$ResidueType::new(&U448::ONE))); const ROOT_OF_UNITY_INV: Self = $FieldName(Self::ROOT_OF_UNITY.0.invert().0); const DELTA: Self = $FieldName(Residue::new(&U448::from_le_hex($DELTA))); fn from_repr(bytes: Self::Repr) -> CtOption { let res = U448::from_le_slice(&bytes[.. 56]); CtOption::new($FieldName(Residue::new(&res)), res.ct_lt(&$MODULUS) & bytes[56].ct_eq(&0)) } fn to_repr(&self) -> Self::Repr { let mut repr = GenericArray::::default(); repr[.. 56].copy_from_slice(&self.0.retrieve().to_le_bytes()); repr } fn is_odd(&self) -> Choice { self.0.retrieve().is_odd() } } impl PrimeFieldBits for $FieldName { type ReprBits = [u8; 56]; fn to_le_bits(&self) -> FieldBits { let mut repr = [0; 56]; repr.copy_from_slice(&self.to_repr()[.. 56]); repr.into() } fn char_le_bits() -> FieldBits { let mut repr = [0; 56]; repr.copy_from_slice(&MODULUS.to_le_bytes()[.. 
56]); repr.into() } } impl Sum<$FieldName> for $FieldName { fn sum>(iter: I) -> $FieldName { let mut res = $FieldName::ZERO; for item in iter { res += item; } res } } impl<'a> Sum<&'a $FieldName> for $FieldName { fn sum>(iter: I) -> $FieldName { iter.cloned().sum() } } impl Product<$FieldName> for $FieldName { fn product>(iter: I) -> $FieldName { let mut res = $FieldName::ONE; for item in iter { res *= item; } res } } impl<'a> Product<&'a $FieldName> for $FieldName { fn product>(iter: I) -> $FieldName { iter.cloned().product() } } }; } ================================================ FILE: crypto/ed448/src/ciphersuite.rs ================================================ use zeroize::Zeroize; use sha3::{ digest::{ typenum::U114, core_api::BlockSizeUser, Update, Output, OutputSizeUser, FixedOutput, ExtendableOutput, XofReader, HashMarker, Digest, }, Shake256, }; use group::Group; use crate::{Scalar, Point}; use ciphersuite::Ciphersuite; /// Shake256, fixed to a 114-byte output, as used by Ed448. #[derive(Clone, Default)] pub struct Shake256_114(Shake256); impl BlockSizeUser for Shake256_114 { type BlockSize = ::BlockSize; fn block_size() -> usize { Shake256::block_size() } } impl OutputSizeUser for Shake256_114 { type OutputSize = U114; fn output_size() -> usize { 114 } } impl Update for Shake256_114 { fn update(&mut self, data: &[u8]) { self.0.update(data); } fn chain(mut self, data: impl AsRef<[u8]>) -> Self { Update::update(&mut self, data.as_ref()); self } } impl FixedOutput for Shake256_114 { fn finalize_fixed(self) -> Output { let mut res = Default::default(); FixedOutput::finalize_into(self, &mut res); res } fn finalize_into(self, out: &mut Output) { let mut reader = self.0.finalize_xof(); reader.read(out); } } impl HashMarker for Shake256_114 {} /// Ciphersuite for Ed448, inspired by RFC-8032. This is not recommended for usage. /// /// hash_to_F is implemented with a naive concatenation of the dst and data, allowing transposition /// between the two. 
impl Ciphersuite for Ed448 {
  type F = Scalar;
  type G = Point;
  type H = Shake256_114;

  // Ciphersuite identifier, used by callers for domain separation
  const ID: &'static [u8] = b"ed448";

  fn generator() -> Self::G {
    Point::generator()
  }

  // Hashes to a scalar by wide-reducing the full 114-byte SHAKE256 output, avoiding bias.
  // WARNING: dst and data are naively concatenated, so (dst: b"abc", data: b"def") yields the
  // same scalar as (dst: b"abcdef", data: b""). Per the type-level docs, don't let one dst be a
  // substring of another.
  fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F {
    Scalar::wide_reduce(Self::H::digest([dst, data].concat()).as_ref().try_into().unwrap())
  }
}
#[derive(Clone, Copy, PartialEq, Eq, Default, Debug)] pub struct FieldElement(pub(crate) ResidueType); impl DefaultIsZeroes for FieldElement {} // 2**448 - 2**224 - 1 pub(crate) const MODULUS: U448 = U448::from_be_hex(MODULUS_STR); const WIDE_MODULUS: U896 = U896::from_be_hex(concat!( "00000000000000000000000000000000000000000000000000000000", "00000000000000000000000000000000000000000000000000000000", "fffffffffffffffffffffffffffffffffffffffffffffffffffffffe", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffff" )); pub(crate) const Q_4: FieldElement = FieldElement(ResidueType::new( &MODULUS.saturating_add(&U448::ONE).wrapping_div(&U448::from_u8(4)), )); field!( FieldElement, ResidueType, MODULUS_STR, MODULUS, WIDE_MODULUS, 448, 7, concat!( "31000000000000000000000000000000000000000000000000000000", "00000000000000000000000000000000000000000000000000000000", ), ); #[test] fn test_field() { ff_group_tests::prime_field::test_prime_field_bits::<_, FieldElement>(&mut rand_core::OsRng); } ================================================ FILE: crypto/ed448/src/lib.rs ================================================ #![cfg_attr(docsrs, feature(doc_cfg))] #![doc = include_str!("../README.md")] #![no_std] #![allow(clippy::redundant_closure_call)] #[macro_use] mod backend; mod scalar; pub use scalar::Scalar; mod field; pub use field::FieldElement; mod point; pub use point::Point; mod ciphersuite; pub use crate::ciphersuite::Ed448; ================================================ FILE: crypto/ed448/src/point.rs ================================================ use core::{ ops::{Add, AddAssign, Neg, Sub, SubAssign, Mul, MulAssign}, iter::Sum, }; use rand_core::RngCore; use zeroize::Zeroize; use subtle::{Choice, CtOption, ConstantTimeEq, ConditionallySelectable, ConditionallyNegatable}; use crypto_bigint::{U448, modular::constant_mod::Residue}; use group::{ ff::{Field, PrimeField, PrimeFieldBits}, Group, GroupEncoding, prime::PrimeGroup, }; use crate::{ 
backend::u8_from_bool, scalar::Scalar, field::{ResidueType, FieldElement, Q_4}, }; const D: FieldElement = FieldElement(ResidueType::sub(&ResidueType::ZERO, &Residue::new(&U448::from_u16(39081)))); const G_Y: FieldElement = FieldElement(Residue::new(&U448::from_be_hex(concat!( "693f46716eb6bc248876203756c9c7624bea73736ca3984087789c1e", "05a0c2d73ad3ff1ce67c39c4fdbd132c4ed7c8ad9808795bf230fa14", )))); const G_X: FieldElement = FieldElement(Residue::new(&U448::from_be_hex(concat!( "4f1970c66bed0ded221d15a622bf36da9e146570470f1767ea6de324", "a3d3a46412ae1af72ab66511433b80e18b00938e2626a82bc70cc05e", )))); fn recover_x(y: FieldElement) -> CtOption { let ysq = y.square(); #[allow(non_snake_case)] let D_ysq = D * ysq; (D_ysq - FieldElement::ONE).invert().and_then(|inverted| { let temp = (ysq - FieldElement::ONE) * inverted; let mut x = temp.pow(Q_4); x.conditional_negate(x.is_odd()); let xsq = x.square(); CtOption::new(x, (xsq + ysq).ct_eq(&(FieldElement::ONE + (xsq * D_ysq)))) }) } /// Ed448 point. 
impl Add for Point {
  type Output = Point;

  // Unified point addition in projective coordinates (handles doubling and identity without
  // branching). NOTE(review): appears to follow the standard projective Edwards addition
  // formulas (B = (Z1 Z2)^2, E = d X1 X2 Y1 Y2, F = B - E, G = B + E) — confirm against the EFD.
  fn add(self, other: Self) -> Self {
    // 12 muls, 7 additions, 4 negations
    let xcp = self.x * other.x; // X1 X2
    let ycp = self.y * other.y; // Y1 Y2
    let zcp = self.z * other.z; // Z1 Z2
    #[allow(non_snake_case)]
    let B = zcp.square();
    #[allow(non_snake_case)]
    let E = D * xcp * ycp;
    #[allow(non_snake_case)]
    let F = B - E;
    #[allow(non_snake_case)]
    let G_ = B + E; // named G_ to avoid colliding with the generator constant G
    Point {
      // (X1 + Y1)(X2 + Y2) - X1 X2 - Y1 Y2 = X1 Y2 + X2 Y1
      x: zcp * F * ((self.x + self.y) * (other.x + other.y) - xcp - ycp),
      y: zcp * G_ * (ycp - xcp),
      z: F * G_,
    }
  }
}
impl Group for Point {
  type Scalar = Scalar;

  // Samples a random point by drawing a random field element as the encoded y coordinate,
  // attaching a random sign bit for x, and retrying until decoding succeeds.
  fn random(mut rng: impl RngCore) -> Self {
    loop {
      let mut bytes = FieldElement::random(&mut rng).to_repr();
      let mut_ref: &mut [u8] = bytes.as_mut();
      // Randomly set the sign bit (top bit of the final byte of the 57-byte encoding)
      mut_ref[56] |= u8::try_from(rng.next_u32() % 2).unwrap() << 7;
      let opt = Self::from_bytes(&bytes);
      if opt.is_some().into() {
        return opt.unwrap();
      }
    }
  }

  // The identity in projective coordinates: (0 : 1 : 1)
  fn identity() -> Self {
    Point { x: FieldElement::ZERO, y: FieldElement::ONE, z: FieldElement::ONE }
  }

  fn generator() -> Self {
    G
  }

  fn is_identity(&self) -> Choice {
    self.ct_eq(&Self::identity())
  }

  // Point doubling in projective coordinates
  fn double(&self) -> Self {
    // 7 muls, 7 additions, 4 negations
    let xsq = self.x.square();
    let ysq = self.y.square();
    let zsq = self.z.square();
    let xy = self.x + self.y;
    #[allow(non_snake_case)]
    let F = xsq + ysq;
    #[allow(non_snake_case)]
    let J = F - zsq.double();
    // (x + y)^2 - x^2 - y^2 = 2xy
    Point { x: J * (xy.square() - xsq - ysq), y: F * (xsq - ysq), z: F * J }
  }
}
impl Point {
  // Checks the point is in the prime-order subgroup by multiplying it by the subgroup order l:
  // Scalar::ZERO - Scalar::ONE is -1 ≡ l - 1 (mod l), so (l - 1) P + P = l P, which is the
  // identity exactly when P's order divides l.
  fn is_torsion_free(&self) -> Choice {
    ((*self * (Scalar::ZERO - Scalar::ONE)) + self).is_identity()
  }
}
which had torsion let old_y = FieldElement::from_repr(*GenericArray::from_slice( &hex::decode( "\ 12796c1532041525945f322e414d434467cfd5c57c9a9af2473b2775\ 8c921c4828b277ca5f2891fc4f3d79afdf29a64c72fb28b59c16fa51\ 00", ) .unwrap(), )) .unwrap(); let old = Point { x: -recover_x(old_y).unwrap(), y: old_y, z: FieldElement::ONE }; assert!(bool::from(!old.is_torsion_free())); } #[test] fn vector() { use generic_array::GenericArray; assert_eq!( Point::generator().double(), Point::from_bytes(GenericArray::from_slice( &hex::decode( "\ ed8693eacdfbeada6ba0cdd1beb2bcbb98302a3a8365650db8c4d88a\ 726de3b7d74d8835a0d76e03b0c2865020d659b38d04d74a63e905ae\ 80" ) .unwrap() )) .unwrap() ); assert_eq!( Point::generator() * Scalar::from_repr(*GenericArray::from_slice( &hex::decode( "\ 6298e1eef3c379392caaed061ed8a31033c9e9e3420726f23b404158\ a401cd9df24632adfe6b418dc942d8a091817dd8bd70e1c72ba52f3c\ 00" ) .unwrap() )) .unwrap(), Point::from_bytes(GenericArray::from_slice( &hex::decode( "\ 3832f82fda00ff5365b0376df705675b63d2a93c24c6e81d40801ba2\ 65632be10f443f95968fadb70d10786827f30dc001c8d0f9b7c1d1b0\ 00" ) .unwrap() )) .unwrap() ); } // Checks random won't infinitely loop #[test] fn random() { Point::random(&mut rand_core::OsRng); } ================================================ FILE: crypto/ed448/src/scalar.rs ================================================ use zeroize::{DefaultIsZeroes, Zeroize}; use crypto_bigint::{ U448, U896, U1024, modular::constant_mod::{ResidueParams, Residue}, }; const MODULUS_STR: &str = concat!( "3fffffffffffffffffffffffffffffffffffffffffffffffffffffff", "7cca23e9c44edb49aed63690216cc2728dc58f552378c292ab5844f3", ); impl_modulus!(ScalarModulus, U448, MODULUS_STR); type ResidueType = Residue; /// Ed448 Scalar field element. 
impl Scalar {
  /// Perform a wide reduction to obtain a non-biased Scalar.
  ///
  /// Interprets the 114 input bytes (the width of Ed448's SHAKE256 hash output) as a
  /// little-endian integer and reduces it modulo the scalar modulus. As the input is far wider
  /// than the ~446-bit modulus, the resulting bias is negligible.
  pub fn wide_reduce(bytes: [u8; 114]) -> Scalar {
    // Zero-pad to 128 bytes so the value can be parsed as a U1024
    let mut bytes_128 = [0; 128];
    bytes_128[.. 114].copy_from_slice(&bytes);
    let wide = U1024::from_le_slice(&bytes_128);
    // Reduce mod l, then keep the low 56 bytes (448 bits) which a U448 holds
    Scalar(Residue::new(&U448::from_le_slice(
      &wide.rem(&WIDE_REDUCTION_MODULUS).to_le_bytes()[.. 56],
    )))
  }
}
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: crypto/ff-group-tests/README.md ================================================ # FF/Group Tests A series of sanity checks for implementors of the ff/group APIs. Implementors are assumed to be of a non-trivial size. These tests do not attempt to check if constant time implementations are used. This library was [audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf), culminating in commit [669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06). Any subsequent changes have not undergone auditing. ================================================ FILE: crypto/ff-group-tests/src/field.rs ================================================ use rand_core::RngCore; use subtle::Choice; use group::ff::Field; /// Perform basic tests on equality. pub fn test_eq() { let zero = F::ZERO; let one = F::ONE; assert!(zero != one, "0 == 1"); assert!(!bool::from(zero.ct_eq(&one)), "0 ct_eq 1"); assert_eq!(zero, F::ZERO, "0 != 0"); assert!(bool::from(zero.ct_eq(&F::ZERO)), "0 !ct_eq 0"); assert_eq!(one, F::ONE, "1 != 1"); assert!(bool::from(one.ct_eq(&F::ONE)), "1 !ct_eq 1"); } /// Verify conditional selection works. Doesn't verify it's actually constant time. 
pub fn test_conditional_select() { let zero = F::ZERO; let one = F::ONE; assert_eq!(F::conditional_select(&zero, &one, 0.into()), zero, "couldn't select when false"); assert_eq!(F::conditional_select(&zero, &one, 1.into()), one, "couldn't select when true"); } /// Perform basic tests on addition. pub fn test_add() { assert_eq!(F::ZERO + F::ZERO, F::ZERO, "0 + 0 != 0"); assert_eq!(F::ZERO + F::ONE, F::ONE, "0 + 1 != 1"); assert_eq!(F::ONE + F::ZERO, F::ONE, "1 + 0 != 1"); // Only PrimeField offers From // Accordingly, we assume either double or addition is correct // They either have to be matchingly correct or matchingly incorrect, yet we can't // reliably determine that here assert_eq!(F::ONE + F::ONE, F::ONE.double(), "1 + 1 != 2"); } /// Perform basic tests on sum. pub fn test_sum() { assert_eq!((&[] as &[F]).iter().sum::(), F::ZERO, "[].sum() != 0"); assert_eq!([F::ZERO].iter().sum::(), F::ZERO, "[0].sum() != 0"); assert_eq!([F::ONE].iter().sum::(), F::ONE, "[1].sum() != 1"); let two = F::ONE + F::ONE; assert_eq!([F::ONE, F::ONE].iter().sum::(), two, "[1, 1].sum() != 2"); assert_eq!([two, F::ONE].iter().sum::(), two + F::ONE, "[2, 1].sum() != 3"); assert_eq!([two, F::ZERO, F::ONE].iter().sum::(), two + F::ONE, "[2, 0, 1].sum() != 3"); } /// Perform basic tests on subtraction. pub fn test_sub() { #[allow(clippy::eq_op)] let expr = F::ZERO - F::ZERO; assert_eq!(expr, F::ZERO, "0 - 0 != 0"); assert_eq!(F::ONE - F::ZERO, F::ONE, "1 - 0 != 1"); #[allow(clippy::eq_op)] let expr = F::ONE - F::ONE; assert_eq!(expr, F::ZERO, "1 - 1 != 0"); } /// Perform basic tests on negation. pub fn test_neg() { assert_eq!(-F::ZERO, F::ZERO, "-0 != 0"); assert_eq!(-(-F::ONE), F::ONE, "-(-1) != 1"); assert_eq!(F::ONE + (-F::ONE), F::ZERO, "1 + -1 != 0"); assert_eq!(F::ONE - (-F::ONE), F::ONE.double(), "1 - -1 != 2"); } /// Perform basic tests on multiplication. 
pub fn test_mul<F: Field>() {
  assert_eq!(F::ZERO * F::ZERO, F::ZERO, "0 * 0 != 0");
  assert_eq!(F::ONE * F::ZERO, F::ZERO, "1 * 0 != 0");
  assert_eq!(F::ONE * F::ONE, F::ONE, "1 * 1 != 1");
  let two = F::ONE.double();
  assert_eq!(two * (two + F::ONE), two + two + two, "2 * 3 != 6");
}

/// Perform basic tests on product.
pub fn test_product<F: Field>() {
  // The empty product is the multiplicative identity
  assert_eq!((&[] as &[F]).iter().product::<F>(), F::ONE, "[].product() != 1");
  assert_eq!([F::ZERO].iter().product::<F>(), F::ZERO, "[0].product() != 0");
  assert_eq!([F::ONE].iter().product::<F>(), F::ONE, "[1].product() != 1");
  assert_eq!([F::ONE, F::ONE].iter().product::<F>(), F::ONE, "[1, 1].product() != 1");
  let two = F::ONE + F::ONE;
  assert_eq!([two, F::ONE].iter().product::<F>(), two, "[2, 1].product() != 2");
  assert_eq!([two, two].iter().product::<F>(), two + two, "[2, 2].product() != 4");
  assert_eq!([two, two, F::ONE].iter().product::<F>(), two + two, "[2, 2, 1].product() != 4");
  assert_eq!([two, F::ZERO, F::ONE].iter().product::<F>(), F::ZERO, "[2, 0, 1].product() != 0");
}

/// Perform basic tests on the square function.
pub fn test_square<F: Field>() {
  assert_eq!(F::ZERO.square(), F::ZERO, "0^2 != 0");
  assert_eq!(F::ONE.square(), F::ONE, "1^2 != 1");
  let two = F::ONE.double();
  assert_eq!(two.square(), two + two, "2^2 != 4");
  let three = two + F::ONE;
  assert_eq!(three.square(), three * three, "3^2 != 9");
}

/// Perform basic tests on the invert function.
pub fn test_invert<F: Field>() {
  // 0 has no multiplicative inverse
  assert!(bool::from(F::ZERO.invert().is_none()), "0.invert() is some");
  assert_eq!(F::ONE.invert().unwrap(), F::ONE, "1.invert() != 1");
  let two = F::ONE.double();
  let three = two + F::ONE;
  assert_eq!(two * three.invert().unwrap() * three, two, "2 * 3.invert() * 3 != 2");
}

/// Perform basic tests on the sqrt functions.
pub fn test_sqrt() { assert_eq!(F::ZERO.sqrt().unwrap(), F::ZERO, "sqrt(0) != 0"); assert!( (F::ONE.sqrt().unwrap() == F::ONE) || (F::ONE.sqrt().unwrap() == -F::ONE), "sqrt(1) != 1" ); let mut has_root = F::ONE.double(); while bool::from(has_root.sqrt().is_none()) { has_root += F::ONE; } // The following code doesn't assume which root is returned, yet it does assume a consistent root // is returned let root = has_root.sqrt().unwrap(); assert_eq!(root * root, has_root, "sqrt(x)^2 != x"); let check = |value: (_, _), expected: (_, F), msg| { assert_eq!(bool::from(value.0), bool::from(expected.0), "{msg}"); assert!((value.1 == expected.1) || (value.1 == -expected.1), "{msg}"); }; check( F::sqrt_ratio(&has_root, &F::ONE), (Choice::from(1), root), "sqrt_ratio didn't return the root with a divisor of 1", ); check( F::sqrt_ratio(&(has_root * F::ONE.double()), &F::ONE.double()), (Choice::from(1), root), "sqrt_ratio didn't return the root with a divisor of 2", ); check(F::sqrt_alt(&F::ZERO), F::sqrt_ratio(&F::ZERO, &F::ONE), "sqrt_alt(0) != sqrt_ratio(0, 1)"); check(F::sqrt_alt(&F::ONE), F::sqrt_ratio(&F::ONE, &F::ONE), "sqrt_alt(1) != sqrt_ratio(1, 1)"); check(F::sqrt_alt(&has_root), (Choice::from(1), root), "sqrt_alt(square) != (1, root)"); // Check 0 divisors are properly implemented check( F::sqrt_ratio(&has_root, &F::ZERO), (Choice::from(0), F::ZERO), "sqrt_ratio didn't return the right value for a 0 divisor", ); // Check non-squares are appropriately marked let mut no_root = has_root + F::ONE; while bool::from(no_root.sqrt().is_some()) { no_root += F::ONE; } assert!( !bool::from(F::sqrt_ratio(&no_root, &F::ONE).0), "sqrt_ratio claimed non-square had root" ); assert!(!bool::from(F::sqrt_alt(&no_root).0), "sqrt_alt claimed non-square had root"); } /// Perform basic tests on the is_zero functions. pub fn test_is_zero() { assert!(bool::from(F::ZERO.is_zero()), "0 is not 0"); assert!(F::ZERO.is_zero_vartime(), "0 is not 0"); } /// Perform basic tests on the cube function. 
pub fn test_cube() { assert_eq!(F::ZERO.cube(), F::ZERO, "0^3 != 0"); assert_eq!(F::ONE.cube(), F::ONE, "1^3 != 1"); let two = F::ONE.double(); assert_eq!(two.cube(), two * two * two, "2^3 != 8"); } /// Test random. pub fn test_random(rng: &mut R) { let a = F::random(&mut *rng); // Run up to 128 times so small fields, which may occasionally return the same element twice, // are statistically unlikely to fail // Field of order 1 will always fail this test due to lack of distinct elements to sample // from let mut pass = false; for _ in 0 .. 128 { let b = F::random(&mut *rng); // This test passes if a distinct element is returned at least once if b != a { pass = true; } } assert!(pass, "random always returned the same value"); } /// Run all tests on fields implementing Field. pub fn test_field(rng: &mut R) { test_eq::(); test_conditional_select::(); test_add::(); test_sum::(); test_sub::(); test_neg::(); test_mul::(); test_product::(); test_square::(); test_invert::(); test_sqrt::(); test_is_zero::(); test_cube::(); test_random::(rng); } ================================================ FILE: crypto/ff-group-tests/src/group.rs ================================================ use rand_core::RngCore; use group::{ ff::{Field, PrimeFieldBits}, Group, prime::PrimeGroup, }; use crate::prime_field::{test_prime_field, test_prime_field_bits}; /// Test equality. pub fn test_eq() { assert_eq!(G::identity(), G::identity(), "identity != identity"); assert_eq!(G::generator(), G::generator(), "generator != generator"); assert!(G::identity() != G::generator(), "identity == generator"); } /// Test identity. 
pub fn test_identity<G: Group>() {
  assert!(bool::from(G::identity().is_identity()), "identity wasn't identity");
  assert!(
    bool::from((G::identity() + G::identity()).is_identity()),
    "identity + identity wasn't identity"
  );
  assert!(
    bool::from((G::generator() - G::generator()).is_identity()),
    "generator - generator wasn't identity"
  );
  assert!(!bool::from(G::generator().is_identity()), "is_identity claimed generator was identity");
}

/// Sanity check the generator.
pub fn test_generator<G: Group>() {
  assert!(G::generator() != G::identity(), "generator was identity");
  assert!(
    (G::generator() + G::generator()) != G::generator(),
    "generator added to itself was identity"
  );
}

/// Test doubling of group elements.
pub fn test_double<G: Group>() {
  assert!(bool::from(G::identity().double().is_identity()), "identity.double() wasn't identity");
  assert_eq!(
    G::generator() + G::generator(),
    G::generator().double(),
    "generator + generator != generator.double()"
  );
}

/// Test addition.
pub fn test_add<G: Group>() {
  assert_eq!(G::identity() + G::identity(), G::identity(), "identity + identity != identity");
  assert_eq!(G::identity() + G::generator(), G::generator(), "identity + generator != generator");
  assert_eq!(G::generator() + G::identity(), G::generator(), "generator + identity != generator");

  let two = G::generator().double();
  assert_eq!(G::generator() + G::generator(), two, "generator + generator != two");
  let four = two.double();
  assert_eq!(
    G::generator() + G::generator() + G::generator() + G::generator(),
    four,
    "generator + generator + generator + generator != four"
  );
}

/// Test summation.
pub fn test_sum<G: Group>() {
  assert_eq!(
    [G::generator(), G::generator()].iter().sum::<G>(),
    G::generator().double(),
    "[generator, generator].sum() != two"
  );
  assert_eq!(
    [G::generator().double(), G::generator()].iter().sum::<G>(),
    G::generator().double() + G::generator(),
    "[generator.double(), generator].sum() != three"
  );
}

/// Test negation.
pub fn test_neg<G: Group>() {
  assert_eq!(G::identity(), G::identity().neg(), "identity != -identity");
  assert_eq!(
    G::generator() + G::generator().neg(),
    G::identity(),
    "generator + -generator != identity"
  );
}

/// Test subtraction.
pub fn test_sub<G: Group>() {
  assert_eq!(G::generator() - G::generator(), G::identity(), "generator - generator != identity");

  let two = G::generator() + G::generator();
  assert_eq!(two - G::generator(), G::generator(), "two - one != one");
}

/// Test scalar multiplication
pub fn test_mul<G: Group>() {
  assert_eq!(G::generator() * G::Scalar::from(0), G::identity(), "generator * 0 != identity");
  assert_eq!(G::generator() * G::Scalar::from(1), G::generator(), "generator * 1 != generator");
  assert_eq!(
    G::generator() * G::Scalar::from(2),
    G::generator() + G::generator(),
    "generator * 2 != generator + generator"
  );
  assert_eq!(G::identity() * G::Scalar::from(2), G::identity(), "identity * 2 != identity");
}

/// Test `((order - 1) * G) + G == identity`.
pub fn test_order<G: Group>() {
  let minus_one = G::generator() * (G::Scalar::ZERO - G::Scalar::ONE);
  assert!(minus_one != G::identity(), "(modulus - 1) * G was identity");
  assert_eq!(minus_one + G::generator(), G::identity(), "((modulus - 1) * G) + G wasn't identity");
}

/// Test random.
pub fn test_random<R: RngCore, G: Group>(rng: &mut R) {
  let a = G::random(&mut *rng);
  assert!(!bool::from(a.is_identity()), "random returned identity");

  // Run up to 128 times so small groups, which may occasionally return the same element twice,
  // are statistically unlikely to fail
  // Groups of order <= 2 will always fail this test due to lack of distinct elements to sample
  // from
  let mut pass = false;
  for _ in 0 .. 128 {
    let b = G::random(&mut *rng);
    assert!(!bool::from(b.is_identity()), "random returned identity");

    // This test passes if a distinct element is returned at least once
    if b != a {
      pass = true;
    }
  }
  assert!(pass, "random always returned the same value");
}

/// Run all tests on groups implementing Group.
pub fn test_group<R: RngCore, G: Group>(rng: &mut R) {
  // The scalar field of a group must itself be a valid prime field
  test_prime_field::<R, G::Scalar>(rng);

  test_eq::<G>();
  test_identity::<G>();
  test_generator::<G>();
  test_double::<G>();
  test_add::<G>();
  test_sum::<G>();
  test_neg::<G>();
  test_sub::<G>();
  test_mul::<G>();
  test_order::<G>();
  test_random::<R, G>(rng);
}

/// Test encoding and decoding of group elements.
pub fn test_encoding<G: PrimeGroup>() {
  // Round-trip a point through its canonical byte representation
  let test = |point: G, msg| -> G {
    let bytes = point.to_bytes();
    let mut repr = G::Repr::default();
    repr.as_mut().copy_from_slice(bytes.as_ref());
    let decoded = G::from_bytes(&repr).unwrap();
    assert_eq!(point, decoded, "{msg} couldn't be encoded and decoded");
    assert_eq!(
      point,
      G::from_bytes_unchecked(&repr).unwrap(),
      "{msg} couldn't be encoded and decoded",
    );
    decoded
  };
  assert!(bool::from(test(G::identity(), "identity").is_identity()));
  test(G::generator(), "generator");
  test(G::generator() + G::generator(), "(generator * 2)");
}

/// Run all tests on groups implementing PrimeGroup (Group + GroupEncoding).
pub fn test_prime_group<R: RngCore, G: PrimeGroup>(rng: &mut R) {
  test_group::<R, G>(rng);
  test_encoding::<G>();
}

/// Run all tests offered by this crate on the group.
pub fn test_prime_group_bits>(rng: &mut R) { test_prime_field_bits::(rng); test_prime_group::(rng); } // Run these tests against k256/p256 // This ensures that these tests are well formed and won't error for valid implementations, // assuming the validity of k256/p256 // While k256 and p256 may be malformed in a way which coincides with a faulty test, this is // considered unlikely // The other option, not running against any libraries, would leave faulty tests completely // undetected #[test] fn test_k256() { test_prime_group_bits::<_, k256::ProjectivePoint>(&mut rand_core::OsRng); } #[test] fn test_p256() { test_prime_group_bits::<_, p256::ProjectivePoint>(&mut rand_core::OsRng); } #[test] fn test_bls12_381() { test_prime_group_bits::<_, bls12_381::G1Projective>(&mut rand_core::OsRng); test_prime_group_bits::<_, bls12_381::G2Projective>(&mut rand_core::OsRng); } #[test] fn test_pallas_vesta() { test_prime_group_bits::<_, pasta_curves::pallas::Point>(&mut rand_core::OsRng); test_prime_group_bits::<_, pasta_curves::vesta::Point>(&mut rand_core::OsRng); } ================================================ FILE: crypto/ff-group-tests/src/lib.rs ================================================ #![cfg_attr(docsrs, feature(doc_cfg))] #![doc = include_str!("../README.md")] /// Tests for the Field trait. pub mod field; /// Tests for the PrimeField and PrimeFieldBits traits. pub mod prime_field; /// Tests for the Group and GroupEncoding traits. pub mod group; ================================================ FILE: crypto/ff-group-tests/src/prime_field.rs ================================================ use rand_core::RngCore; use group::ff::{PrimeField, PrimeFieldBits}; use crate::field::test_field; // Ideally, this and test_one would be under Field, yet these tests require access to From /// Test zero returns F::from(0). pub fn test_zero() { assert_eq!(F::ZERO, F::from(0u64), "0 != 0"); } /// Test one returns F::from(1). 
pub fn test_one<F: PrimeField>() {
  assert_eq!(F::ONE, F::from(1u64), "1 != 1");
}

/// Test `From<u64>` for F works.
pub fn test_from_u64<F: PrimeField>() {
  assert_eq!(F::ZERO, F::from(0u64), "0 != 0u64");
  assert_eq!(F::ONE, F::from(1u64), "1 != 1u64");
  assert_eq!(F::ONE.double(), F::from(2u64), "2 != 2u64");
  assert_eq!(F::ONE.double() + F::ONE, F::from(3u64), "3 != 3u64");
}

/// Test from_u128 for F works.
pub fn test_from_u128<F: PrimeField>() {
  assert_eq!(F::ZERO, F::from_u128(0u128), "0 != 0u128");
  assert_eq!(F::ONE, F::from_u128(1u128), "1 != 1u128");
  assert_eq!(F::from(2u64), F::from_u128(2u128), "2u64 != 2u128");
  assert_eq!(F::from(3u64), F::from_u128(3u128), "3u64 != 3u128");
}

/// Test is_odd/is_even works.
///
/// This test assumes an odd modulus with oddness being determined by the least-significant bit.
/// Accordingly, this test doesn't support fields alternatively defined.
/// TODO: Improve in the future.
pub fn test_is_odd<F: PrimeField>() {
  assert_eq!(F::ZERO.is_odd().unwrap_u8(), 0, "0 was odd");
  assert_eq!(F::ZERO.is_even().unwrap_u8(), 1, "0 wasn't even");
  assert_eq!(F::ONE.is_odd().unwrap_u8(), 1, "1 was even");
  assert_eq!(F::ONE.is_even().unwrap_u8(), 0, "1 wasn't odd");

  // Make sure an odd value added to an odd value is even
  let two = F::ONE.double();
  assert_eq!(two.is_odd().unwrap_u8(), 0, "2 was odd");
  assert_eq!(two.is_even().unwrap_u8(), 1, "2 wasn't even");

  // Make sure an even value added to an even value is even
  let four = two.double();
  assert_eq!(four.is_odd().unwrap_u8(), 0, "4 was odd");
  assert_eq!(four.is_even().unwrap_u8(), 1, "4 wasn't even");

  // For an odd modulus, modulus - 1 is even
  let neg_one = -F::ONE;
  assert_eq!(neg_one.is_odd().unwrap_u8(), 0, "-1 was odd");
  assert_eq!(neg_one.is_even().unwrap_u8(), 1, "-1 wasn't even");
  // (modulus - 1) * 2 mod modulus = modulus - 2, which is odd for an odd modulus
  assert_eq!(neg_one.double().is_odd().unwrap_u8(), 1, "(-1).double() was even");
  assert_eq!(neg_one.double().is_even().unwrap_u8(), 0, "(-1).double() wasn't odd");
}

/// Test encoding and decoding of field elements.
pub fn test_encoding() { let test = |scalar: F, msg| { let bytes = scalar.to_repr(); let mut repr = F::Repr::default(); repr.as_mut().copy_from_slice(bytes.as_ref()); assert_eq!(scalar, F::from_repr(repr).unwrap(), "{msg} couldn't be encoded and decoded"); assert_eq!( scalar, F::from_repr_vartime(repr).unwrap(), "{msg} couldn't be encoded and decoded", ); assert_eq!( bytes.as_ref(), F::from_repr(repr).unwrap().to_repr().as_ref(), "canonical encoding decoded produced distinct encoding" ); }; test(F::ZERO, "0"); test(F::ONE, "1"); test(F::ONE + F::ONE, "2"); test(-F::ONE, "-1"); // Also check if a non-canonical encoding is possible let mut high = (F::ZERO - F::ONE).to_repr(); let mut possible_non_canon = false; for byte in high.as_mut() { // The fact a bit isn't set in the highest possible value suggests there's unused bits // If there's unused bits, mark the possibility of a non-canonical encoding and set the bits if *byte != 255 { possible_non_canon = true; *byte = 255; break; } } // Any non-canonical encoding should fail to be read if possible_non_canon { assert!(!bool::from(F::from_repr(high).is_some())); } } /// Run all tests on fields implementing PrimeField. pub fn test_prime_field(rng: &mut R) { test_field::(rng); test_zero::(); test_one::(); test_from_u64::(); test_from_u128::(); test_is_odd::(); // Do a sanity check on the CAPACITY. A full test can't be done at this time assert!(F::CAPACITY <= F::NUM_BITS, "capacity exceeded number of bits"); test_encoding::(); } /// Test to_le_bits returns the little-endian bits of a value. // This test assumes that the modulus is at least 4. 
pub fn test_to_le_bits() { { let bits = F::ZERO.to_le_bits(); assert_eq!(bits.iter().filter(|bit| **bit).count(), 0, "0 had bits set"); } { let bits = F::ONE.to_le_bits(); assert!(bits[0], "1 didn't have its least significant bit set"); assert_eq!(bits.iter().filter(|bit| **bit).count(), 1, "1 had multiple bits set"); } { let bits = F::from(2).to_le_bits(); assert!(bits[1], "2 didn't have its second bit set"); assert_eq!(bits.iter().filter(|bit| **bit).count(), 1, "2 had multiple bits set"); } { let bits = F::from(3).to_le_bits(); assert!(bits[0], "3 didn't have its first bit set"); assert!(bits[1], "3 didn't have its second bit set"); assert_eq!(bits.iter().filter(|bit| **bit).count(), 2, "2 didn't have two bits set"); } } /// Test char_le_bits returns the bits of the modulus. pub fn test_char_le_bits() { // A field with a modulus of 0 may be technically valid? Yet these tests assume some basic // functioning. assert!(F::char_le_bits().iter().any(|bit| *bit), "char_le_bits contained 0"); // Test this is the bit pattern of the modulus by reconstructing the modulus from it let mut bit = F::ONE; let mut modulus = F::ZERO; for set in F::char_le_bits() { if set { modulus += bit; } bit = bit.double(); } assert_eq!(modulus, F::ZERO, "char_le_bits did not contain the field's modulus"); } /// Test NUM_BITS is accurate. pub fn test_num_bits() { let mut val = F::ONE; let mut bit = 0; while ((bit + 1) < val.to_le_bits().len()) && val.double().to_le_bits()[bit + 1] { val = val.double(); bit += 1; } assert_eq!( F::NUM_BITS, u32::try_from(bit + 1).unwrap(), "NUM_BITS was incorrect. it should be {}", bit + 1 ); } /// Test CAPACITY is accurate. pub fn test_capacity() { assert!(F::CAPACITY <= F::NUM_BITS, "capacity exceeded number of bits"); let mut val = F::ONE; assert!(val.to_le_bits()[0], "1 didn't have its least significant bit set"); for b in 1 .. 
F::CAPACITY { val = val.double(); val += F::ONE; for i in 0 ..= b { assert!( val.to_le_bits()[usize::try_from(i).unwrap()], "couldn't set a bit within the capacity", ); } } // If the field has a modulus which is a power of 2, NUM_BITS should equal CAPACITY // Adding one would also be sufficient to trigger an overflow if F::char_le_bits().iter().filter(|bit| **bit).count() == 1 { assert_eq!( F::NUM_BITS, F::CAPACITY, "field has a power of two modulus yet CAPACITY doesn't equal NUM_BITS", ); assert_eq!(val + F::ONE, F::ZERO, "CAPACITY set bits, + 1, != zero for a binary field"); return; } assert_eq!(F::NUM_BITS - 1, F::CAPACITY, "capacity wasn't NUM_BITS - 1"); } fn pow(base: F, exp: F) -> F { let mut res = F::ONE; for bit in exp.to_le_bits().iter().rev() { res *= res; if *bit { res *= base; } } res } // Ideally, this would be under field.rs, yet the above pow function requires PrimeFieldBits /// Perform basic tests on the pow functions, even when passed non-canonical inputs. pub fn test_pow() { // Sanity check the local pow algorithm. 
Does not have assert messages as these shouldn't fail assert_eq!(pow(F::ONE, F::ZERO), F::ONE); assert_eq!(pow(F::ONE.double(), F::ZERO), F::ONE); assert_eq!(pow(F::ONE, F::ONE), F::ONE); let two = F::ONE.double(); assert_eq!(pow(two, F::ONE), two); assert_eq!(pow(two, two), two.double()); let three = two + F::ONE; assert_eq!(pow(three, F::ONE), three); assert_eq!(pow(three, two), three * three); assert_eq!(pow(three, three), three * three * three); // Choose a small base without a notably uniform bit pattern let bit_0 = F::ONE; let base = { let bit_1 = bit_0.double(); let bit_2 = bit_1.double(); let bit_3 = bit_2.double(); let bit_4 = bit_3.double(); let bit_5 = bit_4.double(); let bit_6 = bit_5.double(); let bit_7 = bit_6.double(); bit_7 + bit_6 + bit_5 + bit_2 + bit_0 }; // Ensure pow/pow_vartime return 1 when the base is raised to 0, handling malleated inputs assert_eq!(base.pow([]), F::ONE, "pow x^0 ([]) != 1"); assert_eq!(base.pow_vartime([]), F::ONE, "pow x^0 ([]) != 1"); assert_eq!(base.pow([0]), F::ONE, "pow_vartime x^0 ([0]) != 1"); assert_eq!(base.pow_vartime([0]), F::ONE, "pow_vartime x^0 ([0]) != 1"); assert_eq!(base.pow([0, 0]), F::ONE, "pow x^0 ([0, 0]) != 1"); assert_eq!(base.pow_vartime([0, 0]), F::ONE, "pow_vartime x^0 ([0, 0]) != 1"); // Ensure pow/pow_vartime return the base when raised to 1, handling malleated inputs assert_eq!(base.pow([1]), base, "pow x^1 ([1]) != x"); assert_eq!(base.pow_vartime([1, 0]), base, "pow_vartime x^1 ([1, 0]) != x"); assert_eq!(base.pow([1]), base, "pow x^1 ([1]) != x"); assert_eq!(base.pow_vartime([1, 0]), base, "pow_vartime x^1 ([1, 0]) != x"); // Ensure pow/pow_vartime can handle multiple u64s properly // Create a scalar which exceeds u64 let mut bit_64 = bit_0; for _ in 0 .. 
64 { bit_64 = bit_64.double(); } // Run the tests assert_eq!(base.pow([0, 1]), pow(base, bit_64), "pow x^(2^64) != x^(2^64)"); assert_eq!(base.pow_vartime([0, 1]), pow(base, bit_64), "pow_vartime x^(2^64) != x^(2^64)"); assert_eq!(base.pow([1, 1]), pow(base, bit_64 + F::ONE), "pow x^(2^64 + 1) != x^(2^64 + 1)"); assert_eq!( base.pow_vartime([1, 1]), pow(base, bit_64 + F::ONE), "pow_vartime x^(2^64 + 1) != x^(2^64 + 1)" ); } /// Test the inverted constants are correct. pub fn test_inv_consts() { assert_eq!(F::TWO_INV, F::from(2u64).invert().unwrap(), "F::TWO_INV != 2.invert()"); assert_eq!( F::ROOT_OF_UNITY_INV, F::ROOT_OF_UNITY.invert().unwrap(), "F::ROOT_OF_UNITY_INV != F::ROOT_OF_UNITY.invert()" ); } /// Test S is correct. pub fn test_s() { // "This is the number of leading zero bits in the little-endian bit representation of // `modulus - 1`." let mut s = 0; for b in (F::ZERO - F::ONE).to_le_bits() { if b { break; } s += 1; } assert_eq!(s, F::S, "incorrect S"); } /// Test the root of unity is correct for the provided multiplicative generator. pub fn test_root_of_unity() { // "It can be calculated by exponentiating `Self::multiplicative_generator` by `t`, where // `t = (modulus - 1) >> Self::S`." // Get the bytes to shift let mut bits = (F::ZERO - F::ONE).to_le_bits().iter().map(|bit| *bit).collect::>(); for _ in 0 .. F::S { bits.remove(0); } // Construct t let mut bit = F::ONE; let mut t = F::ZERO; for set in bits { if set { t += bit; } bit = bit.double(); } assert!(bool::from(t.is_odd()), "t wasn't odd"); assert_eq!(pow(F::MULTIPLICATIVE_GENERATOR, t), F::ROOT_OF_UNITY, "incorrect root of unity"); assert_eq!( pow(F::ROOT_OF_UNITY, pow(F::from(2u64), F::from(F::S.into()))), F::ONE, "root of unity raised to 2^S wasn't 1", ); } /// Test DELTA is correct. pub fn test_delta() { assert_eq!( pow(F::MULTIPLICATIVE_GENERATOR, pow(F::from(2u64), F::from(u64::from(F::S)))), F::DELTA, "F::DELTA is incorrect" ); } /// Run all tests on fields implementing PrimeFieldBits. 
pub fn test_prime_field_bits(rng: &mut R) { test_prime_field::(rng); test_to_le_bits::(); test_char_le_bits::(); test_pow::(); test_inv_consts::(); test_s::(); test_root_of_unity::(); test_delta::(); test_num_bits::(); test_capacity::(); } ================================================ FILE: crypto/frost/Cargo.toml ================================================ [package] name = "modular-frost" version = "0.10.1" description = "Modular implementation of FROST over ff/group" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/crypto/frost" authors = ["Luke Parker "] keywords = ["frost", "multisig", "threshold"] edition = "2021" rust-version = "1.80" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] thiserror = { version = "2", default-features = false, features = ["std"] } rand_core = { version = "0.6", default-features = false, features = ["std"] } rand_chacha = { version = "0.3", default-features = false, features = ["std"] } zeroize = { version = "^1.5", default-features = false, features = ["std", "zeroize_derive"] } subtle = { version = "^2.4", default-features = false, features = ["std"] } hex = { version = "0.4", default-features = false, features = ["std"], optional = true } digest = { version = "0.10", default-features = false, features = ["std"] } transcript = { package = "flexible-transcript", path = "../transcript", version = "^0.3.2", default-features = false, features = ["std", "recommended"] } dalek-ff-group = { path = "../dalek-ff-group", version = "0.4", default-features = false, features = ["std"], optional = true } minimal-ed448 = { path = "../ed448", version = "0.4", default-features = false, features = ["std"], optional = true } ciphersuite = { path = "../ciphersuite", version = "^0.4.1", default-features = false, features = ["std"] } ciphersuite-kp256 = { path = "../ciphersuite/kp256", version = "0.4", default-features = false, features = 
["std"], optional = true } multiexp = { path = "../multiexp", version = "0.4", default-features = false, features = ["std", "batch"] } schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "^0.5.1", default-features = false, features = ["std"] } dkg = { path = "../dkg", version = "0.6.1", default-features = false, features = ["std"] } dkg-recovery = { path = "../dkg/recovery", version = "0.6", default-features = false, features = ["std"], optional = true } dkg-dealer = { path = "../dkg/dealer", version = "0.6", default-features = false, features = ["std"], optional = true } [dev-dependencies] hex = "0.4" serde_json = { version = "1", default-features = false, features = ["std"] } dkg = { path = "../dkg", default-features = false, features = ["std"] } dkg-recovery = { path = "../dkg/recovery", default-features = false, features = ["std"] } dkg-dealer = { path = "../dkg/dealer", default-features = false, features = ["std"] } [features] ed25519 = ["dalek-ff-group"] ristretto = ["dalek-ff-group"] secp256k1 = ["ciphersuite-kp256"] p256 = ["ciphersuite-kp256"] ed448 = ["minimal-ed448"] tests = ["hex", "rand_core/getrandom", "dkg-dealer", "dkg-recovery"] ================================================ FILE: crypto/frost/LICENSE ================================================ MIT License Copyright (c) 2021-2023 Luke Parker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: crypto/frost/README.md ================================================ # Modular FROST A modular implementation of FROST for any curve with a ff/group API. Additionally, custom algorithms may be specified so any signature reducible to Schnorr-like may be used with FROST. A Schnorr algorithm is provided, of the form (R, s) where `s = r + cx`, which allows specifying the challenge format. This is intended to easily allow integrating with existing systems. This library offers ciphersuites compatible with the [IETF draft](https://github.com/cfrg/draft-irtf-cfrg-frost). Currently, version 15 is supported. A variety of testing utilities are provided under the `tests` feature. These are provided with no guarantees and may have completely arbitrary behavior, including panicking for completely well-reasoned input. This library was [audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf), culminating in commit [669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06). Any subsequent changes have not undergone auditing. While this audit included FROST's definition of Ed448, the underlying Ed448 ciphersuite (offered by the ciphersuite crate) was not audited, nor was the minimal-ed448 crate implementing the curve itself. 
================================================ FILE: crypto/frost/src/algorithm.rs ================================================ use core::{marker::PhantomData, fmt::Debug}; use std::io::{self, Read, Write}; use zeroize::Zeroizing; use rand_core::{RngCore, CryptoRng}; use transcript::Transcript; use crate::{Participant, ThresholdKeys, ThresholdView, Curve, FrostError}; pub use schnorr::SchnorrSignature; /// Write an addendum to a writer. pub trait WriteAddendum { fn write(&self, writer: &mut W) -> io::Result<()>; } impl WriteAddendum for () { fn write(&self, _: &mut W) -> io::Result<()> { Ok(()) } } /// Trait alias for the requirements to be used as an addendum. pub trait Addendum: Send + Sync + Clone + PartialEq + Debug + WriteAddendum {} impl Addendum for A {} /// Algorithm trait usable by the FROST signing machine to produce signatures.. pub trait Algorithm: Send + Sync { /// The transcript format this algorithm uses. This likely should NOT be the IETF-compatible /// transcript included in this crate. type Transcript: Sync + Clone + Debug + Transcript; /// Serializable addendum, used in algorithms requiring more data than just the nonces. type Addendum: Addendum; /// The resulting type of the signatures this algorithm will produce. type Signature: Clone + PartialEq + Debug; /// Obtain a mutable borrow of the underlying transcript. fn transcript(&mut self) -> &mut Self::Transcript; /// Obtain the list of nonces to generate, as specified by the generators to create commitments /// against per-nonce. /// /// The Algorithm is responsible for all transcripting of these nonce specifications/generators. /// /// The prover will be passed the commitments, and the commitments will be sent to all other /// participants. No guarantees the commitments are internally consistent (have the same discrete /// logarithm across generators) are made. Any Algorithm which specifies multiple generators for /// a single nonce must handle that itself. 
fn nonces(&self) -> Vec>; /// Generate an addendum to FROST's preprocessing stage. fn preprocess_addendum( &mut self, rng: &mut R, keys: &ThresholdKeys, ) -> Self::Addendum; /// Read an addendum from a reader. fn read_addendum(&self, reader: &mut R) -> io::Result; /// Process the addendum for the specified participant. Guaranteed to be called in order. fn process_addendum( &mut self, params: &ThresholdView, l: Participant, reader: Self::Addendum, ) -> Result<(), FrostError>; /// Sign a share with the given secret/nonce. /// The secret will already have had its Lagrange coefficient applied so it is the necessary /// key share. /// The nonce will already have been processed into the combined form d + (e * p). fn sign_share( &mut self, params: &ThresholdView, nonce_sums: &[Vec], nonces: Vec>, msg: &[u8], ) -> C::F; /// Verify a signature. #[must_use] fn verify(&self, group_key: C::G, nonces: &[Vec], sum: C::F) -> Option; /// Verify a specific share given as a response. /// This function should return a series of pairs whose products should sum to zero for a valid /// share. Any error raised is treated as the share being invalid. #[allow(clippy::type_complexity, clippy::result_unit_err)] fn verify_share( &self, verification_share: C::G, nonces: &[Vec], share: C::F, ) -> Result, ()>; } mod sealed { pub use super::*; /// IETF-compliant transcript. This is incredibly naive and should not be used within larger /// protocols. 
#[derive(Clone, Debug)] pub struct IetfTranscript(pub(crate) Vec); impl Transcript for IetfTranscript { type Challenge = Vec; fn new(_: &'static [u8]) -> IetfTranscript { IetfTranscript(vec![]) } fn domain_separate(&mut self, _: &[u8]) {} fn append_message>(&mut self, _: &'static [u8], message: M) { self.0.extend(message.as_ref()); } fn challenge(&mut self, _: &'static [u8]) -> Vec { self.0.clone() } // FROST won't use this and this shouldn't be used outside of FROST fn rng_seed(&mut self, _: &'static [u8]) -> [u8; 32] { unimplemented!() } } } pub(crate) use sealed::IetfTranscript; /// HRAm usable by the included Schnorr signature algorithm to generate challenges. pub trait Hram: Send + Sync + Clone { /// HRAm function to generate a challenge. /// H2 from the IETF draft, despite having a different argument set (not being pre-formatted). #[allow(non_snake_case)] fn hram(R: &C::G, A: &C::G, m: &[u8]) -> C::F; } /// Schnorr signature algorithm ((R, s) where s = r + cx). /// /// `verify`, `verify_share` must be called after `sign_share` is called. #[derive(Clone)] pub struct Schnorr> { transcript: T, c: Option, _hram: PhantomData, } /// IETF-compliant Schnorr signature algorithm. /// /// This algorithm specifically uses the transcript format defined in the FROST IETF draft. /// It's a naive transcript format not viable for usage in larger protocols, yet is presented here /// in order to provide compatibility. /// /// Usage of this with key offsets will break the intended compatibility as the IETF draft does not /// specify a protocol for offsets. pub type IetfSchnorr = Schnorr; impl> Schnorr { /// Construct a Schnorr algorithm continuing the specified transcript. pub fn new(transcript: T) -> Schnorr { Schnorr { transcript, c: None, _hram: PhantomData } } } impl> IetfSchnorr { /// Construct a IETF-compatible Schnorr algorithm. /// /// Please see the `IetfSchnorr` documentation for the full details of this. 
pub fn ietf() -> IetfSchnorr { Schnorr::new(IetfTranscript(vec![])) } } impl> Algorithm for Schnorr { type Transcript = T; type Addendum = (); type Signature = SchnorrSignature; fn transcript(&mut self) -> &mut Self::Transcript { &mut self.transcript } fn nonces(&self) -> Vec> { vec![vec![C::generator()]] } fn preprocess_addendum(&mut self, _: &mut R, _: &ThresholdKeys) {} fn read_addendum(&self, _: &mut R) -> io::Result { Ok(()) } fn process_addendum( &mut self, _: &ThresholdView, _: Participant, (): (), ) -> Result<(), FrostError> { Ok(()) } fn sign_share( &mut self, params: &ThresholdView, nonce_sums: &[Vec], mut nonces: Vec>, msg: &[u8], ) -> C::F { let c = H::hram(&nonce_sums[0][0], ¶ms.group_key(), msg); self.c = Some(c); SchnorrSignature::::sign(params.secret_share(), nonces.swap_remove(0), c).s } #[must_use] fn verify(&self, group_key: C::G, nonces: &[Vec], sum: C::F) -> Option { let sig = SchnorrSignature { R: nonces[0][0], s: sum }; Some(sig).filter(|sig| sig.verify(group_key, self.c.unwrap())) } fn verify_share( &self, verification_share: C::G, nonces: &[Vec], share: C::F, ) -> Result, ()> { Ok( SchnorrSignature:: { R: nonces[0][0], s: share } .batch_statements(verification_share, self.c.unwrap()) .to_vec(), ) } } ================================================ FILE: crypto/frost/src/curve/dalek.rs ================================================ use digest::Digest; use dalek_ff_group::Scalar; use ciphersuite::Ciphersuite; use crate::{curve::Curve, algorithm::Hram}; macro_rules! dalek_curve { ( $feature: literal, $Curve: ident, $Hram: ident, $CONTEXT: literal, $chal: literal ) => { pub use dalek_ff_group::$Curve; impl Curve for $Curve { const CONTEXT: &'static [u8] = $CONTEXT; } /// The challenge function for this ciphersuite. 
#[derive(Copy, Clone)] pub struct $Hram; impl Hram<$Curve> for $Hram { #[allow(non_snake_case)] fn hram(R: &<$Curve as Ciphersuite>::G, A: &<$Curve as Ciphersuite>::G, m: &[u8]) -> Scalar { let mut hash = <$Curve as Ciphersuite>::H::new(); if $chal.len() != 0 { hash.update(&[$CONTEXT.as_ref(), $chal].concat()); } Scalar::from_hash( hash.chain_update(&[&R.compress().to_bytes(), &A.compress().to_bytes(), m].concat()), ) } } }; } #[cfg(feature = "ristretto")] dalek_curve!("ristretto", Ristretto, IetfRistrettoHram, b"FROST-RISTRETTO255-SHA512-v1", b"chal"); #[cfg(feature = "ed25519")] dalek_curve!("ed25519", Ed25519, IetfEd25519Hram, b"FROST-ED25519-SHA512-v1", b""); ================================================ FILE: crypto/frost/src/curve/ed448.rs ================================================ use digest::Digest; use minimal_ed448::{Scalar, Point}; pub use minimal_ed448::Ed448; pub use ciphersuite::{group::GroupEncoding, Ciphersuite}; use crate::{curve::Curve, algorithm::Hram}; const CONTEXT: &[u8] = b"FROST-ED448-SHAKE256-v1"; impl Curve for Ed448 { const CONTEXT: &'static [u8] = CONTEXT; } // The RFC-8032 Ed448 challenge function. #[derive(Copy, Clone)] pub(crate) struct Ietf8032Ed448Hram; impl Ietf8032Ed448Hram { #[allow(non_snake_case)] pub(crate) fn hram(context: &[u8], R: &Point, A: &Point, m: &[u8]) -> Scalar { Scalar::wide_reduce( ::H::digest( [ &[b"SigEd448".as_ref(), &[0, u8::try_from(context.len()).unwrap()]].concat(), context, &[R.to_bytes().as_ref(), A.to_bytes().as_ref(), m].concat(), ] .concat(), ) .as_ref() .try_into() .unwrap(), ) } } /// The challenge function for FROST's Ed448 ciphersuite. 
#[derive(Copy, Clone)] pub struct IetfEd448Hram; impl Hram for IetfEd448Hram { #[allow(non_snake_case)] fn hram(R: &Point, A: &Point, m: &[u8]) -> Scalar { Ietf8032Ed448Hram::hram(&[], R, A, m) } } ================================================ FILE: crypto/frost/src/curve/kp256.rs ================================================ use ciphersuite::{group::GroupEncoding, Ciphersuite}; use crate::{curve::Curve, algorithm::Hram}; macro_rules! kp_curve { ( $feature: literal, $Curve: ident, $Hram: ident, $CONTEXT: literal ) => { pub use ciphersuite_kp256::$Curve; impl Curve for $Curve { const CONTEXT: &'static [u8] = $CONTEXT; } /// The challenge function for this ciphersuite. #[derive(Clone)] pub struct $Hram; impl Hram<$Curve> for $Hram { #[allow(non_snake_case)] fn hram( R: &<$Curve as Ciphersuite>::G, A: &<$Curve as Ciphersuite>::G, m: &[u8], ) -> <$Curve as Ciphersuite>::F { <$Curve as Curve>::hash_to_F( b"chal", &[R.to_bytes().as_ref(), A.to_bytes().as_ref(), m].concat(), ) } } }; } #[cfg(feature = "p256")] kp_curve!("p256", P256, IetfP256Hram, b"FROST-P256-SHA256-v1"); #[cfg(feature = "secp256k1")] kp_curve!("secp256k1", Secp256k1, IetfSecp256k1Hram, b"FROST-secp256k1-SHA256-v1"); ================================================ FILE: crypto/frost/src/curve/mod.rs ================================================ use core::ops::Deref; use std::io::{self, Read}; use rand_core::{RngCore, CryptoRng}; use zeroize::{Zeroize, Zeroizing}; use subtle::ConstantTimeEq; use digest::{Digest, Output}; pub use ciphersuite::{ group::{ ff::{Field, PrimeField}, Group, }, Ciphersuite, }; #[cfg(any(feature = "ristretto", feature = "ed25519"))] mod dalek; #[cfg(feature = "ristretto")] pub use dalek::{Ristretto, IetfRistrettoHram}; #[cfg(feature = "ed25519")] pub use dalek::{Ed25519, IetfEd25519Hram}; #[cfg(any(feature = "secp256k1", feature = "p256"))] mod kp256; #[cfg(feature = "secp256k1")] pub use kp256::{Secp256k1, IetfSecp256k1Hram}; #[cfg(feature = "p256")] pub use 
kp256::{P256, IetfP256Hram}; #[cfg(feature = "ed448")] mod ed448; #[cfg(feature = "ed448")] pub use ed448::{Ed448, IetfEd448Hram}; #[cfg(all(test, feature = "ed448"))] pub(crate) use ed448::Ietf8032Ed448Hram; /// FROST Ciphersuite. /// /// This excludes the signing algorithm specific H2, making this solely the curve, its associated /// hash function, and the functions derived from it. pub trait Curve: Ciphersuite { /// Context string for this curve. const CONTEXT: &'static [u8]; /// Hash the given dst and data to a byte vector. Used to instantiate H4 and H5. fn hash(dst: &[u8], data: &[u8]) -> Output { Self::H::digest([Self::CONTEXT, dst, data].concat()) } /// Field element from hash. Used during key gen and by other crates under Serai as a general /// utility. Used to instantiate H1 and H3. #[allow(non_snake_case)] fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F { ::hash_to_F(&[Self::CONTEXT, dst].concat(), msg) } /// Hash the message for the binding factor. H4 from the IETF draft. fn hash_msg(msg: &[u8]) -> Output { Self::hash(b"msg", msg) } /// Hash the commitments for the binding factor. H5 from the IETF draft. fn hash_commitments(commitments: &[u8]) -> Output { Self::hash(b"com", commitments) } /// Hash the commitments and message to calculate the binding factor. H1 from the IETF draft. // // This may return 0, which is invalid according to the FROST preprint, as all binding factors // are expected to be in the multiplicative subgroup. This isn't a practical issue, as there's a // negligible probability of this returning 0. // // When raised in // https://github.com/cfrg/draft-irtf-cfrg-frost/issues/451#issuecomment-1715985505, // the negligible probability was seen as sufficient reason not to edit the spec to be robust in // this regard. // // While that decision may be disagreeable, this library cannot implement a robust scheme while 
Following the specification is preferred to being robust against // an impractical probability enabling a complex attack (made infeasible by the impractical // probability required). // // We could still panic on the 0-hash, preferring correctness to liveliness. Finding the 0-hash // is as computationally complex as simply calculating the group key's discrete log however, // making it not worth having a panic (as this library is expected not to panic). fn hash_binding_factor(binding: &[u8]) -> Self::F { ::hash_to_F(b"rho", binding) } /// Securely generate a random nonce. H3 from the IETF draft. fn random_nonce( secret: &Zeroizing, rng: &mut R, ) -> Zeroizing { let mut seed = Zeroizing::new(vec![0; 32]); rng.fill_bytes(seed.as_mut()); let mut repr = secret.to_repr(); // Perform rejection sampling until we reach a non-zero nonce // While the IETF spec doesn't explicitly require this, generating a zero nonce will produce // commitments which will be rejected for being zero (and if they were used, leak the secret // share) // Rejection sampling here will prevent an honest participant from ever generating 'malicious' // values and ensure safety let mut res; while { seed.extend(repr.as_ref()); res = Zeroizing::new(::hash_to_F(b"nonce", seed.deref())); res.ct_eq(&Self::F::ZERO).into() } { seed = Zeroizing::new(vec![0; 32]); rng.fill_bytes(&mut seed); } repr.as_mut().zeroize(); res } /// Read a point from a reader, rejecting identity. #[allow(non_snake_case)] fn read_G(reader: &mut R) -> io::Result { let res = ::read_G(reader)?; if res.is_identity().into() { Err(io::Error::other("identity point"))?; } Ok(res) } } ================================================ FILE: crypto/frost/src/lib.rs ================================================ #![cfg_attr(docsrs, feature(doc_cfg))] #![doc = include_str!("../README.md")] use core::fmt::Debug; use std::collections::HashMap; use thiserror::Error; /// Distributed key generation protocol. 
pub use dkg::{self, Participant, ThresholdParams, ThresholdKeys, ThresholdView}; /// Curve trait and provided curves/HRAMs, forming various ciphersuites. pub mod curve; use curve::Curve; /// Algorithm for the signing process. pub mod algorithm; mod nonce; /// Threshold signing protocol. pub mod sign; /// Tests for application-provided curves and algorithms. #[cfg(any(test, feature = "tests"))] pub mod tests; /// Various errors possible during signing. #[derive(Clone, Copy, PartialEq, Eq, Debug, Error)] pub enum FrostError { #[error("internal error: {0}")] InternalError(&'static str), #[error("invalid participant (0 < participant <= {0}, yet participant is {1})")] InvalidParticipant(u16, Participant), #[error("invalid signing set ({0})")] InvalidSigningSet(&'static str), #[error("invalid participant quantity (expected {0}, got {1})")] InvalidParticipantQuantity(usize, usize), #[error("duplicated participant ({0})")] DuplicatedParticipant(Participant), #[error("missing participant {0}")] MissingParticipant(Participant), #[error("invalid preprocess (participant {0})")] InvalidPreprocess(Participant), #[error("invalid share (participant {0})")] InvalidShare(Participant), } /// Validate a map of values to have the expected participants. 
pub fn validate_map( map: &HashMap, included: &[Participant], ours: Participant, ) -> Result<(), FrostError> { if (map.len() + 1) != included.len() { Err(FrostError::InvalidParticipantQuantity(included.len(), map.len() + 1))?; } for included in included { if *included == ours { if map.contains_key(included) { Err(FrostError::DuplicatedParticipant(*included))?; } continue; } if !map.contains_key(included) { Err(FrostError::MissingParticipant(*included))?; } } Ok(()) } ================================================ FILE: crypto/frost/src/nonce.rs ================================================ // FROST defines its nonce as sum(Di, Ei * bi) // // In order for this library to be robust, it supports generating an arbitrary amount of nonces, // each against an arbitrary list of generators // // Each nonce remains of the form (d, e) and made into a proper nonce with d + (e * b) use core::ops::Deref; use std::{ io::{self, Read, Write}, collections::HashMap, }; use rand_core::{RngCore, CryptoRng}; use zeroize::{Zeroize, Zeroizing}; use transcript::Transcript; use ciphersuite::group::{ff::PrimeField, Group, GroupEncoding}; use multiexp::multiexp_vartime; use crate::{curve::Curve, Participant}; // Each nonce is actually a pair of random scalars, notated as d, e under the FROST paper // This is considered a single nonce as r = d + be #[derive(Clone, Zeroize)] pub(crate) struct Nonce(pub(crate) [Zeroizing; 2]); // Commitments to a specific generator for this binomial nonce #[derive(Copy, Clone, PartialEq, Eq)] pub(crate) struct GeneratorCommitments(pub(crate) [C::G; 2]); impl GeneratorCommitments { fn read(reader: &mut R) -> io::Result> { Ok(GeneratorCommitments([::read_G(reader)?, ::read_G(reader)?])) } fn write(&self, writer: &mut W) -> io::Result<()> { writer.write_all(self.0[0].to_bytes().as_ref())?; writer.write_all(self.0[1].to_bytes().as_ref()) } } // A single nonce's commitments #[derive(Clone, PartialEq, Eq)] pub(crate) struct NonceCommitments { // Called generators 
as these commitments are indexed by generator later on // So to get the commitments for the first generator, it'd be commitments.generators[0] pub(crate) generators: Vec>, } impl NonceCommitments { pub(crate) fn new( rng: &mut R, secret_share: &Zeroizing, generators: &[C::G], ) -> (Nonce, NonceCommitments) { let nonce = Nonce::([ C::random_nonce(secret_share, &mut *rng), C::random_nonce(secret_share, &mut *rng), ]); let mut commitments = Vec::with_capacity(generators.len()); for generator in generators { commitments.push(GeneratorCommitments([ *generator * nonce.0[0].deref(), *generator * nonce.0[1].deref(), ])); } (nonce, NonceCommitments { generators: commitments }) } fn read(reader: &mut R, generators: &[C::G]) -> io::Result> { Ok(NonceCommitments { generators: (0 .. generators.len()) .map(|_| GeneratorCommitments::read(reader)) .collect::>()?, }) } fn write(&self, writer: &mut W) -> io::Result<()> { for generator in &self.generators { generator.write(writer)?; } Ok(()) } fn transcript(&self, t: &mut T) { t.domain_separate(b"nonce"); for commitments in &self.generators { t.append_message(b"commitment_D", commitments.0[0].to_bytes()); t.append_message(b"commitment_E", commitments.0[1].to_bytes()); } } } /// Commitments for all the nonces across all their generators. 
#[derive(Clone, PartialEq, Eq)] pub(crate) struct Commitments { // Called nonces as these commitments are indexed by nonce // So to get the commitments for the first nonce, it'd be commitments.nonces[0] pub(crate) nonces: Vec>, } impl Commitments { pub(crate) fn new( rng: &mut R, secret_share: &Zeroizing, planned_nonces: &[Vec], ) -> (Vec>, Commitments) { let mut nonces = vec![]; let mut commitments = vec![]; for generators in planned_nonces { let (nonce, these_commitments): (Nonce, _) = NonceCommitments::new(&mut *rng, secret_share, generators); nonces.push(nonce); commitments.push(these_commitments); } (nonces, Commitments { nonces: commitments }) } pub(crate) fn transcript(&self, t: &mut T) { t.domain_separate(b"commitments"); for nonce in &self.nonces { nonce.transcript(t); } } pub(crate) fn read(reader: &mut R, generators: &[Vec]) -> io::Result { let nonces = (0 .. generators.len()) .map(|i| NonceCommitments::read(reader, &generators[i])) .collect::>, _>>()?; Ok(Commitments { nonces }) } pub(crate) fn write(&self, writer: &mut W) -> io::Result<()> { for nonce in &self.nonces { nonce.write(writer)?; } Ok(()) } } pub(crate) struct IndividualBinding { commitments: Commitments, binding_factors: Option>, } pub(crate) struct BindingFactor(pub(crate) HashMap>); impl BindingFactor { pub(crate) fn insert(&mut self, i: Participant, commitments: Commitments) { self.0.insert(i, IndividualBinding { commitments, binding_factors: None }); } pub(crate) fn calculate_binding_factors(&mut self, transcript: &T) { for (l, binding) in &mut self.0 { let mut transcript = transcript.clone(); transcript.append_message(b"participant", C::F::from(u64::from(u16::from(*l))).to_repr()); // It *should* be perfectly fine to reuse a binding factor for multiple nonces // This generates a binding factor per nonce just to ensure it never comes up as a question binding.binding_factors = Some( (0 .. 
binding.commitments.nonces.len()) .map(|_| C::hash_binding_factor(transcript.challenge(b"rho").as_ref())) .collect(), ); } } pub(crate) fn binding_factors(&self, i: Participant) -> &[C::F] { self.0[&i].binding_factors.as_ref().unwrap() } // Get the bound nonces for a specific party pub(crate) fn bound(&self, l: Participant) -> Vec> { let mut res = vec![]; for (i, (nonce, rho)) in self.0[&l].commitments.nonces.iter().zip(self.binding_factors(l).iter()).enumerate() { res.push(vec![]); for generator in &nonce.generators { res[i].push(generator.0[0] + (generator.0[1] * rho)); } } res } // Get the nonces for this signing session pub(crate) fn nonces(&self, planned_nonces: &[Vec]) -> Vec> { let mut nonces = Vec::with_capacity(planned_nonces.len()); for n in 0 .. planned_nonces.len() { nonces.push(Vec::with_capacity(planned_nonces[n].len())); for g in 0 .. planned_nonces[n].len() { #[allow(non_snake_case)] let mut D = C::G::identity(); let mut statements = Vec::with_capacity(self.0.len()); #[allow(non_snake_case)] for IndividualBinding { commitments, binding_factors } in self.0.values() { D += commitments.nonces[n].generators[g].0[0]; statements .push((binding_factors.as_ref().unwrap()[n], commitments.nonces[n].generators[g].0[1])); } nonces[n].push(D + multiexp_vartime(&statements)); } } nonces } } ================================================ FILE: crypto/frost/src/sign.rs ================================================ use core::{ops::Deref, fmt::Debug}; use std::{ io::{self, Read, Write}, collections::HashMap, }; use rand_core::{RngCore, CryptoRng, SeedableRng}; use rand_chacha::ChaCha20Rng; use zeroize::{Zeroize, Zeroizing}; use transcript::Transcript; use ciphersuite::group::{ ff::{Field, PrimeField}, GroupEncoding, }; use multiexp::BatchVerifier; use crate::{ curve::Curve, Participant, FrostError, ThresholdParams, ThresholdKeys, ThresholdView, algorithm::{WriteAddendum, Addendum, Algorithm}, validate_map, }; pub(crate) use crate::nonce::*; /// Trait enabling 
writing preprocesses and signature shares. pub trait Writable { fn write(&self, writer: &mut W) -> io::Result<()>; fn serialize(&self) -> Vec { let mut buf = vec![]; self.write(&mut buf).unwrap(); buf } } impl Writable for Vec { fn write(&self, writer: &mut W) -> io::Result<()> { for w in self { w.write(writer)?; } Ok(()) } } // Pairing of an Algorithm with a ThresholdKeys instance. #[derive(Zeroize)] struct Params> { // Skips the algorithm due to being too large a bound to feasibly enforce on users #[zeroize(skip)] algorithm: A, keys: ThresholdKeys, } impl> Params { fn new(algorithm: A, keys: ThresholdKeys) -> Params { Params { algorithm, keys } } fn multisig_params(&self) -> ThresholdParams { self.keys.params() } } /// Preprocess for an instance of the FROST signing protocol. #[derive(Clone, PartialEq, Eq)] pub struct Preprocess { pub(crate) commitments: Commitments, /// The addendum used by the algorithm. pub addendum: A, } impl Writable for Preprocess { fn write(&self, writer: &mut W) -> io::Result<()> { self.commitments.write(writer)?; self.addendum.write(writer) } } /// A cached preprocess. /// /// A preprocess MUST only be used once. Reuse will enable third-party recovery of your private /// key share. Additionally, this MUST be handled with the same security as your private key share, /// as knowledge of it also enables recovery. // Directly exposes the [u8; 32] member to avoid needing to route through std::io interfaces. // Still uses Zeroizing internally so when users grab it, they have a higher likelihood of // appreciating how to handle it and don't immediately start copying it just by grabbing it. #[derive(Zeroize)] pub struct CachedPreprocess(pub Zeroizing<[u8; 32]>); /// Trait for the initial state machine of a two-round signing protocol. pub trait PreprocessMachine: Send { /// Preprocess message for this machine. type Preprocess: Clone + PartialEq + Writable; /// Signature produced by this machine. 
type Signature: Clone + PartialEq + Debug; /// SignMachine this PreprocessMachine turns into. type SignMachine: SignMachine; /// Perform the preprocessing round required in order to sign. /// Returns a preprocess message to be broadcast to all participants, over an authenticated /// channel. fn preprocess(self, rng: &mut R) -> (Self::SignMachine, Self::Preprocess); } /// State machine which manages signing for an arbitrary signature algorithm. pub struct AlgorithmMachine> { params: Params, } impl> AlgorithmMachine { /// Creates a new machine to generate a signature with the specified keys. pub fn new(algorithm: A, keys: ThresholdKeys) -> AlgorithmMachine { AlgorithmMachine { params: Params::new(algorithm, keys) } } fn seeded_preprocess( self, seed: CachedPreprocess, ) -> (AlgorithmSignMachine, Preprocess) { let mut params = self.params; let mut rng = ChaCha20Rng::from_seed(*seed.0); let (nonces, commitments) = Commitments::new::<_>( &mut rng, params.keys.original_secret_share(), ¶ms.algorithm.nonces(), ); let addendum = params.algorithm.preprocess_addendum(&mut rng, ¶ms.keys); let preprocess = Preprocess { commitments, addendum }; // Also obtain entropy to randomly sort the included participants if we need to identify blame let mut blame_entropy = [0; 32]; rng.fill_bytes(&mut blame_entropy); ( AlgorithmSignMachine { params, seed, nonces, preprocess: preprocess.clone(), blame_entropy }, preprocess, ) } #[cfg(any(test, feature = "tests"))] pub(crate) fn unsafe_override_preprocess( self, nonces: Vec>, preprocess: Preprocess, ) -> AlgorithmSignMachine { AlgorithmSignMachine { params: self.params, seed: CachedPreprocess(Zeroizing::new([0; 32])), nonces, preprocess, // Uses 0s since this is just used to protect against a malicious participant from // deliberately increasing the amount of time needed to identify them (and is accordingly // not necessary to function) blame_entropy: [0; 32], } } } impl> PreprocessMachine for AlgorithmMachine { type Preprocess = Preprocess; 
type Signature = A::Signature; type SignMachine = AlgorithmSignMachine; fn preprocess( self, rng: &mut R, ) -> (Self::SignMachine, Preprocess) { let mut seed = CachedPreprocess(Zeroizing::new([0; 32])); rng.fill_bytes(seed.0.as_mut()); self.seeded_preprocess(seed) } } /// Share of a signature produced via FROST. #[derive(Clone, PartialEq, Eq)] pub struct SignatureShare(C::F); impl Writable for SignatureShare { fn write(&self, writer: &mut W) -> io::Result<()> { writer.write_all(self.0.to_repr().as_ref()) } } #[cfg(any(test, feature = "tests"))] impl SignatureShare { pub(crate) fn invalidate(&mut self) { self.0 += C::F::ONE; } } /// Trait for the second machine of a two-round signing protocol. pub trait SignMachine: Send + Sync + Sized { /// Params used to instantiate this machine which can be used to rebuild from a cache. type Params; /// Keys used for signing operations. type Keys; /// Preprocess message for this machine. type Preprocess: Clone + PartialEq + Writable; /// SignatureShare message for this machine. type SignatureShare: Clone + PartialEq + Writable; /// SignatureMachine this SignMachine turns into. type SignatureMachine: SignatureMachine; /// Cache this preprocess for usage later. /// /// This cached preprocess MUST only be used once. Reuse of it enables recovery of your private /// key share. Third-party recovery of a cached preprocess also enables recovery of your private /// key share, so this MUST be treated with the same security as your private key share. fn cache(self) -> CachedPreprocess; /// Create a sign machine from a cached preprocess. /// /// After this, the preprocess must be deleted so it's never reused. Any reuse will presumably /// cause the signer to leak their secret share. fn from_cache( params: Self::Params, keys: Self::Keys, cache: CachedPreprocess, ) -> (Self, Self::Preprocess); /// Read a Preprocess message. /// /// Despite taking self, this does not save the preprocess. It must be externally cached and /// passed into sign. 
  // NOTE(review): generic parameter lists were stripped by extraction throughout this span
  // (e.g. `HashMap::::with_capacity`, `collect::>()`); tokens are kept exactly as found.
  fn read_preprocess(&self, reader: &mut R) -> io::Result;

  /// Sign a message.
  ///
  /// Takes in the participants' preprocess messages. Returns the signature share to be broadcast
  /// to all participants, over an authenticated channel. The parties who participate here will
  /// become the signing set for this session.
  fn sign(
    self,
    commitments: HashMap,
    msg: &[u8],
  ) -> Result<(Self::SignatureMachine, Self::SignatureShare), FrostError>;
}

/// Next step of the state machine for the signing process.
#[derive(Zeroize)]
pub struct AlgorithmSignMachine> {
  params: Params,
  seed: CachedPreprocess,

  pub(crate) nonces: Vec>,
  // Skips the preprocess due to being too large a bound to feasibly enforce on users
  #[zeroize(skip)]
  pub(crate) preprocess: Preprocess,
  pub(crate) blame_entropy: [u8; 32],
}

impl> SignMachine for AlgorithmSignMachine {
  type Params = A;
  type Keys = ThresholdKeys;
  type Preprocess = Preprocess;
  type SignatureShare = SignatureShare;
  type SignatureMachine = AlgorithmSignatureMachine;

  // Caching hands back the seed; seeded_preprocess can deterministically rebuild everything else.
  fn cache(self) -> CachedPreprocess {
    self.seed
  }

  fn from_cache(
    algorithm: A,
    keys: ThresholdKeys,
    cache: CachedPreprocess,
  ) -> (Self, Self::Preprocess) {
    AlgorithmMachine::new(algorithm, keys).seeded_preprocess(cache)
  }

  fn read_preprocess(&self, reader: &mut R) -> io::Result {
    Ok(Preprocess {
      commitments: Commitments::read::<_>(reader, &self.params.algorithm.nonces())?,
      addendum: self.params.algorithm.read_addendum(reader)?,
    })
  }

  fn sign(
    mut self,
    mut preprocesses: HashMap>,
    msg: &[u8],
  ) -> Result<(Self::SignatureMachine, SignatureShare), FrostError> {
    let multisig_params = self.params.multisig_params();

    // The signing set is ourself plus everyone whose preprocess we received, sorted by index.
    let mut included = Vec::with_capacity(preprocesses.len() + 1);
    included.push(multisig_params.i());
    for l in preprocesses.keys() {
      included.push(*l);
    }
    included.sort_unstable();

    // Included < threshold
    if included.len() < usize::from(multisig_params.t()) {
      Err(FrostError::InvalidSigningSet("not enough signers"))?;
    }
    // OOB index
    // (sorted, so the last element is the maximum participant index)
    if u16::from(included[included.len() - 1]) > multisig_params.n() {
      Err(FrostError::InvalidParticipant(multisig_params.n(), included[included.len() - 1]))?;
    }
    // Same signer included multiple times
    // (adjacent-equality scan is sufficient since `included` is sorted)
    for i in 0 .. (included.len() - 1) {
      if included[i] == included[i + 1] {
        Err(FrostError::DuplicatedParticipant(included[i]))?;
      }
    }

    let view = self.params.keys.view(included.clone()).unwrap();
    validate_map(&preprocesses, &included, multisig_params.i())?;

    {
      // Domain separate FROST
      self.params.algorithm.transcript().domain_separate(b"FROST");
    }

    let nonces = self.params.algorithm.nonces();
    #[allow(non_snake_case)]
    let mut B = BindingFactor(HashMap::::with_capacity(included.len()));
    {
      // Parse the preprocesses
      // Every participant's commitments and addendum are bound into the transcript, in index
      // order, before any binding factors are derived.
      for l in &included {
        {
          self
            .params
            .algorithm
            .transcript()
            .append_message(b"participant", C::F::from(u64::from(u16::from(*l))).to_repr());
        }

        if *l == self.params.keys.params().i() {
          // Our own preprocess was retained at preprocess time; use it rather than a received one.
          let commitments = self.preprocess.commitments.clone();
          commitments.transcript(self.params.algorithm.transcript());

          let addendum = self.preprocess.addendum.clone();
          {
            let mut buf = vec![];
            addendum.write(&mut buf).unwrap();
            self.params.algorithm.transcript().append_message(b"addendum", buf);
          }

          B.insert(*l, commitments);
          self.params.algorithm.process_addendum(&view, *l, addendum)?;
        } else {
          let preprocess = preprocesses.remove(l).unwrap();
          preprocess.commitments.transcript(self.params.algorithm.transcript());
          {
            let mut buf = vec![];
            preprocess.addendum.write(&mut buf).unwrap();
            self.params.algorithm.transcript().append_message(b"addendum", buf);
          }

          B.insert(*l, preprocess.commitments);
          self.params.algorithm.process_addendum(&view, *l, preprocess.addendum)?;
        }
      }

      // Re-format into the FROST-expected rho transcript
      let mut rho_transcript = A::Transcript::new(b"FROST_rho");
      rho_transcript.append_message(b"group_key", self.params.keys.group_key().to_bytes());
      rho_transcript.append_message(b"message", C::hash_msg(msg));
      rho_transcript.append_message(
        b"preprocesses",
        C::hash_commitments(self.params.algorithm.transcript().challenge(b"preprocesses").as_ref()),
      );

      // Generate the per-signer binding factors
      B.calculate_binding_factors(&rho_transcript);

      // Merge the rho transcript back into the global one to ensure its advanced, while
      // simultaneously committing to everything
      self
        .params
        .algorithm
        .transcript()
        .append_message(b"rho_transcript", rho_transcript.challenge(b"merge"));
    }

    #[allow(non_snake_case)]
    let Rs = B.nonces(&nonces);

    // Fold our binding factors into our nonces: actual = base_hiding + binding * base_binding
    // (drained so the raw nonces cannot be reused after this point).
    let our_binding_factors = B.binding_factors(multisig_params.i());
    let nonces = self
      .nonces
      .drain(..)
      .enumerate()
      .map(|(n, nonces)| {
        let [base, mut actual] = nonces.0;
        *actual *= our_binding_factors[n];
        *actual += base.deref();
        actual
      })
      .collect::>();

    let share = self.params.algorithm.sign_share(&view, &Rs, nonces, msg);

    Ok((
      AlgorithmSignatureMachine {
        params: self.params,
        view,
        B,
        Rs,
        share,
        blame_entropy: self.blame_entropy,
      },
      SignatureShare(share),
    ))
  }
}

/// Trait for the final machine of a two-round signing protocol.
pub trait SignatureMachine: Send + Sync {
  /// SignatureShare message for this machine.
  type SignatureShare: Clone + PartialEq + Writable;

  /// Read a Signature Share message.
  fn read_share(&self, reader: &mut R) -> io::Result;

  /// Complete signing.
  /// Takes in everyone elses' shares. Returns the signature.
  fn complete(self, shares: HashMap) -> Result;
}

/// Final step of the state machine for the signing process.
///
/// This may panic if an invalid algorithm is provided.
// NOTE(review): generic parameter lists in this span were stripped by extraction
// (e.g. `io::Result>`, bare `Result`); tokens are preserved exactly as found.
#[allow(non_snake_case)]
pub struct AlgorithmSignatureMachine> {
  params: Params,
  view: ThresholdView,
  B: BindingFactor,
  Rs: Vec>,
  share: C::F,
  blame_entropy: [u8; 32],
}

impl> SignatureMachine for AlgorithmSignatureMachine {
  type SignatureShare = SignatureShare;

  fn read_share(&self, reader: &mut R) -> io::Result> {
    Ok(SignatureShare(C::read_F(reader)?))
  }

  // Aggregate everyone's shares, try verifying the combined signature first, and only fall back
  // to per-share verification (with randomized batch blame) if aggregation fails.
  fn complete(
    self,
    mut shares: HashMap>,
  ) -> Result {
    let params = self.params.multisig_params();
    validate_map(&shares, self.view.included(), params.i())?;

    // Sum all shares (including our own) and keep each response for potential blame below.
    let mut responses = HashMap::new();
    responses.insert(params.i(), self.share);
    let mut sum = self.share;
    for (l, share) in shares.drain() {
      responses.insert(l, share.0);
      sum += share.0;
    }

    // Perform signature validation instead of individual share validation
    // For the success route, which should be much more frequent, this should be faster
    // It also acts as an integrity check of this library's signing function
    if let Some(sig) = self.params.algorithm.verify(self.view.group_key(), &self.Rs, sum) {
      return Ok(sig);
    }

    // We could remove blame_entropy by taking in an RNG here
    // Considering we don't need any RNG for a valid signature, and we only use the RNG here for
    // performance reasons, it doesn't feel worthwhile to include as an argument to every
    // implementor of the trait
    let mut rng = ChaCha20Rng::from_seed(self.blame_entropy);
    let mut batch = BatchVerifier::new(self.view.included().len());
    for l in self.view.included() {
      if let Ok(statements) = self.params.algorithm.verify_share(
        self.view.verification_share(*l),
        &self.B.bound(*l),
        responses[l],
      ) {
        batch.queue(&mut rng, *l, statements);
      } else {
        Err(FrostError::InvalidShare(*l))?;
      }
    }
    // Batch verification identifies (in variable time) which participant's share failed.
    if let Err(l) = batch.verify_vartime_with_vartime_blame() {
      Err(FrostError::InvalidShare(l))?;
    }

    // If everyone has a valid share, and there were enough participants, this should've worked
    // The only known way to cause this, for valid parameters/algorithms, is to deserialize a
    // semantically invalid FrostKeys
Err(FrostError::InternalError("everyone had a valid share yet the signature was still invalid")) } } ================================================ FILE: crypto/frost/src/tests/literal/dalek.rs ================================================ use rand_core::OsRng; use crate::{ curve, tests::vectors::{Vectors, test_with_vectors}, }; #[cfg(feature = "ristretto")] #[test] fn ristretto_vectors() { test_with_vectors::<_, curve::Ristretto, curve::IetfRistrettoHram>( &mut OsRng, &Vectors::from( serde_json::from_str::(include_str!( "vectors/frost-ristretto255-sha512.json" )) .unwrap(), ), ); } #[cfg(feature = "ed25519")] #[test] fn ed25519_vectors() { test_with_vectors::<_, curve::Ed25519, curve::IetfEd25519Hram>( &mut OsRng, &Vectors::from( serde_json::from_str::(include_str!("vectors/frost-ed25519-sha512.json")) .unwrap(), ), ); } ================================================ FILE: crypto/frost/src/tests/literal/ed448.rs ================================================ use rand_core::OsRng; use ciphersuite::Ciphersuite; use schnorr::SchnorrSignature; use crate::{ curve::{Ed448, Ietf8032Ed448Hram, IetfEd448Hram}, tests::vectors::{Vectors, test_with_vectors}, }; // This is a vector from RFC 8032 to sanity check the HRAM is properly implemented // The RFC 8032 Ed448 HRAM is much more complex than the other HRAMs, hence why it's helpful to // have additional testing for it // Additionally, FROST, despite being supposed to use the RFC 8032 HRAMs, originally applied // Ed25519's HRAM to both Ed25519 and Ed448 // This test was useful when proposing the corrections to the spec to demonstrate the correctness // the new algorithm/vectors // While we could test all Ed448 vectors here, this is sufficient for sanity #[test] fn ed448_8032_vector() { let context = hex::decode("666f6f").unwrap(); #[allow(non_snake_case)] let A = Ed448::read_G::<&[u8]>( &mut hex::decode( "43ba28f430cdff456ae531545f7ecd0ac834a55d9358c0372bfa0c6c".to_owned() + 
"6798c0866aea01eb00742802b8438ea4cb82169c235160627b4c3a94" + "80", ) .unwrap() .as_ref(), ) .unwrap(); let msg = hex::decode("03").unwrap(); let sig = hex::decode( "d4f8f6131770dd46f40867d6fd5d5055de43541f8c5e35abbcd001b3".to_owned() + "2a89f7d2151f7647f11d8ca2ae279fb842d607217fce6e042f6815ea" + "00" + "0c85741de5c8da1144a6a1aba7f96de42505d7a7298524fda538fccb" + "bb754f578c1cad10d54d0d5428407e85dcbc98a49155c13764e66c3c" + "00", ) .unwrap(); #[allow(non_snake_case)] let R = Ed448::read_G::<&[u8]>(&mut sig.as_ref()).unwrap(); let s = Ed448::read_F::<&[u8]>(&mut &sig[57 ..]).unwrap(); assert!( SchnorrSignature:: { R, s }.verify(A, Ietf8032Ed448Hram::hram(&context, &R, &A, &msg)) ); } #[test] fn ed448_vectors() { test_with_vectors::<_, Ed448, IetfEd448Hram>( &mut OsRng, &Vectors::from( serde_json::from_str::(include_str!("vectors/frost-ed448-shake256.json")) .unwrap(), ), ); } ================================================ FILE: crypto/frost/src/tests/literal/kp256.rs ================================================ use rand_core::OsRng; use crate::tests::vectors::{Vectors, test_with_vectors}; #[cfg(feature = "secp256k1")] use crate::curve::{Secp256k1, IetfSecp256k1Hram}; #[cfg(feature = "p256")] use crate::curve::{P256, IetfP256Hram}; #[cfg(feature = "secp256k1")] #[test] fn secp256k1_vectors() { test_with_vectors::<_, Secp256k1, IetfSecp256k1Hram>( &mut OsRng, &Vectors::from( serde_json::from_str::(include_str!( "vectors/frost-secp256k1-sha256.json" )) .unwrap(), ), ); } #[cfg(feature = "p256")] #[test] fn p256_vectors() { test_with_vectors::<_, P256, IetfP256Hram>( &mut OsRng, &Vectors::from( serde_json::from_str::(include_str!("vectors/frost-p256-sha256.json")) .unwrap(), ), ); } ================================================ FILE: crypto/frost/src/tests/literal/mod.rs ================================================ #[cfg(any(feature = "ristretto", feature = "ed25519"))] mod dalek; #[cfg(any(feature = "secp256k1", feature = "p256"))] mod kp256; #[cfg(feature = 
"ed448")] mod ed448; ================================================ FILE: crypto/frost/src/tests/literal/vectors/frost-ed25519-sha512.json ================================================ { "config": { "MAX_PARTICIPANTS": "3", "NUM_PARTICIPANTS": "2", "MIN_PARTICIPANTS": "2", "name": "FROST(Ed25519, SHA-512)", "group": "ed25519", "hash": "SHA-512" }, "inputs": { "participant_list": [ 1, 3 ], "group_secret_key": "7b1c33d3f5291d85de664833beb1ad469f7fb6025a0ec78b3a790c6e13a98304", "group_public_key": "15d21ccd7ee42959562fc8aa63224c8851fb3ec85a3faf66040d380fb9738673", "message": "74657374", "share_polynomial_coefficients": [ "178199860edd8c62f5212ee91eff1295d0d670ab4ed4506866bae57e7030b204" ], "participant_shares": [ { "identifier": 1, "participant_share": "929dcc590407aae7d388761cddb0c0db6f5627aea8e217f4a033f2ec83d93509" }, { "identifier": 2, "participant_share": "a91e66e012e4364ac9aaa405fcafd370402d9859f7b6685c07eed76bf409e80d" }, { "identifier": 3, "participant_share": "d3cb090a075eb154e82fdb4b3cb507f110040905468bb9c46da8bdea643a9a02" } ] }, "round_one_outputs": { "outputs": [ { "identifier": 1, "hiding_nonce_randomness": "0fd2e39e111cdc266f6c0f4d0fd45c947761f1f5d3cb583dfcb9bbaf8d4c9fec", "binding_nonce_randomness": "69cd85f631d5f7f2721ed5e40519b1366f340a87c2f6856363dbdcda348a7501", "hiding_nonce": "812d6104142944d5a55924de6d49940956206909f2acaeedecda2b726e630407", "binding_nonce": "b1110165fc2334149750b28dd813a39244f315cff14d4e89e6142f262ed83301", "hiding_nonce_commitment": "b5aa8ab305882a6fc69cbee9327e5a45e54c08af61ae77cb8207be3d2ce13de3", "binding_nonce_commitment": "67e98ab55aa310c3120418e5050c9cf76cf387cb20ac9e4b6fdb6f82a469f932", "binding_factor_input": 
"15d21ccd7ee42959562fc8aa63224c8851fb3ec85a3faf66040d380fb9738673504df914fa965023fb75c25ded4bb260f417de6d32e5c442c6ba313791cc9a4948d6273e8d3511f93348ea7a708a9b862bc73ba2a79cfdfe07729a193751cbc973af46d8ac3440e518d4ce440a0e7d4ad5f62ca8940f32de6d8dc00fc12c660b817d587d82f856d277ce6473cae6d2f5763f7da2e8b4d799a3f3e725d4522ec70100000000000000000000000000000000000000000000000000000000000000", "binding_factor": "f2cb9d7dd9beff688da6fcc83fa89046b3479417f47f55600b106760eb3b5603" }, { "identifier": 3, "hiding_nonce_randomness": "86d64a260059e495d0fb4fcc17ea3da7452391baa494d4b00321098ed2a0062f", "binding_nonce_randomness": "13e6b25afb2eba51716a9a7d44130c0dbae0004a9ef8d7b5550c8a0e07c61775", "hiding_nonce": "c256de65476204095ebdc01bd11dc10e57b36bc96284595b8215222374f99c0e", "binding_nonce": "243d71944d929063bc51205714ae3c2218bd3451d0214dfb5aeec2a90c35180d", "hiding_nonce_commitment": "cfbdb165bd8aad6eb79deb8d287bcc0ab6658ae57fdcc98ed12c0669e90aec91", "binding_nonce_commitment": "7487bc41a6e712eea2f2af24681b58b1cf1da278ea11fe4e8b78398965f13552", "binding_factor_input": "15d21ccd7ee42959562fc8aa63224c8851fb3ec85a3faf66040d380fb9738673504df914fa965023fb75c25ded4bb260f417de6d32e5c442c6ba313791cc9a4948d6273e8d3511f93348ea7a708a9b862bc73ba2a79cfdfe07729a193751cbc973af46d8ac3440e518d4ce440a0e7d4ad5f62ca8940f32de6d8dc00fc12c660b817d587d82f856d277ce6473cae6d2f5763f7da2e8b4d799a3f3e725d4522ec70300000000000000000000000000000000000000000000000000000000000000", "binding_factor": "b087686bf35a13f3dc78e780a34b0fe8a77fef1b9938c563f5573d71d8d7890f" } ] }, "round_two_outputs": { "outputs": [ { "identifier": 1, "sig_share": "001719ab5a53ee1a12095cd088fd149702c0720ce5fd2f29dbecf24b7281b603" }, { "identifier": 3, "sig_share": "bd86125de990acc5e1f13781d8e32c03a9bbd4c53539bbc106058bfd14326007" } ] }, "final_output": { "sig": "36282629c383bb820a88b71cae937d41f2f2adfcc3d02e55507e2fb9e2dd3cbebd9d2b0844e49ae0f3fa935161e1419aab7b47d21a37ebeae1f17d4987b3160b" } } 
================================================ FILE: crypto/frost/src/tests/literal/vectors/frost-ed448-shake256.json ================================================ { "config": { "MAX_PARTICIPANTS": "3", "NUM_PARTICIPANTS": "2", "MIN_PARTICIPANTS": "2", "name": "FROST(Ed448, SHAKE256)", "group": "ed448", "hash": "SHAKE256" }, "inputs": { "participant_list": [ 1, 3 ], "group_secret_key": "6298e1eef3c379392caaed061ed8a31033c9e9e3420726f23b404158a401cd9df24632adfe6b418dc942d8a091817dd8bd70e1c72ba52f3c00", "group_public_key": "3832f82fda00ff5365b0376df705675b63d2a93c24c6e81d40801ba265632be10f443f95968fadb70d10786827f30dc001c8d0f9b7c1d1b000", "message": "74657374", "share_polynomial_coefficients": [ "dbd7a514f7a731976620f0436bd135fe8dddc3fadd6e0d13dbd58a1981e587d377d48e0b7ce4e0092967c5e85884d0275a7a740b6abdcd0500" ], "participant_shares": [ { "identifier": 1, "participant_share": "4a2b2f5858a932ad3d3b18bd16e76ced3070d72fd79ae4402df201f525e754716a1bc1b87a502297f2a99d89ea054e0018eb55d39562fd0100" }, { "identifier": 2, "participant_share": "2503d56c4f516444a45b080182b8a2ebbe4d9b2ab509f25308c88c0ea7ccdc44e2ef4fc4f63403a11b116372438a1e287265cadeff1fcb0700" }, { "identifier": 3, "participant_share": "00db7a8146f995db0a7cf844ed89d8e94c2b5f259378ff66e39d172828b264185ac4decf7219e4aa4478285b9c0eef4fccdf3eea69dd980d00" } ] }, "round_one_outputs": { "outputs": [ { "identifier": 1, "hiding_nonce_randomness": "9cda90c98863ef3141b75f09375757286b4bc323dd61aeb45c07de45e4937bbd", "binding_nonce_randomness": "781bf4881ffe1aa06f9341a747179f07a49745f8cd37d4696f226aa065683c0a", "hiding_nonce": "f922beb51a5ac88d1e862278d89e12c05263b945147db04b9566acb2b5b0f7422ccea4f9286f4f80e6b646e72143eeaecc0e5988f8b2b93100", "binding_nonce": "1890f16a120cdeac092df29955a29c7cf29c13f6f7be60e63d63f3824f2d37e9c3a002dfefc232972dc08658a8c37c3ec06a0c5dc146150500", "hiding_nonce_commitment": "3518c2246c874569e54ab254cb1da666ca30f7879605cc43b4d2c47a521f8b5716080ab723d3a0cd04b7e41f3cc1d3031c94ccf3829b23fe80", 
"binding_nonce_commitment": "11b3d5220c57d02057497de3c4eebab384900206592d877059b0a5f1d5250d002682f0e22dff096c46bb81b46d60fcfe7752ed47cea76c3900", "binding_factor_input": "3832f82fda00ff5365b0376df705675b63d2a93c24c6e81d40801ba265632be10f443f95968fadb70d10786827f30dc001c8d0f9b7c1d1b000e9a0f30b97fe77ef751b08d4e252a3719ae9135e7f7926f7e3b7dd6656b27089ca354997fe5a633aa0946c89f022462e7e9d50fd6ef313f72d956ea4571089427daa1862f623a41625177d91e4a8f350ce9c8bd3bc7c766515dc1dd3a0eab93777526b616cccb148fe1e5992dc1ae705c8ba2f97ca8983328d41d375ed1e5fde5c9d672121c9e8f177f4a1a9b2575961531b33f054451363c8f27618382cd66ce14ad93b68dac6a09f5edcbccc813906b3fc50b8fef1cc09757b06646f38ceed1674cd6ced28a59c93851b325c6a9ef6a4b3b88860b7138ee246034561c7460db0b3fae5010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "binding_factor": "71966390dfdbed73cf9b79486f3b70e23b243e6c40638fb55998642a60109daecbfcb879eed9fe7dbbed8d9e47317715a5740f772173342e00" }, { "identifier": 3, "hiding_nonce_randomness": "b3adf97ceea770e703ab295babf311d77e956a20d3452b4b3344aa89a828e6df", "binding_nonce_randomness": "81dbe7742b0920930299197322b255734e52bbb91f50cfe8ce689f56fadbce31", "hiding_nonce": "ccb5c1e82f23e0a4b966b824dbc7b0ef1cc5f56eeac2a4126e2b2143c5f3a4d890c52d27803abcf94927faf3fc405c0b2123a57a93cefa3b00", "binding_nonce": "e089df9bf311cf711e2a24ea27af53e07b846d09692fe11035a1112f04d8b7462a62f34d8c01493a22b57a1cbf1f0a46c77d64d46449a90100", "hiding_nonce_commitment": "1254546d7d104c04e4fbcf29e05747e2edd392f6787d05a6216f3713ef859efe573d180d291e48411e5e3006e9f90ee986ccc26b7a42490b80", "binding_nonce_commitment": "3ef0cec20be15e56b3ddcb6f7b956fca0c8f71990f45316b537b4f64c5e8763e6629d7262ff7cd0235d0781f23be97bf8fa8817643ea19cd00", "binding_factor_input": 
"3832f82fda00ff5365b0376df705675b63d2a93c24c6e81d40801ba265632be10f443f95968fadb70d10786827f30dc001c8d0f9b7c1d1b000e9a0f30b97fe77ef751b08d4e252a3719ae9135e7f7926f7e3b7dd6656b27089ca354997fe5a633aa0946c89f022462e7e9d50fd6ef313f72d956ea4571089427daa1862f623a41625177d91e4a8f350ce9c8bd3bc7c766515dc1dd3a0eab93777526b616cccb148fe1e5992dc1ae705c8ba2f97ca8983328d41d375ed1e5fde5c9d672121c9e8f177f4a1a9b2575961531b33f054451363c8f27618382cd66ce14ad93b68dac6a09f5edcbccc813906b3fc50b8fef1cc09757b06646f38ceed1674cd6ced28a59c93851b325c6a9ef6a4b3b88860b7138ee246034561c7460db0b3fae5030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "binding_factor": "236a6f7239ac2019334bad21323ec93bef2fead37bd55114356419f3fc1fb59f797f44079f28b1a64f51dd0a113f90f2c3a1c27d2faa4f1300" } ] }, "round_two_outputs": { "outputs": [ { "identifier": 1, "sig_share": "e1eb9bfbef792776b7103891032788406c070c5c315e3bf5d64acd46ea8855e85b53146150a09149665cbfec71626810b575e6f4dbe9ba3700" }, { "identifier": 3, "sig_share": "815434eb0b9f9242d54b8baf2141fe28976cabe5f441ccfcd5ee7cdb4b52185b02b99e6de28e2ab086c7764068c5a01b5300986b9f084f3e00" } ] }, "final_output": { "sig": "cd642cba59c449dad8e896a78a60e8edfcbd9040df524370891ff8077d47ce721d683874483795f0d85efcbd642c4510614328605a19c6ed806ffb773b6956419537cdfdb2b2a51948733de192dcc4b82dc31580a536db6d435e0cb3ce322fbcf9ec23362dda27092c08767e607bf2093600" } } ================================================ FILE: crypto/frost/src/tests/literal/vectors/frost-p256-sha256.json ================================================ { "config": { "MAX_PARTICIPANTS": "3", "NUM_PARTICIPANTS": "2", "MIN_PARTICIPANTS": "2", "name": "FROST(P-256, SHA-256)", "group": "P-256", "hash": "SHA-256" }, "inputs": { "participant_list": [ 1, 3 ], "group_secret_key": "8ba9bba2e0fd8c4767154d35a0b7562244a4aaf6f36c8fb8735fa48b301bd8de", "group_public_key": "023a309ad94e9fe8a7ba45dfc58f38bf091959d3c99cfbd02b4dc00585ec45ab70", "message": 
"74657374", "share_polynomial_coefficients": [ "80f25e6c0709353e46bfbe882a11bdbb1f8097e46340eb8673b7e14556e6c3a4" ], "participant_shares": [ { "identifier": 1, "participant_share": "0c9c1a0fe806c184add50bbdcac913dda73e482daf95dcb9f35dbb0d8a9f7731" }, { "identifier": 2, "participant_share": "8d8e787bef0ff6c2f494ca45f4dad198c6bee01212d6c84067159c52e1863ad5" }, { "identifier": 3, "participant_share": "0e80d6e8f6192c003b5488ce1eec8f5429587d48cf001541e713b2d53c09d928" } ] }, "round_one_outputs": { "outputs": [ { "identifier": 1, "hiding_nonce_randomness": "ec4c891c85fee802a9d757a67d1252e7f4e5efb8a538991ac18fbd0e06fb6fd3", "binding_nonce_randomness": "9334e29d09061223f69a09421715a347e4e6deba77444c8f42b0c833f80f4ef9", "hiding_nonce": "9f0542a5ba879a58f255c09f06da7102ef6a2dec6279700c656d58394d8facd4", "binding_nonce": "6513dfe7429aa2fc972c69bb495b27118c45bbc6e654bb9dc9be55385b55c0d7", "hiding_nonce_commitment": "0213b3e6298bf8ad46fd5e9389519a8665d63d98f4ec6a1fcca434e809d2d8070e", "binding_nonce_commitment": "02188ff1390bf69374d7b272e454b1878ef10a6b6ea3ff36f114b300b4dbd5233b", "binding_factor_input": "023a309ad94e9fe8a7ba45dfc58f38bf091959d3c99cfbd02b4dc00585ec45ab70825371853e974bc30ac5b947b216d70461919666584c70c51f9f56f117736c5d178dd0b521ad9c1abe98048419cbdec81504c85e12eb40e3bcb6ec73d3fc4afd0000000000000000000000000000000000000000000000000000000000000001", "binding_factor": "7925f0d4693f204e6e59233e92227c7124664a99739d2c06b81cf64ddf90559e" }, { "identifier": 3, "hiding_nonce_randomness": "c0451c5a0a5480d6c1f860e5db7d655233dca2669fd90ff048454b8ce983367b", "binding_nonce_randomness": "2ba5f7793ae700e40e78937a82f407dd35e847e33d1e607b5c7eb6ed2a8ed799", "hiding_nonce": "f73444a8972bcda9e506bbca3d2b1c083c10facdf4bb5d47fef7c2dc1d9f2a0d", "binding_nonce": "44c6a29075d6e7e4f8b97796205f9e22062e7835141470afe9417fd317c1c303", "hiding_nonce_commitment": "033ac9a5fe4a8b57316ba1c34e8a6de453033b750e8984924a984eb67a11e73a3f", "binding_nonce_commitment": 
"03a7a2480ee16199262e648aea3acab628a53e9b8c1945078f2ddfbdc98b7df369", "binding_factor_input": "023a309ad94e9fe8a7ba45dfc58f38bf091959d3c99cfbd02b4dc00585ec45ab70825371853e974bc30ac5b947b216d70461919666584c70c51f9f56f117736c5d178dd0b521ad9c1abe98048419cbdec81504c85e12eb40e3bcb6ec73d3fc4afd0000000000000000000000000000000000000000000000000000000000000003", "binding_factor": "e10d24a8a403723bcb6f9bb4c537f316593683b472f7a89f166630dde11822c4" } ] }, "round_two_outputs": { "outputs": [ { "identifier": 1, "sig_share": "400308eaed7a2ddee02a265abe6a1cfe04d946ee8720768899619cfabe7a3aeb" }, { "identifier": 3, "sig_share": "561da3c179edbb0502d941bb3e3ace3c37d122aaa46fb54499f15f3a3331de44" } ] }, "final_output": { "sig": "026d8d434874f87bdb7bc0dfd239b2c00639044f9dcb195e9a04426f70bfa4b70d9620acac6767e8e3e3036815fca4eb3a3caa69992b902bcd3352fc34f1ac192f" } } ================================================ FILE: crypto/frost/src/tests/literal/vectors/frost-ristretto255-sha512.json ================================================ { "config": { "MAX_PARTICIPANTS": "3", "NUM_PARTICIPANTS": "2", "MIN_PARTICIPANTS": "2", "name": "FROST(ristretto255, SHA-512)", "group": "ristretto255", "hash": "SHA-512" }, "inputs": { "participant_list": [ 1, 3 ], "group_secret_key": "1b25a55e463cfd15cf14a5d3acc3d15053f08da49c8afcf3ab265f2ebc4f970b", "group_public_key": "e2a62f39eede11269e3bd5a7d97554f5ca384f9f6d3dd9c3c0d05083c7254f57", "message": "74657374", "share_polynomial_coefficients": [ "410f8b744b19325891d73736923525a4f596c805d060dfb9c98009d34e3fec02" ], "participant_shares": [ { "identifier": 1, "participant_share": "5c3430d391552f6e60ecdc093ff9f6f4488756aa6cebdbad75a768010b8f830e" }, { "identifier": 2, "participant_share": "b06fc5eac20b4f6e1b271d9df2343d843e1e1fb03c4cbb673f2872d459ce6f01" }, { "identifier": 3, "participant_share": "f17e505f0e2581c6acfe54d3846a622834b5e7b50cad9a2109a97ba7a80d5c04" } ] }, "round_one_outputs": { "outputs": [ { "identifier": 1, "hiding_nonce_randomness": 
"f595a133b4d95c6e1f79887220c8b275ce6277e7f68a6640e1e7140f9be2fb5c", "binding_nonce_randomness": "34dd1001360e3513cb37bebfabe7be4a32c5bb91ba19fbd4360d039111f0fbdc", "hiding_nonce": "214f2cabb86ed71427ea7ad4283b0fae26b6746c801ce824b83ceb2b99278c03", "binding_nonce": "c9b8f5e16770d15603f744f8694c44e335e8faef00dad182b8d7a34a62552f0c", "hiding_nonce_commitment": "965def4d0958398391fc06d8c2d72932608b1e6255226de4fb8d972dac15fd57", "binding_nonce_commitment": "ec5170920660820007ae9e1d363936659ef622f99879898db86e5bf1d5bf2a14", "binding_factor_input": "e2a62f39eede11269e3bd5a7d97554f5ca384f9f6d3dd9c3c0d05083c7254f572889dde2854e26377a16caf77dfee5f6be8fe5b4c80318da84698a4161021b033911db5ef8205362701bc9ecd983027814abee94f46d094943a2f4b79a6e4d4603e52c435d8344554942a0a472d8ad84320585b8da3ae5b9ce31cd1903f795c1af66de22af1a45f652cd05ee446b1b4091aaccc91e2471cd18a85a659cecd11f0100000000000000000000000000000000000000000000000000000000000000", "binding_factor": "8967fd70fa06a58e5912603317fa94c77626395a695a0e4e4efc4476662eba0c" }, { "identifier": 3, "hiding_nonce_randomness": "daa0cf42a32617786d390e0c7edfbf2efbd428037069357b5173ae61d6dd5d5e", "binding_nonce_randomness": "b4387e72b2e4108ce4168931cc2c7fcce5f345a5297368952c18b5fc8473f050", "hiding_nonce": "3f7927872b0f9051dd98dd73eb2b91494173bbe0feb65a3e7e58d3e2318fa40f", "binding_nonce": "ffd79445fb8030f0a3ddd3861aa4b42b618759282bfe24f1f9304c7009728305", "hiding_nonce_commitment": "480e06e3de182bf83489c45d7441879932fd7b434a26af41455756264fbd5d6e", "binding_nonce_commitment": "3064746dfd3c1862ef58fc68c706da287dd925066865ceacc816b3a28c7b363b", "binding_factor_input": 
"e2a62f39eede11269e3bd5a7d97554f5ca384f9f6d3dd9c3c0d05083c7254f572889dde2854e26377a16caf77dfee5f6be8fe5b4c80318da84698a4161021b033911db5ef8205362701bc9ecd983027814abee94f46d094943a2f4b79a6e4d4603e52c435d8344554942a0a472d8ad84320585b8da3ae5b9ce31cd1903f795c1af66de22af1a45f652cd05ee446b1b4091aaccc91e2471cd18a85a659cecd11f0300000000000000000000000000000000000000000000000000000000000000", "binding_factor": "f2c1bb7c33a10511158c2f1766a4a5fadf9f86f2a92692ed333128277cc31006" } ] }, "round_two_outputs": { "outputs": [ { "identifier": 1, "sig_share": "9285f875923ce7e0c491a592e9ea1865ec1b823ead4854b48c8a46287749ee09" }, { "identifier": 3, "sig_share": "7cb211fe0e3d59d25db6e36b3fb32344794139602a7b24f1ae0dc4e26ad7b908" } ] }, "final_output": { "sig": "fc45655fbc66bbffad654ea4ce5fdae253a49a64ace25d9adb62010dd9fb25552164141787162e5b4cab915b4aa45d94655dbb9ed7c378a53b980a0be220a802" } } ================================================ FILE: crypto/frost/src/tests/literal/vectors/frost-secp256k1-sha256.json ================================================ { "config": { "MAX_PARTICIPANTS": "3", "NUM_PARTICIPANTS": "2", "MIN_PARTICIPANTS": "2", "name": "FROST(secp256k1, SHA-256)", "group": "secp256k1", "hash": "SHA-256" }, "inputs": { "participant_list": [ 1, 3 ], "group_secret_key": "0d004150d27c3bf2a42f312683d35fac7394b1e9e318249c1bfe7f0795a83114", "group_public_key": "02f37c34b66ced1fb51c34a90bdae006901f10625cc06c4f64663b0eae87d87b4f", "message": "74657374", "share_polynomial_coefficients": [ "fbf85eadae3058ea14f19148bb72b45e4399c0b16028acaf0395c9b03c823579" ], "participant_shares": [ { "identifier": 1, "participant_share": "08f89ffe80ac94dcb920c26f3f46140bfc7f95b493f8310f5fc1ea2b01f4254c" }, { "identifier": 2, "participant_share": "04f0feac2edcedc6ce1253b7fab8c86b856a797f44d83d82a385554e6e401984" }, { "identifier": 3, "participant_share": "00e95d59dd0d46b0e303e500b62b7ccb0e555d49f5b849f5e748c071da8c0dbc" } ] }, "round_one_outputs": { "outputs": [ { "identifier": 1, 
"hiding_nonce_randomness": "7ea5ed09af19f6ff21040c07ec2d2adbd35b759da5a401d4c99dd26b82391cb2", "binding_nonce_randomness": "47acab018f116020c10cb9b9abdc7ac10aae1b48ca6e36dc15acb6ec9be5cdc5", "hiding_nonce": "841d3a6450d7580b4da83c8e618414d0f024391f2aeb511d7579224420aa81f0", "binding_nonce": "8d2624f532af631377f33cf44b5ac5f849067cae2eacb88680a31e77c79b5a80", "hiding_nonce_commitment": "03c699af97d26bb4d3f05232ec5e1938c12f1e6ae97643c8f8f11c9820303f1904", "binding_nonce_commitment": "02fa2aaccd51b948c9dc1a325d77226e98a5a3fe65fe9ba213761a60123040a45e", "binding_factor_input": "02f37c34b66ced1fb51c34a90bdae006901f10625cc06c4f64663b0eae87d87b4fff9b5210ffbb3c07a73a7c8935be4a8c62cf015f6cf7ade6efac09a6513540fc3f5a816aaebc2114a811a415d7a55db7c5cbc1cf27183e79dd9def941b5d48010000000000000000000000000000000000000000000000000000000000000001", "binding_factor": "3e08fe561e075c653cbfd46908a10e7637c70c74f0a77d5fd45d1a750c739ec6" }, { "identifier": 3, "hiding_nonce_randomness": "e6cc56ccbd0502b3f6f831d91e2ebd01c4de0479e0191b66895a4ffd9b68d544", "binding_nonce_randomness": "7203d55eb82a5ca0d7d83674541ab55f6e76f1b85391d2c13706a89a064fd5b9", "hiding_nonce": "2b19b13f193f4ce83a399362a90cdc1e0ddcd83e57089a7af0bdca71d47869b2", "binding_nonce": "7a443bde83dc63ef52dda354005225ba0e553243402a4705ce28ffaafe0f5b98", "hiding_nonce_commitment": "03077507ba327fc074d2793955ef3410ee3f03b82b4cdc2370f71d865beb926ef6", "binding_nonce_commitment": "02ad53031ddfbbacfc5fbda3d3b0c2445c8e3e99cbc4ca2db2aa283fa68525b135", "binding_factor_input": "02f37c34b66ced1fb51c34a90bdae006901f10625cc06c4f64663b0eae87d87b4fff9b5210ffbb3c07a73a7c8935be4a8c62cf015f6cf7ade6efac09a6513540fc3f5a816aaebc2114a811a415d7a55db7c5cbc1cf27183e79dd9def941b5d48010000000000000000000000000000000000000000000000000000000000000003", "binding_factor": "93f79041bb3fd266105be251adaeb5fd7f8b104fb554a4ba9a0becea48ddbfd7" } ] }, "round_two_outputs": { "outputs": [ { "identifier": 1, "sig_share": 
"c4fce1775a1e141fb579944166eab0d65eefe7b98d480a569bbbfcb14f91c197" }, { "identifier": 3, "sig_share": "0160fd0d388932f4826d2ebcd6b9eaba734f7c71cf25b4279a4ca2581e47b18d" } ] }, "final_output": { "sig": "0205b6d04d3774c8929413e3c76024d54149c372d57aae62574ed74319b5ea14d0c65dde8492a7471437e6c2fe3da49b90d23f642b5c6dbe7e36089f096dd97324" } } ================================================ FILE: crypto/frost/src/tests/mod.rs ================================================ use std::collections::HashMap; use rand_core::{RngCore, CryptoRng}; use ciphersuite::Ciphersuite; pub use dkg_recovery::recover_key; use crate::{ Curve, Participant, ThresholdKeys, FrostError, algorithm::{Algorithm, Hram, IetfSchnorr}, sign::{Writable, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine}, }; /// Tests for the nonce handling code. pub mod nonces; use nonces::test_multi_nonce; /// Vectorized test suite to ensure consistency. pub mod vectors; // Literal test definitions to run during `cargo test` #[cfg(test)] mod literal; /// Constant amount of participants to use when testing. pub const PARTICIPANTS: u16 = 5; /// Constant threshold of participants to use when signing. pub const THRESHOLD: u16 = ((PARTICIPANTS * 2) / 3) + 1; /// Create a key, for testing purposes. pub fn key_gen( rng: &mut R, ) -> HashMap> { let res = dkg_dealer::key_gen::(rng, THRESHOLD, PARTICIPANTS).unwrap(); assert_eq!( C::generator() * *recover_key(&res.values().cloned().collect::>()).unwrap(), res.values().next().unwrap().group_key() ); res } /// Clone a map without a specific value. pub fn clone_without( map: &HashMap, without: &K, ) -> HashMap { let mut res = map.clone(); res.remove(without).unwrap(); res } /// Spawn algorithm machines for a random selection of signers, each executing the given algorithm. 
pub fn algorithm_machines_without_clone>( rng: &mut R, keys: &HashMap>, machines: HashMap>, ) -> HashMap> { let mut included = vec![]; while included.len() < usize::from(keys[&Participant::new(1).unwrap()].params().t()) { let n = Participant::new( u16::try_from((rng.next_u64() % u64::try_from(keys.len()).unwrap()) + 1).unwrap(), ) .unwrap(); if included.contains(&n) { continue; } included.push(n); } machines .into_iter() .filter_map(|(i, machine)| if included.contains(&i) { Some((i, machine)) } else { None }) .collect() } /// Spawn algorithm machines for a random selection of signers, each executing the given algorithm. pub fn algorithm_machines>( rng: &mut R, algorithm: &A, keys: &HashMap>, ) -> HashMap> { algorithm_machines_without_clone( rng, keys, keys .values() .map(|keys| (keys.params().i(), AlgorithmMachine::new(algorithm.clone(), keys.clone()))) .collect(), ) } // Run the preprocess step pub(crate) fn preprocess< R: RngCore + CryptoRng, M: PreprocessMachine, F: FnMut(&mut R, &mut HashMap), >( rng: &mut R, mut machines: HashMap, mut cache: F, ) -> (HashMap, HashMap) { let mut commitments = HashMap::new(); let mut machines = machines .drain() .map(|(i, machine)| { let (machine, preprocess) = machine.preprocess(rng); commitments.insert(i, { let mut buf = vec![]; preprocess.write(&mut buf).unwrap(); machine.read_preprocess::<&[u8]>(&mut buf.as_ref()).unwrap() }); (i, machine) }) .collect::>(); cache(rng, &mut machines); (machines, commitments) } // Run the preprocess and generate signature shares #[allow(clippy::type_complexity)] pub(crate) fn preprocess_and_shares< R: RngCore + CryptoRng, M: PreprocessMachine, F: FnMut(&mut R, &mut HashMap), >( rng: &mut R, machines: HashMap, cache: F, msg: &[u8], ) -> ( HashMap>::SignatureMachine>, HashMap>::SignatureShare>, ) { let (mut machines, commitments) = preprocess(rng, machines, cache); let mut shares = HashMap::new(); let machines = machines .drain() .map(|(i, machine)| { let (machine, share) = 
machine.sign(clone_without(&commitments, &i), msg).unwrap(); shares.insert(i, { let mut buf = vec![]; share.write(&mut buf).unwrap(); machine.read_share::<&[u8]>(&mut buf.as_ref()).unwrap() }); (i, machine) }) .collect::>(); (machines, shares) } fn sign_internal< R: RngCore + CryptoRng, M: PreprocessMachine, F: FnMut(&mut R, &mut HashMap), >( rng: &mut R, machines: HashMap, cache: F, msg: &[u8], ) -> M::Signature { let (mut machines, shares) = preprocess_and_shares(rng, machines, cache, msg); let mut signature = None; for (i, machine) in machines.drain() { let sig = machine.complete(clone_without(&shares, &i)).unwrap(); if signature.is_none() { signature = Some(sig.clone()); } assert_eq!(&sig, signature.as_ref().unwrap()); } signature.unwrap() } /// Execute the signing protocol, without caching any machines. This isn't as comprehensive at /// testing as sign, and accordingly isn't preferred, yet is usable for machines not supporting /// caching. pub fn sign_without_caching( rng: &mut R, machines: HashMap, msg: &[u8], ) -> M::Signature { sign_internal(rng, machines, |_, _| {}, msg) } /// Execute the signing protocol, randomly caching various machines to ensure they can cache /// successfully. pub fn sign_without_clone( rng: &mut R, mut keys: HashMap>::Keys>, mut params: HashMap>::Params>, machines: HashMap, msg: &[u8], ) -> M::Signature { sign_internal( rng, machines, |rng, machines| { // Cache and rebuild half of the machines let included = machines.keys().copied().collect::>(); for i in included { if (rng.next_u64() % 2) == 0 { let cache = machines.remove(&i).unwrap().cache(); machines.insert( i, M::SignMachine::from_cache(params.remove(&i).unwrap(), keys.remove(&i).unwrap(), cache) .0, ); } } }, msg, ) } /// Execute the signing protocol, randomly caching various machines to ensure they can cache /// successfully. 
pub fn sign< R: RngCore + CryptoRng, M: PreprocessMachine>, >( rng: &mut R, params: &>::Params, keys: HashMap>::Keys>, machines: HashMap, msg: &[u8], ) -> M::Signature { let params = keys.keys().map(|i| (*i, params.clone())).collect(); sign_without_clone(rng, keys, params, machines, msg) } /// Test a basic Schnorr signature with the provided keys. pub fn test_schnorr_with_keys>( rng: &mut R, keys: &HashMap>, ) { const MSG: &[u8] = b"Hello, World!"; let machines = algorithm_machines(&mut *rng, &IetfSchnorr::::ietf(), keys); let sig = sign(&mut *rng, &IetfSchnorr::::ietf(), keys.clone(), machines, MSG); let group_key = keys[&Participant::new(1).unwrap()].group_key(); assert!(sig.verify(group_key, H::hram(&sig.R, &group_key, MSG))); } /// Test a basic Schnorr signature. pub fn test_schnorr>(rng: &mut R) { let keys = key_gen(&mut *rng); test_schnorr_with_keys::<_, _, H>(&mut *rng, &keys) } /// Test an offset Schnorr signature. pub fn test_offset_schnorr>(rng: &mut R) { const MSG: &[u8] = b"Hello, World!"; let mut keys = key_gen(&mut *rng); let group_key = keys[&Participant::new(1).unwrap()].group_key(); let scalar = C::F::from(3); let offset = C::F::from(5); let offset_key = (group_key * scalar) + (C::generator() * offset); for keys in keys.values_mut() { *keys = keys.clone().scale(scalar).unwrap().offset(offset); assert_eq!(keys.group_key(), offset_key); } let machines = algorithm_machines(&mut *rng, &IetfSchnorr::::ietf(), &keys); let sig = sign(&mut *rng, &IetfSchnorr::::ietf(), keys.clone(), machines, MSG); let group_key = keys[&Participant::new(1).unwrap()].group_key(); assert!(sig.verify(offset_key, H::hram(&sig.R, &group_key, MSG))); } /// Test blame for an invalid Schnorr signature share. 
pub fn test_schnorr_blame>(rng: &mut R) { const MSG: &[u8] = b"Hello, World!"; let keys = key_gen(&mut *rng); let machines = algorithm_machines(&mut *rng, &IetfSchnorr::::ietf(), &keys); let (mut machines, shares) = preprocess_and_shares(&mut *rng, machines, |_, _| {}, MSG); for (i, machine) in machines.drain() { let mut shares = clone_without(&shares, &i); // Select a random participant to give an invalid share let participants = shares.keys().collect::>(); let faulty = *participants [usize::try_from(rng.next_u64() % u64::try_from(participants.len()).unwrap()).unwrap()]; shares.get_mut(&faulty).unwrap().invalidate(); assert_eq!(machine.complete(shares).err(), Some(FrostError::InvalidShare(faulty))); } } /// Run a variety of tests against a ciphersuite. pub fn test_ciphersuite>(rng: &mut R) { test_schnorr::(rng); test_offset_schnorr::(rng); test_schnorr_blame::(rng); test_multi_nonce::(rng); } ================================================ FILE: crypto/frost/src/tests/nonces.rs ================================================ use std::io::{self, Read}; use zeroize::Zeroizing; use rand_core::{RngCore, CryptoRng, SeedableRng}; use rand_chacha::ChaCha20Rng; use transcript::{Transcript, RecommendedTranscript}; use ciphersuite::group::{ff::Field, Group, GroupEncoding}; use crate::{ Curve, Participant, ThresholdView, ThresholdKeys, FrostError, algorithm::Algorithm, tests::{key_gen, algorithm_machines, sign}, }; #[derive(Clone)] struct MultiNonce { transcript: RecommendedTranscript, nonces: Option>>, } impl MultiNonce { fn new() -> MultiNonce { MultiNonce { transcript: RecommendedTranscript::new(b"FROST MultiNonce Algorithm Test"), nonces: None, } } } fn nonces() -> Vec> { vec![ vec![C::generator(), C::generator().double()], vec![C::generator(), C::generator() * C::F::from(3), C::generator() * C::F::from(4)], ] } fn verify_nonces(nonces: &[Vec]) { assert_eq!(nonces.len(), 2); // Each nonce should be a series of commitments, over some generators, which share a discrete 
log // Since they share a discrete log, their only distinction should be the generator // Above, the generators were created with a known relationship // Accordingly, we can check here that relationship holds to make sure these commitments are well // formed assert_eq!(nonces[0].len(), 2); assert_eq!(nonces[0][0].double(), nonces[0][1]); assert_eq!(nonces[1].len(), 3); assert_eq!(nonces[1][0] * C::F::from(3), nonces[1][1]); assert_eq!(nonces[1][0] * C::F::from(4), nonces[1][2]); assert!(nonces[0][0] != nonces[1][0]); } impl Algorithm for MultiNonce { type Transcript = RecommendedTranscript; type Addendum = (); type Signature = (); fn transcript(&mut self) -> &mut Self::Transcript { &mut self.transcript } fn nonces(&self) -> Vec> { nonces::() } fn preprocess_addendum(&mut self, _: &mut R, _: &ThresholdKeys) {} fn read_addendum(&self, _: &mut R) -> io::Result { Ok(()) } fn process_addendum( &mut self, _: &ThresholdView, _: Participant, (): (), ) -> Result<(), FrostError> { Ok(()) } fn sign_share( &mut self, _: &ThresholdView, nonce_sums: &[Vec], nonces: Vec>, _: &[u8], ) -> C::F { // Verify the nonce sums are as expected verify_nonces::(nonce_sums); // Verify we actually have two nonces and that they're distinct assert_eq!(nonces.len(), 2); assert!(nonces[0] != nonces[1]); // Save the nonce sums for later so we can check they're consistent with the call to verify assert!(self.nonces.is_none()); self.nonces = Some(nonce_sums.to_vec()); // Sum the nonces so we can later check they actually have a relationship to nonce_sums let mut res = C::F::ZERO; // Weight each nonce // This is probably overkill, since their unweighted forms would practically still require // some level of crafting to pass a naive sum via malleability, yet this makes it more robust for nonce in nonce_sums { self.transcript.domain_separate(b"nonce"); for commitment in nonce { self.transcript.append_message(b"commitment", commitment.to_bytes()); } } let mut rng = 
ChaCha20Rng::from_seed(self.transcript.clone().rng_seed(b"weight")); for nonce in nonces { res += *nonce * C::F::random(&mut rng); } res } #[must_use] fn verify(&self, _: C::G, nonces: &[Vec], sum: C::F) -> Option { verify_nonces::(nonces); assert_eq!(&self.nonces.clone().unwrap(), nonces); // Make sure the nonce sums actually relate to the nonces let mut res = C::G::identity(); let mut rng = ChaCha20Rng::from_seed(self.transcript.clone().rng_seed(b"weight")); for nonce in nonces { res += nonce[0] * C::F::random(&mut rng); } assert_eq!(res, C::generator() * sum); Some(()) } fn verify_share(&self, _: C::G, _: &[Vec], _: C::F) -> Result, ()> { panic!("share verification triggered"); } } /// Test a multi-nonce, multi-generator algorithm. // Specifically verifies this library can: // 1) Generate multiple nonces // 2) Provide the group nonces (nonce_sums) across multiple generators, still with the same // discrete log // 3) Provide algorithms with nonces which match the group nonces pub fn test_multi_nonce(rng: &mut R) { let keys = key_gen::(&mut *rng); let machines = algorithm_machines(&mut *rng, &MultiNonce::::new(), &keys); sign(&mut *rng, &MultiNonce::::new(), keys.clone(), machines, &[]); } ================================================ FILE: crypto/frost/src/tests/vectors.rs ================================================ use core::ops::Deref; use std::collections::HashMap; #[cfg(test)] use std::str::FromStr; use zeroize::Zeroizing; use rand_core::{RngCore, CryptoRng, SeedableRng}; use rand_chacha::ChaCha20Rng; use ciphersuite::group::{ff::PrimeField, GroupEncoding}; use crate::{ curve::Curve, Participant, ThresholdKeys, algorithm::{Hram, IetfSchnorr}, sign::{ Writable, Nonce, GeneratorCommitments, NonceCommitments, Commitments, Preprocess, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine, }, tests::{clone_without, recover_key, test_ciphersuite}, }; /// Vectors for a ciphersuite. 
pub struct Vectors { pub threshold: u16, pub group_secret: String, pub group_key: String, pub shares: Vec, pub msg: String, pub included: Vec, pub nonce_randomness: Vec<[String; 2]>, pub nonces: Vec<[String; 2]>, pub commitments: Vec<[String; 2]>, pub sig_shares: Vec, pub sig: String, } // Vectors are expected to be formatted per the IETF proof of concept // The included vectors are directly from // https://github.com/cfrg/draft-irtf-cfrg-frost/tree/draft-irtf-cfrg-frost-14/poc #[cfg(test)] impl From for Vectors { fn from(value: serde_json::Value) -> Vectors { let to_str = |value: &serde_json::Value| value.as_str().unwrap().to_string(); Vectors { threshold: u16::from_str(value["config"]["NUM_PARTICIPANTS"].as_str().unwrap()).unwrap(), group_secret: to_str(&value["inputs"]["group_secret_key"]), group_key: to_str(&value["inputs"]["group_public_key"]), shares: value["inputs"]["participant_shares"] .as_array() .unwrap() .iter() .map(|share| to_str(&share["participant_share"])) .collect(), msg: to_str(&value["inputs"]["message"]), included: value["inputs"]["participant_list"] .as_array() .unwrap() .iter() .map(|i| Participant::new(u16::try_from(i.as_u64().unwrap()).unwrap()).unwrap()) .collect(), nonce_randomness: value["round_one_outputs"]["outputs"] .as_array() .unwrap() .iter() .map(|value| { [to_str(&value["hiding_nonce_randomness"]), to_str(&value["binding_nonce_randomness"])] }) .collect(), nonces: value["round_one_outputs"]["outputs"] .as_array() .unwrap() .iter() .map(|value| [to_str(&value["hiding_nonce"]), to_str(&value["binding_nonce"])]) .collect(), commitments: value["round_one_outputs"]["outputs"] .as_array() .unwrap() .iter() .map(|value| { [to_str(&value["hiding_nonce_commitment"]), to_str(&value["binding_nonce_commitment"])] }) .collect(), sig_shares: value["round_two_outputs"]["outputs"] .as_array() .unwrap() .iter() .map(|value| to_str(&value["sig_share"])) .collect(), sig: to_str(&value["final_output"]["sig"]), } } } // Load these vectors into 
ThresholdKeys using a custom serialization it'll deserialize fn vectors_to_multisig_keys(vectors: &Vectors) -> HashMap> { let shares = vectors .shares .iter() .map(|secret| C::read_F::<&[u8]>(&mut hex::decode(secret).unwrap().as_ref()).unwrap()) .collect::>(); let verification_shares = shares.iter().map(|secret| C::generator() * secret).collect::>(); let mut keys = HashMap::new(); for i in 1 ..= u16::try_from(shares.len()).unwrap() { // Manually re-implement the serialization for ThresholdKeys to import this data let mut serialized = vec![]; serialized.extend(u32::try_from(C::ID.len()).unwrap().to_le_bytes()); serialized.extend(C::ID); serialized.extend(vectors.threshold.to_le_bytes()); serialized.extend(u16::try_from(shares.len()).unwrap().to_le_bytes()); serialized.extend(i.to_le_bytes()); serialized.push(1); serialized.extend(shares[usize::from(i) - 1].to_repr().as_ref()); for share in &verification_shares { serialized.extend(share.to_bytes().as_ref()); } let these_keys = ThresholdKeys::::read::<&[u8]>(&mut serialized.as_ref()).unwrap(); assert_eq!(these_keys.params().t(), vectors.threshold); assert_eq!(usize::from(these_keys.params().n()), shares.len()); let participant = Participant::new(i).unwrap(); assert_eq!(these_keys.params().i(), participant); assert_eq!(these_keys.original_secret_share().deref(), &shares[usize::from(i - 1)]); assert_eq!(hex::encode(these_keys.group_key().to_bytes().as_ref()), vectors.group_key); keys.insert(participant, these_keys); } keys } /// Test a Ciphersuite with its vectors. 
pub fn test_with_vectors>( rng: &mut R, vectors: &Vectors, ) { test_ciphersuite::(rng); // Test against the vectors let keys = vectors_to_multisig_keys::(vectors); { let group_key = ::read_G::<&[u8]>(&mut hex::decode(&vectors.group_key).unwrap().as_ref()) .unwrap(); let secret = C::read_F::<&[u8]>(&mut hex::decode(&vectors.group_secret).unwrap().as_ref()).unwrap(); assert_eq!(C::generator() * secret, group_key); assert_eq!(*recover_key(&keys.values().cloned().collect::>()).unwrap(), secret); let mut machines = vec![]; for i in &vectors.included { machines.push((i, AlgorithmMachine::new(IetfSchnorr::::ietf(), keys[i].clone()))); } let mut commitments = HashMap::new(); let machines = machines .into_iter() .enumerate() .map(|(c, (i, machine))| { let nonce = |i| { Zeroizing::new( C::read_F::<&[u8]>(&mut hex::decode(&vectors.nonces[c][i]).unwrap().as_ref()).unwrap(), ) }; let nonces = [nonce(0), nonce(1)]; let these_commitments = [C::generator() * nonces[0].deref(), C::generator() * nonces[1].deref()]; assert_eq!( these_commitments[0].to_bytes().as_ref(), hex::decode(&vectors.commitments[c][0]).unwrap() ); assert_eq!( these_commitments[1].to_bytes().as_ref(), hex::decode(&vectors.commitments[c][1]).unwrap() ); let preprocess = Preprocess { commitments: Commitments { nonces: vec![NonceCommitments { generators: vec![GeneratorCommitments(these_commitments)], }], }, addendum: (), }; // FROST doesn't specify how to serialize these together, yet this is sane // (and the simplest option) assert_eq!( preprocess.serialize(), hex::decode(vectors.commitments[c][0].clone() + &vectors.commitments[c][1]).unwrap() ); let machine = machine.unsafe_override_preprocess(vec![Nonce(nonces)], preprocess); commitments.insert( *i, machine .read_preprocess::<&[u8]>( &mut [ these_commitments[0].to_bytes().as_ref(), these_commitments[1].to_bytes().as_ref(), ] .concat() .as_ref(), ) .unwrap(), ); (i, machine) }) .collect::>(); let mut shares = HashMap::new(); let machines = machines .into_iter() 
.enumerate() .map(|(c, (i, machine))| { let (machine, share) = machine .sign(clone_without(&commitments, i), &hex::decode(&vectors.msg).unwrap()) .unwrap(); let share = { let mut buf = vec![]; share.write(&mut buf).unwrap(); buf }; assert_eq!(share, hex::decode(&vectors.sig_shares[c]).unwrap()); shares.insert(*i, machine.read_share::<&[u8]>(&mut share.as_ref()).unwrap()); (i, machine) }) .collect::>(); for (i, machine) in machines { let sig = machine.complete(clone_without(&shares, i)).unwrap(); let mut serialized = sig.R.to_bytes().as_ref().to_vec(); serialized.extend(sig.s.to_repr().as_ref()); assert_eq!(hex::encode(serialized), vectors.sig); } } // The above code didn't test the nonce generation due to the infeasibility of doing so against // the current codebase // A transparent RNG which has a fixed output struct TransparentRng(Vec<[u8; 32]>); impl RngCore for TransparentRng { fn next_u32(&mut self) -> u32 { unimplemented!() } fn next_u64(&mut self) -> u64 { unimplemented!() } fn fill_bytes(&mut self, dest: &mut [u8]) { dest.copy_from_slice(&self.0.remove(0)) } fn try_fill_bytes(&mut self, _: &mut [u8]) -> Result<(), rand_core::Error> { unimplemented!() } } // CryptoRng requires the output not reveal any info about any other outputs // Since this only will produce one output, this is actually met, even though it'd be fine to // fake it as this is a test impl CryptoRng for TransparentRng {} // Test C::random_nonce matches the expected vectors for (i, l) in vectors.included.iter().enumerate() { let l = usize::from(u16::from(*l)); // Shares are a zero-indexed array of all participants, hence l - 1 let share = Zeroizing::new( C::read_F::<&[u8]>(&mut hex::decode(&vectors.shares[l - 1]).unwrap().as_ref()).unwrap(), ); let randomness = vectors.nonce_randomness[i] .iter() .map(|randomness| hex::decode(randomness).unwrap().try_into().unwrap()) .collect::>(); let nonces = vectors.nonces[i] .iter() .map(|nonce| { Zeroizing::new(C::read_F::<&[u8]>(&mut 
hex::decode(nonce).unwrap().as_ref()).unwrap()) }) .collect::>(); for (randomness, nonce) in randomness.iter().zip(&nonces) { // Nonces are only present for participating signers, hence i assert_eq!(C::random_nonce(&share, &mut TransparentRng(vec![*randomness])), *nonce); } // Also test it at the Commitments level let (generated_nonces, commitments) = Commitments::::new::<_>(&mut TransparentRng(randomness), &share, &[vec![C::generator()]]); assert_eq!(generated_nonces.len(), 1); assert_eq!(generated_nonces[0].0, [nonces[0].clone(), nonces[1].clone()]); let mut commitments_bytes = vec![]; commitments.write(&mut commitments_bytes).unwrap(); assert_eq!( commitments_bytes, hex::decode(vectors.commitments[i][0].clone() + &vectors.commitments[i][1]).unwrap() ); } // This doesn't verify C::random_nonce is called correctly, where the code should call it with // the output from a ChaCha20 stream // Create a known ChaCha20 stream to verify it ends up at random_nonce properly { let mut chacha_seed = [0; 32]; rng.fill_bytes(&mut chacha_seed); let mut ours = ChaCha20Rng::from_seed(chacha_seed); let frosts = ours.clone(); // The machines should geenerate a seed, and then use that seed in a ChaCha20 RNG for nonces let mut preprocess_seed = [0; 32]; ours.fill_bytes(&mut preprocess_seed); let mut ours = ChaCha20Rng::from_seed(preprocess_seed); // Get the randomness which will be used let mut randomness = ([0; 32], [0; 32]); ours.fill_bytes(&mut randomness.0); ours.fill_bytes(&mut randomness.1); // Create the machines let mut machines = vec![]; for i in &vectors.included { machines.push((i, AlgorithmMachine::new(IetfSchnorr::::ietf(), keys[i].clone()))); } for (i, machine) in machines { let (_, preprocess) = machine.preprocess(&mut frosts.clone()); // Calculate the expected nonces let mut expected = (C::generator() * C::random_nonce( keys[i].original_secret_share(), &mut TransparentRng(vec![randomness.0]), ) .deref()) .to_bytes() .as_ref() .to_vec(); expected.extend( (C::generator() 
* C::random_nonce( keys[i].original_secret_share(), &mut TransparentRng(vec![randomness.1]), ) .deref()) .to_bytes() .as_ref(), ); // Ensure they match assert_eq!(preprocess.serialize(), expected); } } } ================================================ FILE: crypto/multiexp/Cargo.toml ================================================ [package] name = "multiexp" version = "0.4.2" description = "Multiexponentiation algorithms for ff/group" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/crypto/multiexp" authors = ["Luke Parker "] keywords = ["multiexp", "ff", "group"] edition = "2021" rust-version = "1.79" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] rustversion = "1" std-shims = { path = "../../common/std-shims", version = "^0.1.1", default-features = false } zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] } ff = { version = "0.13", default-features = false, features = ["bits"] } group = { version = "0.13", default-features = false } rand_core = { version = "0.6", default-features = false, optional = true } [dev-dependencies] rand_core = { version = "0.6", features = ["std"] } k256 = { version = "^0.13.1", default-features = false, features = ["arithmetic", "bits"] } dalek-ff-group = { path = "../dalek-ff-group" } [features] std = ["std-shims/std", "zeroize/std", "ff/std", "rand_core?/std"] batch = ["rand_core"] default = ["std"] ================================================ FILE: crypto/multiexp/LICENSE ================================================ MIT License Copyright (c) 2022-2023 Luke Parker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the 
Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: crypto/multiexp/README.md ================================================ # Multiexp A multiexp implementation for ff/group implementing Straus and Pippenger. A batch verification API is also available via the "batch" feature, which enables secure multiexponentiation batch verification given a series of values which should sum to the identity, identifying which doesn't via binary search if they don't. This library was [audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf), culminating in commit [669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06). Any subsequent changes have not undergone auditing. This library is usable under no_std, via alloc, when the default features are disabled. 
================================================ FILE: crypto/multiexp/src/batch.rs ================================================ use std_shims::vec::Vec; use rand_core::{RngCore, CryptoRng}; use zeroize::{Zeroize, Zeroizing}; use ff::{Field, PrimeFieldBits}; use group::Group; use crate::{multiexp, multiexp_vartime}; // Flatten the contained statements to a single Vec. // Wrapped in Zeroizing in case any of the included statements contain private values. #[allow(clippy::type_complexity)] fn flat>( slice: &[(Id, Vec<(G::Scalar, G)>)], ) -> Zeroizing> { Zeroizing::new(slice.iter().flat_map(|pairs| pairs.1.iter()).copied().collect::>()) } /// A batch verifier intended to verify a series of statements are each equivalent to zero. #[allow(clippy::type_complexity)] #[derive(Clone, Zeroize)] pub struct BatchVerifier>( Zeroizing)>>, ); impl> BatchVerifier { /// Create a new batch verifier, expected to verify the following amount of statements. /// /// `capacity` is a size hint and is not required to be accurate. pub fn new(capacity: usize) -> BatchVerifier { BatchVerifier(Zeroizing::new(Vec::with_capacity(capacity))) } /// Queue a statement for batch verification. pub fn queue>( &mut self, rng: &mut R, id: Id, pairs: I, ) { // Define a unique scalar factor for this set of variables so individual items can't overlap let u = if self.0.is_empty() { G::Scalar::ONE } else { let mut weight; while { // Generate a random scalar weight = G::Scalar::random(&mut *rng); // Clears half the bits, maintaining security, to minimize scalar additions // Is not practically faster for whatever reason /* // Generate a random scalar let mut repr = G::Scalar::random(&mut *rng).to_repr(); // Calculate the amount of bytes to clear. 
We want to clear less than half let repr_len = repr.as_ref().len(); let unused_bits = (repr_len * 8) - usize::try_from(G::Scalar::CAPACITY).unwrap(); // Don't clear any partial bytes let to_clear = (repr_len / 2) - ((unused_bits + 7) / 8); // Clear a safe amount of bytes for b in &mut repr.as_mut()[.. to_clear] { *b = 0; } // Ensure these bits are used as the low bits so low scalars multiplied by this don't // become large scalars weight = G::Scalar::from_repr(repr).unwrap(); // Tests if any bit we supposedly just cleared is set, and if so, reverses it // Not a security issue if this fails, just a minor performance hit at ~2^-120 odds if weight.to_le_bits().iter().take(to_clear * 8).any(|bit| *bit) { repr.as_mut().reverse(); weight = G::Scalar::from_repr(repr).unwrap(); } */ // Ensure it's non-zero, as a zero scalar would cause this item to pass no matter what weight.is_zero().into() } {} weight }; self.0.push((id, pairs.into_iter().map(|(scalar, point)| (scalar * u, point)).collect())); } /// Perform batch verification, returning a boolean of if the statements equaled zero. #[must_use] pub fn verify(&self) -> bool { multiexp(&flat(&self.0)).is_identity().into() } /// Perform batch verification in variable time. #[must_use] pub fn verify_vartime(&self) -> bool { multiexp_vartime(&flat(&self.0)).is_identity().into() } /// Perform a binary search to identify which statement does not equal 0, returning None if all /// statements do. /// /// This function will only return the ID of one invalid statement, even if multiple are invalid. // A constant time variant may be beneficial for robust protocols pub fn blame_vartime(&self) -> Option { let mut slice = self.0.as_slice(); while slice.len() > 1 { let split = slice.len() / 2; if multiexp_vartime(&flat(&slice[.. split])).is_identity().into() { slice = &slice[split ..]; } else { slice = &slice[.. 
split]; } } slice .first() .filter(|(_, value)| !bool::from(multiexp_vartime(value).is_identity())) .map(|(id, _)| *id) } /// Perform constant time batch verification, and if verification fails, identify one faulty /// statement in variable time. pub fn verify_with_vartime_blame(&self) -> Result<(), Id> { if self.verify() { Ok(()) } else { Err(self.blame_vartime().unwrap()) } } /// Perform variable time batch verification, and if verification fails, identify one faulty /// statement in variable time. pub fn verify_vartime_with_vartime_blame(&self) -> Result<(), Id> { if self.verify_vartime() { Ok(()) } else { Err(self.blame_vartime().unwrap()) } } } ================================================ FILE: crypto/multiexp/src/lib.rs ================================================ #![cfg_attr(docsrs, feature(doc_cfg))] #![doc = include_str!("../README.md")] #![cfg_attr(not(feature = "std"), no_std)] #[cfg(not(feature = "std"))] #[macro_use] extern crate alloc; #[allow(unused_imports)] use std_shims::prelude::*; use std_shims::vec::Vec; use zeroize::Zeroize; use ff::PrimeFieldBits; use group::Group; mod straus; use straus::*; mod pippenger; use pippenger::*; #[cfg(feature = "batch")] mod batch; #[cfg(feature = "batch")] pub use batch::BatchVerifier; #[cfg(test)] mod tests; // Use black_box when possible #[rustversion::since(1.66)] use core::hint::black_box; #[rustversion::before(1.66)] fn black_box(val: T) -> T { val } fn u8_from_bool(bit_ref: &mut bool) -> u8 { let bit_ref = black_box(bit_ref); let mut bit = black_box(*bit_ref); #[allow(clippy::cast_lossless)] let res = black_box(bit as u8); bit.zeroize(); debug_assert!((res | 1) == 1); bit_ref.zeroize(); res } // Convert scalars to `window`-sized bit groups, as needed to index a table // This algorithm works for `window <= 8` pub(crate) fn prep_bits>( pairs: &[(G::Scalar, G)], window: u8, ) -> Vec> { let w_usize = usize::from(window); let mut groupings = vec![]; for pair in pairs { let p = groupings.len(); let mut 
bits = pair.0.to_le_bits();
    groupings.push(vec![0; bits.len().div_ceil(w_usize)]);

    for (i, mut bit) in bits.iter_mut().enumerate() {
      let mut bit = u8_from_bool(&mut bit);
      // Pack each window-sized group of bits into one u8 "digit"
      groupings[p][i / w_usize] |= bit << (i % w_usize);
      bit.zeroize();
    }
  }

  groupings
}

/// The algorithm to use for a multiexponentiation of a given size.
// NOTE(review): generic parameter lists in this file were stripped by the text extraction
// (`fn multiexp>(…)`); they have been reconstructed and should be confirmed against upstream.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Algorithm {
  /// No pairs; the result is the identity.
  Null,
  /// A single pair; a plain scalar multiplication beats any table-based method.
  Single,
  /// Straus's algorithm with the specified window size.
  Straus(u8),
  /// Pippenger's algorithm with the specified window size.
  Pippenger(u8),
}

/*
Release (with runs 20, so all of these are off by 20x):

k256
Straus 3 is more efficient at 5 with 678µs per
Straus 4 is more efficient at 10 with 530µs per
Straus 5 is more efficient at 35 with 467µs per

Pippenger 5 is more efficient at 125 with 431µs per
Pippenger 6 is more efficient at 275 with 349µs per
Pippenger 7 is more efficient at 375 with 360µs per

dalek
Straus 3 is more efficient at 5 with 519µs per
Straus 4 is more efficient at 10 with 376µs per
Straus 5 is more efficient at 170 with 330µs per

Pippenger 5 is more efficient at 125 with 305µs per
Pippenger 6 is more efficient at 275 with 250µs per
Pippenger 7 is more efficient at 450 with 205µs per
Pippenger 8 is more efficient at 800 with 213µs per

Debug (with runs 5, so...):

k256
Straus 3 is more efficient at 5 with 2532µs per
Straus 4 is more efficient at 10 with 1930µs per
Straus 5 is more efficient at 80 with 1632µs per

Pippenger 5 is more efficient at 150 with 1441µs per
Pippenger 6 is more efficient at 300 with 1235µs per
Pippenger 7 is more efficient at 475 with 1182µs per
Pippenger 8 is more efficient at 625 with 1170µs per

dalek:
Straus 3 is more efficient at 5 with 971µs per
Straus 4 is more efficient at 10 with 782µs per
Straus 5 is more efficient at 75 with 778µs per
Straus 6 is more efficient at 165 with 867µs per

Pippenger 5 is more efficient at 125 with 677µs per
Pippenger 6 is more efficient at 250 with 655µs per
Pippenger 7 is more efficient at 475 with 500µs per
Pippenger 8 is more efficient at 875 with 499µs per
*/

// Select the algorithm and window size for a multiexponentiation of `len` pairs, using the
// crossover points from the benchmarks above (release and debug builds tuned separately).
fn algorithm(len: usize) -> Algorithm {
  #[cfg(not(debug_assertions))]
  if len == 0 {
    Algorithm::Null
  } else if len == 1 {
    Algorithm::Single
  } else if len < 10 {
    // Straus 2 never showed a performance benefit, even with just 2 elements
    Algorithm::Straus(3)
  } else if len < 20 {
    Algorithm::Straus(4)
  } else if len < 50 {
    Algorithm::Straus(5)
  } else if len < 100 {
    Algorithm::Pippenger(4)
  } else if len < 125 {
    Algorithm::Pippenger(5)
  } else if len < 275 {
    Algorithm::Pippenger(6)
  } else if len < 400 {
    Algorithm::Pippenger(7)
  } else {
    Algorithm::Pippenger(8)
  }

  #[cfg(debug_assertions)]
  if len == 0 {
    Algorithm::Null
  } else if len == 1 {
    Algorithm::Single
  } else if len < 10 {
    Algorithm::Straus(3)
  } else if len < 80 {
    Algorithm::Straus(4)
  } else if len < 100 {
    Algorithm::Straus(5)
  } else if len < 125 {
    Algorithm::Pippenger(4)
  } else if len < 275 {
    Algorithm::Pippenger(5)
  } else if len < 475 {
    Algorithm::Pippenger(6)
  } else if len < 750 {
    Algorithm::Pippenger(7)
  } else {
    Algorithm::Pippenger(8)
  }
}

/// Performs a multiexponentiation, automatically selecting the optimal algorithm based on the
/// amount of pairs.
pub fn multiexp<G: Group + Zeroize>(pairs: &[(G::Scalar, G)]) -> G
where
  G::Scalar: PrimeFieldBits + Zeroize,
{
  match algorithm(pairs.len()) {
    Algorithm::Null => Group::identity(),
    Algorithm::Single => pairs[0].1 * pairs[0].0,
    // These functions panic if called without any pairs
    Algorithm::Straus(window) => straus(pairs, window),
    Algorithm::Pippenger(window) => pippenger(pairs, window),
  }
}

/// Performs a multiexponentiation in variable time, automatically selecting the optimal algorithm
/// based on the amount of pairs.
pub fn multiexp_vartime>(pairs: &[(G::Scalar, G)]) -> G { match algorithm(pairs.len()) { Algorithm::Null => Group::identity(), Algorithm::Single => pairs[0].1 * pairs[0].0, Algorithm::Straus(window) => straus_vartime(pairs, window), Algorithm::Pippenger(window) => pippenger_vartime(pairs, window), } } ================================================ FILE: crypto/multiexp/src/pippenger.rs ================================================ use zeroize::Zeroize; use ff::PrimeFieldBits; use group::Group; use crate::prep_bits; // Pippenger's algorithm for multiexponentiation, as published in the SIAM Journal on Computing // DOI: 10.1137/0209022 pub(crate) fn pippenger>( pairs: &[(G::Scalar, G)], window: u8, ) -> G { let mut bits = prep_bits(pairs, window); let mut res = G::identity(); for n in (0 .. bits[0].len()).rev() { if n != (bits[0].len() - 1) { for _ in 0 .. window { res = res.double(); } } let mut buckets = vec![G::identity(); 2_usize.pow(window.into())]; for p in 0 .. bits.len() { buckets[usize::from(bits[p][n])] += pairs[p].1; } let mut intermediate_sum = G::identity(); for b in (1 .. buckets.len()).rev() { intermediate_sum += buckets[b]; res += intermediate_sum; } buckets.zeroize(); } bits.zeroize(); res } pub(crate) fn pippenger_vartime>( pairs: &[(G::Scalar, G)], window: u8, ) -> G { let bits = prep_bits(pairs, window); let mut res = G::identity(); for n in (0 .. bits[0].len()).rev() { if n != (bits[0].len() - 1) { for _ in 0 .. window { res = res.double(); } } // Use None to represent identity since is_none is likely faster than is_identity let mut buckets = vec![None; 2_usize.pow(window.into())]; for p in 0 .. bits.len() { let nibble = usize::from(bits[p][n]); if nibble != 0 { if let Some(bucket) = buckets[nibble].as_mut() { *bucket += pairs[p].1; } else { buckets[nibble] = Some(pairs[p].1); } } } let mut intermediate_sum = None; for b in (1 .. 
buckets.len()).rev() { if let Some(bucket) = buckets[b].as_ref() { if let Some(intermediate_sum) = intermediate_sum.as_mut() { *intermediate_sum += bucket; } else { intermediate_sum = Some(*bucket); } } if let Some(intermediate_sum) = intermediate_sum.as_ref() { res += intermediate_sum; } } } res } ================================================ FILE: crypto/multiexp/src/straus.rs ================================================ use std_shims::vec::Vec; use zeroize::Zeroize; use ff::PrimeFieldBits; use group::Group; use crate::prep_bits; // Create tables for every included point of size 2^window fn prep_tables(pairs: &[(G::Scalar, G)], window: u8) -> Vec> { let mut tables = Vec::with_capacity(pairs.len()); for pair in pairs { let p = tables.len(); tables.push(vec![G::identity(); 2_usize.pow(window.into())]); let mut accum = G::identity(); for i in 1 .. tables[p].len() { accum += pair.1; tables[p][i] = accum; } } tables } // Straus's algorithm for multiexponentiation, as published in The American Mathematical Monthly // DOI: 10.2307/2310929 pub(crate) fn straus>( pairs: &[(G::Scalar, G)], window: u8, ) -> G { let mut groupings = prep_bits(pairs, window); let mut tables = prep_tables(pairs, window); let mut res = G::identity(); for b in (0 .. groupings[0].len()).rev() { if b != (groupings[0].len() - 1) { for _ in 0 .. window { res = res.double(); } } for s in 0 .. tables.len() { res += tables[s][usize::from(groupings[s][b])]; } } groupings.zeroize(); tables.zeroize(); res } pub(crate) fn straus_vartime>( pairs: &[(G::Scalar, G)], window: u8, ) -> G { let groupings = prep_bits(pairs, window); let tables = prep_tables(pairs, window); let mut res: Option = None; for b in (0 .. groupings[0].len()).rev() { if b != (groupings[0].len() - 1) { for _ in 0 .. window { res = res.map(|res| res.double()); } } for s in 0 .. 
tables.len() {
      if groupings[s][b] != 0 {
        if let Some(res) = res.as_mut() {
          *res += tables[s][usize::from(groupings[s][b])];
        } else {
          res = Some(tables[s][usize::from(groupings[s][b])]);
        }
      }
    }
  }

  res.unwrap_or_else(G::identity)
}


================================================
FILE: crypto/multiexp/src/tests/batch.rs
================================================
use rand_core::{RngCore, OsRng};

use zeroize::Zeroize;

use ff::{Field, PrimeFieldBits};
use group::Group;

use crate::BatchVerifier;

// Exercise BatchVerifier: valid batches verify, invalid batches fail, and blame identifies the
// left-most faulty statement set.
// NOTE(review): generic bounds reconstructed after extraction stripped `<...>`; confirm against
// upstream.
pub(crate) fn test_batch<G: Group + Zeroize>()
where
  G::Scalar: PrimeFieldBits + Zeroize,
{
  // Assert every verification API agrees the batch is valid
  let valid = |batch: BatchVerifier<_, G>| {
    assert!(batch.verify());
    assert!(batch.verify_vartime());
    assert_eq!(batch.blame_vartime(), None);
    assert_eq!(batch.verify_with_vartime_blame(), Ok(()));
    assert_eq!(batch.verify_vartime_with_vartime_blame(), Ok(()));
  };

  // Assert every verification API agrees the batch is invalid, blaming `id`
  let invalid = |batch: BatchVerifier<_, G>, id| {
    assert!(!batch.verify());
    assert!(!batch.verify_vartime());
    assert_eq!(batch.blame_vartime(), Some(id));
    assert_eq!(batch.verify_with_vartime_blame(), Err(id));
    assert_eq!(batch.verify_vartime_with_vartime_blame(), Err(id));
  };

  // Test an empty batch
  let batch = BatchVerifier::new(0);
  valid(batch);

  // Test a batch with one set of statements
  let valid_statements =
    vec![(-G::Scalar::ONE, G::generator()), (G::Scalar::ONE, G::generator())];
  let mut batch = BatchVerifier::new(1);
  batch.queue(&mut OsRng, 0, valid_statements.clone());
  valid(batch);

  // Test a batch with an invalid set of statements fails properly
  let invalid_statements = vec![(-G::Scalar::ONE, G::generator())];
  let mut batch = BatchVerifier::new(1);
  batch.queue(&mut OsRng, 0, invalid_statements.clone());
  invalid(batch, 0);

  // Test blame can properly identify faulty participants
  // Run with 17 statements, rotating which one is faulty
  for i in 0 .. 17 {
    let mut batch = BatchVerifier::new(17);
    for j in 0 .. 17 {
      batch.queue(
        &mut OsRng,
        j,
        if i == j { invalid_statements.clone() } else { valid_statements.clone() },
      );
    }
    invalid(batch, i);
  }

  // Test blame always identifies the left-most invalid statement
  for i in 1 .. 32 {
    for j in 1 .. i {
      let mut batch = BatchVerifier::new(j);
      let mut leftmost = None;

      // Create j statements
      for k in 0 .. j {
        batch.queue(
          &mut OsRng,
          k,
          // The usage of i / 10 makes this less likely to add invalid elements, and increases
          // the space between them
          // For high i values, yet low j values, this will make it likely that random elements
          // are at/near the end
          if ((OsRng.next_u64() % u64::try_from(1 + (i / 4)).unwrap()) == 0) ||
            (leftmost.is_none() && (k == (j - 1)))
          {
            if leftmost.is_none() {
              leftmost = Some(k);
            }
            invalid_statements.clone()
          } else {
            valid_statements.clone()
          },
        );
      }

      invalid(batch, leftmost.unwrap());
    }
  }
}


================================================
FILE: crypto/multiexp/src/tests/mod.rs
================================================
use std::time::Instant;

use rand_core::OsRng;

use zeroize::Zeroize;

use ff::{Field, PrimeFieldBits};
use group::Group;

use k256::ProjectivePoint;
use dalek_ff_group::EdwardsPoint;

use crate::{straus, straus_vartime, pippenger, pippenger_vartime, multiexp, multiexp_vartime};

#[cfg(feature = "batch")]
mod batch;
#[cfg(feature = "batch")]
use batch::test_batch;

// Benchmark Straus (straus_bool == true) or Pippenger against increasing pair counts, printing
// the point at which the next window size becomes more efficient. Used to derive the crossover
// constants in `algorithm`.
#[allow(dead_code)]
fn benchmark_internal<G: Group + Zeroize>(straus_bool: bool)
where
  G::Scalar: PrimeFieldBits + Zeroize,
{
  let runs: usize = 20;

  let mut start = 0;
  let mut increment: usize = 5;
  let mut total: usize = 250;
  let mut current = 2;
  if !straus_bool {
    // Pippenger only becomes competitive at larger sizes; start further along
    start = 100;
    increment = 25;
    total = 1000;
    current = 4;
  };

  let mut pairs = Vec::with_capacity(total);
  let mut sum = G::identity();
  for _ in 0 .. start {
    pairs.push((G::Scalar::random(&mut OsRng), G::generator() * G::Scalar::random(&mut OsRng)));
    sum += pairs[pairs.len() - 1].1 * pairs[pairs.len() - 1].0;
  }

  for _ in 0 .. (total / increment) {
    for _ in 0 ..
increment {
      pairs.push((G::Scalar::random(&mut OsRng), G::generator() * G::Scalar::random(&mut OsRng)));
      sum += pairs[pairs.len() - 1].1 * pairs[pairs.len() - 1].0;
    }

    // Time the current window size
    let now = Instant::now();
    for _ in 0 .. runs {
      if straus_bool {
        assert_eq!(straus(&pairs, current), sum);
      } else {
        assert_eq!(pippenger(&pairs, current), sum);
      }
    }
    let current_per = now.elapsed().as_micros() / u128::try_from(pairs.len()).unwrap();

    // Time the next window size
    let now = Instant::now();
    for _ in 0 .. runs {
      if straus_bool {
        assert_eq!(straus(&pairs, current + 1), sum);
      } else {
        assert_eq!(pippenger(&pairs, current + 1), sum);
      }
    }
    let next_per = now.elapsed().as_micros() / u128::try_from(pairs.len()).unwrap();

    // Report the crossover point where the larger window wins
    if next_per < current_per {
      current += 1;
      println!(
        "{} {} is more efficient at {} with {}µs per",
        if straus_bool { "Straus" } else { "Pippenger" },
        current,
        pairs.len(),
        next_per
      );
      if current >= 8 {
        return;
      }
    }
  }
}

// Check multiexp/multiexp_vartime and every explicit algorithm/window agree with a naively
// computed sum, across empty, identity, and growing inputs.
// NOTE(review): generic bounds reconstructed after extraction stripped `<...>`.
fn test_multiexp<G: Group + Zeroize>()
where
  G::Scalar: PrimeFieldBits + Zeroize,
{
  let test = |pairs: &[_], sum| {
    // These should automatically determine the best algorithm
    assert_eq!(multiexp(pairs), sum);
    assert_eq!(multiexp_vartime(pairs), sum);

    // Also explicitly test straus/pippenger for each bit size
    if !pairs.is_empty() {
      for window in 1 .. 8 {
        assert_eq!(straus(pairs, window), sum);
        assert_eq!(straus_vartime(pairs, window), sum);
        assert_eq!(pippenger(pairs, window), sum);
        assert_eq!(pippenger_vartime(pairs, window), sum);
      }
    }
  };

  // Test an empty multiexp is identity
  test(&[], G::identity());

  // Test an multiexp of identity/zero elements is identity
  test(&[(G::Scalar::ZERO, G::generator())], G::identity());
  test(&[(G::Scalar::ONE, G::identity())], G::identity());

  // Test a variety of multiexp sizes
  let mut pairs = Vec::with_capacity(1000);
  let mut sum = G::identity();
  for _ in 0 .. 10 {
    // Test a multiexp of a single item
    // On successive loop iterations, this will test a multiexp with an odd number of pairs
    pairs.push((G::Scalar::random(&mut OsRng), G::generator() * G::Scalar::random(&mut OsRng)));
    sum += pairs[pairs.len() - 1].1 * pairs[pairs.len() - 1].0;
    test(&pairs, sum);

    for _ in 0 .. 100 {
      pairs.push((G::Scalar::random(&mut OsRng), G::generator() * G::Scalar::random(&mut OsRng)));
      sum += pairs[pairs.len() - 1].1 * pairs[pairs.len() - 1].0;
    }
    test(&pairs, sum);
  }
}

#[test]
fn test_secp256k1() {
  test_multiexp::<ProjectivePoint>();
  #[cfg(feature = "batch")]
  test_batch::<ProjectivePoint>();
}

#[test]
fn test_ed25519() {
  test_multiexp::<EdwardsPoint>();
  #[cfg(feature = "batch")]
  test_batch::<EdwardsPoint>();
}

#[ignore]
#[test]
fn benchmark() {
  // Activate the processor's boost clock
  for _ in 0 .. 30 {
    test_multiexp::<ProjectivePoint>();
  }

  benchmark_internal::<ProjectivePoint>(true);
  benchmark_internal::<ProjectivePoint>(false);

  benchmark_internal::<EdwardsPoint>(true);
  benchmark_internal::<EdwardsPoint>(false);
}


================================================
FILE: crypto/schnorr/Cargo.toml
================================================
[package]
name = "schnorr-signatures"
version = "0.5.2"
description = "Minimal Schnorr signatures crate hosting common code"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/schnorr"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["schnorr", "ff", "group"]
edition = "2021"
rust-version = "1.79"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
std-shims = { path = "../../common/std-shims", version = "^0.1.1", default-features = false }

rand_core = { version = "0.6", default-features = false }
zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }

transcript = { package = "flexible-transcript", path = "../transcript", version = "^0.3.2", default-features = false, optional = true }

ciphersuite = { path = "../ciphersuite", version = "^0.4.1", default-features = false, features = ["alloc"] }
multiexp = { path =
"../multiexp", version = "0.4", default-features = false, features = ["batch"] } [dev-dependencies] hex = "0.4" rand_core = { version = "0.6", features = ["std"] } sha2 = "0.10" dalek-ff-group = { path = "../dalek-ff-group" } ciphersuite = { path = "../ciphersuite" } [features] aggregate = ["transcript"] std = ["std-shims/std", "rand_core/std", "zeroize/std", "transcript?/std", "ciphersuite/std", "multiexp/std"] default = ["std"] ================================================ FILE: crypto/schnorr/LICENSE ================================================ MIT License Copyright (c) 2021-2023 Luke Parker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: crypto/schnorr/README.md ================================================ # Schnorr Signatures A challenge (and therefore HRAm) agnostic Schnorr signature library. 
This is intended to be used as a primitive by a variety of crates relying on Schnorr signatures, voiding the need to constantly define a Schnorr signature struct with associated functions. This library provides signatures of the `R, s` form. Batch verification is supported via the multiexp crate. Half-aggregation, as defined in , is also supported. This library was [audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf), culminating in commit [669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06). Any subsequent changes have not undergone auditing. This library is usable under no_std, via alloc, when the default features are disabled. ================================================ FILE: crypto/schnorr/src/aggregate.rs ================================================ use std_shims::{ vec::Vec, io::{self, Read, Write}, }; use zeroize::Zeroize; use transcript::{Transcript, SecureDigest, DigestTranscript}; use ciphersuite::{ group::{ ff::{Field, PrimeField}, Group, GroupEncoding, }, Ciphersuite, }; use multiexp::multiexp_vartime; use crate::SchnorrSignature; // Returns a unbiased scalar weight to use on a signature in order to prevent malleability fn weight(digest: &mut DigestTranscript) -> F { let mut bytes = digest.challenge(b"aggregation_weight"); debug_assert_eq!(bytes.len() % 8, 0); // This should be guaranteed thanks to SecureDigest debug_assert!(bytes.len() >= 32); let mut res = F::ZERO; let mut i = 0; // Derive a scalar from enough bits of entropy that bias is < 2^128 // This can't be const due to its usage of a generic // Also due to the usize::try_from, yet that could be replaced with an `as` #[allow(non_snake_case)] let BYTES: usize = usize::try_from((F::NUM_BITS + 128).div_ceil(8)).unwrap(); let mut remaining = BYTES; // We load bits in as u64s const WORD_LEN_IN_BITS: 
usize = 64; const WORD_LEN_IN_BYTES: usize = WORD_LEN_IN_BITS / 8; let mut first = true; while i < remaining { // Shift over the already loaded bits if !first { for _ in 0 .. WORD_LEN_IN_BITS { res += res; } } first = false; // Add the next 64 bits res += F::from(u64::from_be_bytes(bytes[i .. (i + WORD_LEN_IN_BYTES)].try_into().unwrap())); i += WORD_LEN_IN_BYTES; // If we've exhausted this challenge, get another if i == bytes.len() { bytes = digest.challenge(b"aggregation_weight_continued"); remaining -= i; i = 0; } } res } /// Aggregate Schnorr signature as defined in . #[allow(non_snake_case)] #[derive(Clone, PartialEq, Eq, Debug, Zeroize)] pub struct SchnorrAggregate { Rs: Vec, s: C::F, } impl SchnorrAggregate { /// Read a SchnorrAggregate from something implementing Read. pub fn read(reader: &mut R) -> io::Result { let mut len = [0; 4]; reader.read_exact(&mut len)?; #[allow(non_snake_case)] let mut Rs = vec![]; for _ in 0 .. u32::from_le_bytes(len) { Rs.push(C::read_G(reader)?); } Ok(SchnorrAggregate { Rs, s: C::read_F(reader)? }) } /// Write a SchnorrAggregate to something implementing Write. /// /// This will panic if more than 4 billion signatures were aggregated. pub fn write(&self, writer: &mut W) -> io::Result<()> { writer.write_all( &u32::try_from(self.Rs.len()) .expect("more than 4 billion signatures in aggregate") .to_le_bytes(), )?; #[allow(non_snake_case)] for R in &self.Rs { writer.write_all(R.to_bytes().as_ref())?; } writer.write_all(self.s.to_repr().as_ref()) } /// Serialize a SchnorrAggregate, returning a `Vec`. pub fn serialize(&self) -> Vec { let mut buf = vec![]; self.write(&mut buf).unwrap(); buf } #[allow(non_snake_case)] pub fn Rs(&self) -> &[C::G] { self.Rs.as_slice() } /// Perform signature verification. /// /// Challenges must be properly crafted, which means being binding to the public key, nonce, and /// any message. Failure to do so will let a malicious adversary to forge signatures for /// different keys/messages. 
/// /// The DST used here must prevent a collision with whatever hash function produced the /// challenges. #[must_use] pub fn verify(&self, dst: &'static [u8], keys_and_challenges: &[(C::G, C::F)]) -> bool { if self.Rs.len() != keys_and_challenges.len() { return false; } let mut digest = DigestTranscript::::new(dst); digest.domain_separate(b"signatures"); for (_, challenge) in keys_and_challenges { digest.append_message(b"challenge", challenge.to_repr()); } let mut pairs = Vec::with_capacity((2 * keys_and_challenges.len()) + 1); for (i, (key, challenge)) in keys_and_challenges.iter().enumerate() { let z = weight(&mut digest); pairs.push((z, self.Rs[i])); pairs.push((z * challenge, *key)); } pairs.push((-self.s, C::generator())); multiexp_vartime(&pairs).is_identity().into() } } /// A signature aggregator capable of consuming signatures in order to produce an aggregate. #[allow(non_snake_case)] #[derive(Clone, Debug, Zeroize)] pub struct SchnorrAggregator { digest: DigestTranscript, sigs: Vec>, } impl SchnorrAggregator { /// Create a new aggregator. /// /// The DST used here must prevent a collision with whatever hash function produced the /// challenges. pub fn new(dst: &'static [u8]) -> Self { let mut res = Self { digest: DigestTranscript::::new(dst), sigs: vec![] }; res.digest.domain_separate(b"signatures"); res } /// Aggregate a signature. pub fn aggregate(&mut self, challenge: C::F, sig: SchnorrSignature) { self.digest.append_message(b"challenge", challenge.to_repr()); self.sigs.push(sig); } /// Complete aggregation, returning None if none were aggregated. pub fn complete(mut self) -> Option> { if self.sigs.is_empty() { return None; } let mut aggregate = SchnorrAggregate { Rs: Vec::with_capacity(self.sigs.len()), s: C::F::ZERO }; for i in 0 .. 
self.sigs.len() {
      aggregate.Rs.push(self.sigs[i].R);
      // Each s is scaled by a transcript-derived weight to prevent malleability
      aggregate.s += self.sigs[i].s * weight::<_, C::F>(&mut self.digest);
    }
    Some(aggregate)
  }
}


================================================
FILE: crypto/schnorr/src/lib.rs
================================================
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
#![cfg_attr(not(feature = "std"), no_std)]

use core::ops::Deref;

#[cfg(not(feature = "std"))]
#[macro_use]
extern crate alloc;

use std_shims::{
  vec::Vec,
  io::{self, Read, Write},
};

use rand_core::{RngCore, CryptoRng};

use zeroize::{Zeroize, Zeroizing};

use ciphersuite::{
  group::{
    ff::{Field, PrimeField},
    Group, GroupEncoding,
  },
  Ciphersuite,
};

use multiexp::{multiexp_vartime, BatchVerifier};

/// Half-aggregation from <https://eprint.iacr.org/2021/350>.
#[cfg(feature = "aggregate")]
pub mod aggregate;

#[cfg(test)]
mod tests;

/// A Schnorr signature of the form (R, s) where s = r + cx.
///
/// These are intended to be strict. It is generic over Ciphersuite which is for PrimeGroups,
/// and mandates canonical encodings in its read function.
///
/// RFC 8032 has an alternative verification formula, 8R = 8s - 8cX, which is intended to handle
/// torsioned nonces/public keys. Due to this library's strict requirements, such signatures will
/// not be verifiable with this library.
// NOTE(review): generic parameter lists were stripped by the text extraction and have been
// reconstructed; confirm against upstream.
#[allow(non_snake_case)]
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct SchnorrSignature<C: Ciphersuite> {
  pub R: C::G,
  pub s: C::F,
}

impl<C: Ciphersuite> SchnorrSignature<C> {
  /// Read a SchnorrSignature from something implementing Read.
  pub fn read<R: Read>(reader: &mut R) -> io::Result<Self> {
    Ok(SchnorrSignature { R: C::read_G(reader)?, s: C::read_F(reader)? })
  }

  /// Write a SchnorrSignature to something implementing Write.
  pub fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
    writer.write_all(self.R.to_bytes().as_ref())?;
    writer.write_all(self.s.to_repr().as_ref())
  }

  /// Serialize a SchnorrSignature, returning a `Vec<u8>`.
  pub fn serialize(&self) -> Vec<u8> {
    let mut buf = vec![];
    self.write(&mut buf).unwrap();
    buf
  }

  /// Sign a Schnorr signature with the given nonce for the specified challenge.
  ///
  /// This challenge must be properly crafted, which means being binding to the public key, nonce,
  /// and any message. Failure to do so will let a malicious adversary forge signatures for
  /// different keys/messages.
  #[allow(clippy::needless_pass_by_value)] // Prevents further-use of this single-use value
  pub fn sign(
    private_key: &Zeroizing<C::F>,
    nonce: Zeroizing<C::F>,
    challenge: C::F,
  ) -> SchnorrSignature<C> {
    SchnorrSignature {
      // Uses deref instead of * as * returns C::F yet deref returns &C::F, preventing a copy
      R: C::generator() * nonce.deref(),
      s: (challenge * private_key.deref()) + nonce.deref(),
    }
  }

  /// Return the series of pairs whose products sum to zero for a valid signature.
  /// This is intended to be used with a multiexp.
  pub fn batch_statements(&self, public_key: C::G, challenge: C::F) -> [(C::F, C::G); 3] {
    // s = r + ca
    // sG == R + cA
    // R + cA - sG == 0
    [
      // R
      (C::F::ONE, self.R),
      // cA
      (challenge, public_key),
      // -sG
      (-self.s, C::generator()),
    ]
  }

  /// Verify a Schnorr signature for the given key with the specified challenge.
  ///
  /// This challenge must be properly crafted, which means being binding to the public key, nonce,
  /// and any message. Failure to do so will let a malicious adversary forge signatures for
  /// different keys/messages.
  #[must_use]
  pub fn verify(&self, public_key: C::G, challenge: C::F) -> bool {
    multiexp_vartime(&self.batch_statements(public_key, challenge)).is_identity().into()
  }

  /// Queue a signature for batch verification.
  ///
  /// This challenge must be properly crafted, which means being binding to the public key, nonce,
  /// and any message. Failure to do so will let a malicious adversary forge signatures for
  /// different keys/messages.
  pub fn batch_verify<R: RngCore + CryptoRng, I: Copy + Zeroize>(
    &self,
    rng: &mut R,
    batch: &mut BatchVerifier<I, C::G>,
    id: I,
    public_key: C::G,
    challenge: C::F,
  ) {
    batch.queue(rng, id, self.batch_statements(public_key, challenge));
  }
}


================================================
FILE: crypto/schnorr/src/tests/mod.rs
================================================
use core::ops::Deref;

use zeroize::Zeroizing;
use rand_core::OsRng;

use dalek_ff_group::Ed25519;
use ciphersuite::{
  group::{ff::Field, Group},
  Ciphersuite,
};

use multiexp::BatchVerifier;

use crate::SchnorrSignature;
#[cfg(feature = "aggregate")]
use crate::aggregate::{SchnorrAggregator, SchnorrAggregate};

mod rfc8032;

pub(crate) fn sign<C: Ciphersuite>() {
  let private_key = Zeroizing::new(C::random_nonzero_F(&mut OsRng));
  let nonce = Zeroizing::new(C::random_nonzero_F(&mut OsRng));
  let challenge = C::random_nonzero_F(&mut OsRng);

  // Doesn't bother to craft an HRAm
  assert!(SchnorrSignature::<C>::sign(&private_key, nonce, challenge)
    .verify(C::generator() * private_key.deref(), challenge));
}

// The above sign function verifies signing works
// This verifies invalid signatures don't pass, using zero signatures, which should effectively be
// random
pub(crate) fn verify<C: Ciphersuite>() {
  assert!(!SchnorrSignature::<C> { R: C::G::identity(), s: C::F::ZERO }
    .verify(C::generator() * C::random_nonzero_F(&mut OsRng), C::random_nonzero_F(&mut OsRng)));
}

pub(crate) fn batch_verify<C: Ciphersuite>() {
  // Create 5 signatures
  let mut keys = vec![];
  let mut challenges = vec![];
  let mut sigs = vec![];
  for i in 0 ..
5 { keys.push(Zeroizing::new(C::random_nonzero_F(&mut OsRng))); challenges.push(C::random_nonzero_F(&mut OsRng)); sigs.push(SchnorrSignature::::sign( &keys[i], Zeroizing::new(C::random_nonzero_F(&mut OsRng)), challenges[i], )); } // Batch verify { let mut batch = BatchVerifier::new(5); for (i, sig) in sigs.iter().enumerate() { sig.batch_verify(&mut OsRng, &mut batch, i, C::generator() * keys[i].deref(), challenges[i]); } batch.verify_vartime_with_vartime_blame().unwrap(); } // Shift 1 from s from one to another and verify it fails // This test will fail if unique factors aren't used per-signature, hence its inclusion { let mut batch = BatchVerifier::new(5); for (i, mut sig) in sigs.clone().drain(..).enumerate() { if i == 1 { sig.s += C::F::ONE; } if i == 2 { sig.s -= C::F::ONE; } sig.batch_verify(&mut OsRng, &mut batch, i, C::generator() * keys[i].deref(), challenges[i]); } if let Err(blame) = batch.verify_vartime_with_vartime_blame() { assert!((blame == 1) || (blame == 2)); } else { panic!("Batch verification considered malleated signatures valid"); } } } #[cfg(feature = "aggregate")] pub(crate) fn aggregate() { const DST: &[u8] = b"Schnorr Aggregator Test"; // Create 5 signatures let mut keys = vec![]; let mut challenges = vec![]; let mut aggregator = SchnorrAggregator::::new(DST); for i in 0 .. 
5 { keys.push(Zeroizing::new(C::random_nonzero_F(&mut OsRng))); // In practice, this MUST be a secure challenge binding to the nonce, key, and any message challenges.push(C::random_nonzero_F(&mut OsRng)); aggregator.aggregate( challenges[i], SchnorrSignature::::sign( &keys[i], Zeroizing::new(C::random_nonzero_F(&mut OsRng)), challenges[i], ), ); } let aggregate = aggregator.complete().unwrap(); let aggregate = SchnorrAggregate::::read::<&[u8]>(&mut aggregate.serialize().as_ref()).unwrap(); assert!(aggregate.verify( DST, keys .iter() .map(|key| C::generator() * key.deref()) .zip(challenges.iter().copied()) .collect::>() .as_ref(), )); } #[test] fn test() { sign::(); verify::(); batch_verify::(); #[cfg(feature = "aggregate")] aggregate::(); } ================================================ FILE: crypto/schnorr/src/tests/rfc8032.rs ================================================ // RFC 8032 Ed25519 test vectors // The s = r + cx format modernly used for Schnorr signatures was popularized by EdDSA // While not all RFC 8032 signatures will work with this library, any canonical ones will, and // these vectors are canonical use sha2::{Digest, Sha512}; use dalek_ff_group::{Scalar, Ed25519}; use ciphersuite::{group::GroupEncoding, Ciphersuite}; use crate::SchnorrSignature; // Public key, message, signature #[rustfmt::skip] const VECTORS: [(&str, &str, &str); 5] = [ ( "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a", "", "e5564300c360ac729086e2cc806e828a84877f1eb8e5d974d873e065224901555fb8821590a33bacc61e39701cf9b46bd25bf5f0595bbe24655141438e7a100b" ), ( "3d4017c3e843895a92b70aa74d1b7ebc9c982ccf2ec4968cc0cd55f12af4660c", "72", "92a009a9f0d4cab8720e820b5f642540a2b27b5416503f8fb3762223ebdb69da085ac1e43e15996e458f3613d0f11d8c387b2eaeb4302aeeb00d291612bb0c00" ), ( "fc51cd8e6218a1a38da47ed00230f0580816ed13ba3303ac5deb911548908025", "af82", 
"6291d657deec24024827e69c3abe01a30ce548a284743a445e3680d7db5ac3ac18ff9b538d16f290ae67f760984dc6594a7c15e9716ed28dc027beceea1ec40a" ), ( "278117fc144c72340f67d0f2316e8386ceffbf2b2428c9c51fef7c597f1d426e", "08b8b2b733424243760fe426a4b54908632110a66c2f6591eabd3345e3e4eb98fa6e264bf09efe12ee50f8f54e9f77b1e355f6c50544e23fb1433ddf73be84d879de7c0046dc4996d9e773f4bc9efe5738829adb26c81b37c93a1b270b20329d658675fc6ea534e0810a4432826bf58c941efb65d57a338bbd2e26640f89ffbc1a858efcb8550ee3a5e1998bd177e93a7363c344fe6b199ee5d02e82d522c4feba15452f80288a821a579116ec6dad2b3b310da903401aa62100ab5d1a36553e06203b33890cc9b832f79ef80560ccb9a39ce767967ed628c6ad573cb116dbefefd75499da96bd68a8a97b928a8bbc103b6621fcde2beca1231d206be6cd9ec7aff6f6c94fcd7204ed3455c68c83f4a41da4af2b74ef5c53f1d8ac70bdcb7ed185ce81bd84359d44254d95629e9855a94a7c1958d1f8ada5d0532ed8a5aa3fb2d17ba70eb6248e594e1a2297acbbb39d502f1a8c6eb6f1ce22b3de1a1f40cc24554119a831a9aad6079cad88425de6bde1a9187ebb6092cf67bf2b13fd65f27088d78b7e883c8759d2c4f5c65adb7553878ad575f9fad878e80a0c9ba63bcbcc2732e69485bbc9c90bfbd62481d9089beccf80cfe2df16a2cf65bd92dd597b0707e0917af48bbb75fed413d238f5555a7a569d80c3414a8d0859dc65a46128bab27af87a71314f318c782b23ebfe808b82b0ce26401d2e22f04d83d1255dc51addd3b75a2b1ae0784504df543af8969be3ea7082ff7fc9888c144da2af58429ec96031dbcad3dad9af0dcbaaaf268cb8fcffead94f3c7ca495e056a9b47acdb751fb73e666c6c655ade8297297d07ad1ba5e43f1bca32301651339e22904cc8c42f58c30c04aafdb038dda0847dd988dcda6f3bfd15c4b4c4525004aa06eeff8ca61783aacec57fb3d1f92b0fe2fd1a85f6724517b65e614ad6808d6f6ee34dff7310fdc82aebfd904b01e1dc54b2927094b2db68d6f903b68401adebf5a7e08d78ff4ef5d63653a65040cf9bfd4aca7984a74d37145986780fc0b16ac451649de6188a7dbdf191f64b5fc5e2ab47b57f7f7276cd419c17a3ca8e1b939ae49e488acba6b965610b5480109c8b17b80e1b7b750dfc7598d5d5011fd2dcc5600a32ef5b52a1ecc820e308aa342721aac0943bf6686b64b2579376504ccc493d97e6aed3fb0f9cd71a43dd497f01f17c0e2cb3797aa2a2f256656168e6c496afc5fb93246f6b1116398a346f1a641f3b041e989f7914f90cc2c7fff357876e506b50d3
34ba77c225bc307ba537152f3f1610e4eafe595f6d9d90d11faa933a15ef1369546868a7f3a45a96768d40fd9d03412c091c6315cf4fde7cb68606937380db2eaaa707b4c4185c32eddcdd306705e4dc1ffc872eeee475a64dfac86aba41c0618983f8741c5ef68d3a101e8a3b8cac60c905c15fc910840b94c00a0b9d0", "0aab4c900501b3e24d7cdf4663326a3a87df5e4843b2cbdb67cbf6e460fec350aa5371b1508f9f4528ecea23c436d94b5e8fcd4f681e30a6ac00a9704a188a03" ), ( "ec172b93ad5e563bf4932c70e1245034c35467ef2efd4d64ebf819683467e2bf", "ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f", "dc2a4459e7369633a52b1bf277839a00201009a3efbf3ecb69bea2186c26b58909351fc9ac90b3ecfdfbc7c66431e0303dca179c138ac17ad9bef1177331a704" ), ]; #[test] fn test_rfc8032() { for vector in VECTORS { let key = Ed25519::read_G::<&[u8]>(&mut hex::decode(vector.0).unwrap().as_ref()).unwrap(); let sig = SchnorrSignature::::read::<&[u8]>(&mut hex::decode(vector.2).unwrap().as_ref()) .unwrap(); let hram = Sha512::new_with_prefix( [sig.R.to_bytes().as_ref(), &key.to_bytes(), &hex::decode(vector.1).unwrap()].concat(), ); assert!(sig.verify(key, Scalar::from_hash(hram))); } } ================================================ FILE: crypto/schnorrkel/Cargo.toml ================================================ [package] name = "frost-schnorrkel" version = "0.2.0" description = "modular-frost Algorithm compatible with Schnorrkel" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/crypto/schnorrkel" authors = ["Luke Parker "] keywords = ["frost", "multisig", "threshold", "schnorrkel"] edition = "2021" rust-version = "1.80" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] rand_core = "0.6" zeroize = "^1.5" transcript = { package = "flexible-transcript", path = "../transcript", version = "^0.3.2", features = ["merlin"] } group = "0.13" dalek-ff-group = { path = "../dalek-ff-group" } ciphersuite = { path = 
"../ciphersuite", version = "^0.4.1", features = ["std"] } schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "^0.5.1" } frost = { path = "../frost", package = "modular-frost", version = "^0.10.0", features = ["ristretto"] } schnorrkel = { version = "0.11" } [dev-dependencies] frost = { path = "../frost", package = "modular-frost", features = ["tests"] } ================================================ FILE: crypto/schnorrkel/LICENSE ================================================ MIT License Copyright (c) 2023 Luke Parker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: crypto/schnorrkel/README.md ================================================ # FROST Schnorrkel A Schnorrkel algorithm for [modular-frost](https://docs.rs/modular-frost). 
While the Schnorrkel algorithm has not been audited, the underlying FROST implementation was [audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf), culminating in commit [669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06). Any subsequent changes have not undergone auditing. ================================================ FILE: crypto/schnorrkel/src/lib.rs ================================================ #![cfg_attr(docsrs, feature(doc_cfg))] #![doc = include_str!("../README.md")] use std::io::{self, Read}; use rand_core::{RngCore, CryptoRng}; use zeroize::Zeroizing; use transcript::{Transcript, MerlinTranscript}; use dalek_ff_group::Ristretto; use ciphersuite::{ group::{ff::PrimeField, GroupEncoding}, Ciphersuite, }; use schnorr::SchnorrSignature; use ::frost::{ Participant, ThresholdKeys, ThresholdView, FrostError, algorithm::{Hram, Algorithm, Schnorr}, }; /// The [modular-frost](https://docs.rs/modular-frost) library. pub mod frost { pub use ::frost::*; } use schnorrkel::{PublicKey, Signature, context::SigningTranscript, signing_context}; type RistrettoPoint = ::G; type Scalar = ::F; #[cfg(test)] mod tests; #[derive(Clone)] struct SchnorrkelHram; impl Hram for SchnorrkelHram { #[allow(non_snake_case)] fn hram(R: &RistrettoPoint, A: &RistrettoPoint, m: &[u8]) -> Scalar { let ctx_len = usize::try_from(u32::from_le_bytes(m[0 .. 4].try_into().expect("malformed message"))) .unwrap(); let mut t = signing_context(&m[4 .. 
(4 + ctx_len)]).bytes(&m[(4 + ctx_len) ..]); t.proto_name(b"Schnorr-sig"); let convert = |point: &RistrettoPoint| PublicKey::from_bytes(&point.to_bytes()).unwrap().into_compressed(); t.commit_point(b"sign:pk", &convert(A)); t.commit_point(b"sign:R", &convert(R)); Scalar::from_repr(t.challenge_scalar(b"sign:c").to_bytes()).unwrap() } } /// FROST Schnorrkel algorithm. #[derive(Clone)] pub struct Schnorrkel { context: &'static [u8], schnorr: Schnorr, msg: Option>, } impl Schnorrkel { /// Create a new algorithm with the specified context. /// /// If the context is greater than or equal to 4 GB in size, this will panic. pub fn new(context: &'static [u8]) -> Schnorrkel { Schnorrkel { context, schnorr: Schnorr::new(MerlinTranscript::new(b"FROST Schnorrkel")), msg: None, } } } impl Algorithm for Schnorrkel { type Transcript = MerlinTranscript; type Addendum = (); type Signature = Signature; fn transcript(&mut self) -> &mut Self::Transcript { self.schnorr.transcript() } fn nonces(&self) -> Vec::G>> { self.schnorr.nonces() } fn preprocess_addendum( &mut self, _: &mut R, _: &ThresholdKeys, ) { } fn read_addendum(&self, _: &mut R) -> io::Result { Ok(()) } fn process_addendum( &mut self, _: &ThresholdView, _: Participant, (): (), ) -> Result<(), FrostError> { Ok(()) } fn sign_share( &mut self, params: &ThresholdView, nonce_sums: &[Vec], nonces: Vec>, msg: &[u8], ) -> Scalar { self.msg = Some(msg.to_vec()); self.schnorr.sign_share( params, nonce_sums, nonces, &[ &u32::try_from(self.context.len()).expect("context exceeded 2^32 bytes").to_le_bytes(), self.context, msg, ] .concat(), ) } #[must_use] fn verify( &self, group_key: RistrettoPoint, nonces: &[Vec], sum: Scalar, ) -> Option { let mut sig = (SchnorrSignature:: { R: nonces[0][0], s: sum }).serialize(); sig[63] |= 1 << 7; Some(Signature::from_bytes(&sig).unwrap()).filter(|sig| { PublicKey::from_bytes(&group_key.to_bytes()) .unwrap() .verify(&mut signing_context(self.context).bytes(self.msg.as_ref().unwrap()), sig) .is_ok() }) 
} fn verify_share( &self, verification_share: RistrettoPoint, nonces: &[Vec], share: Scalar, ) -> Result, ()> { self.schnorr.verify_share(verification_share, nonces, share) } } ================================================ FILE: crypto/schnorrkel/src/tests.rs ================================================ use rand_core::OsRng; use group::GroupEncoding; use frost::{ Participant, tests::{key_gen, algorithm_machines, sign}, }; use schnorrkel::{keys::PublicKey, context::SigningContext}; use crate::Schnorrkel; #[test] fn test() { const CONTEXT: &[u8] = b"FROST Schnorrkel Test"; const MSG: &[u8] = b"Hello, World!"; let keys = key_gen(&mut OsRng); let key = keys[&Participant::new(1).unwrap()].group_key(); let algorithm = Schnorrkel::new(CONTEXT); let machines = algorithm_machines(&mut OsRng, &algorithm, &keys); let signature = sign(&mut OsRng, &algorithm, keys, machines, MSG); let key = PublicKey::from_bytes(key.to_bytes().as_ref()).unwrap(); key.verify(&mut SigningContext::new(CONTEXT).bytes(MSG), &signature).unwrap() } ================================================ FILE: crypto/transcript/Cargo.toml ================================================ [package] name = "flexible-transcript" version = "0.3.4" description = "A simple transcript trait definition, along with viable options" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/crypto/transcript" authors = ["Luke Parker "] keywords = ["transcript"] edition = "2021" rust-version = "1.66" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] std-shims = { path = "../../common/std-shims", version = "0.1.4", default-features = false } zeroize = { version = "^1.5", default-features = false } digest = { version = "0.10", default-features = false, features = ["core-api"] } blake2 = { version = "0.10", default-features = false, optional = true } merlin = { version = "3", default-features = false, optional = true } 
[dev-dependencies] sha2 = { version = "0.10", default-features = false } blake2 = { version = "0.10", default-features = false } [features] std = ["std-shims/std", "zeroize/std", "digest/std", "blake2?/std", "merlin?/std"] recommended = ["blake2"] tests = [] default = ["std"] ================================================ FILE: crypto/transcript/LICENSE ================================================ MIT License Copyright (c) 2022-2023 Luke Parker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: crypto/transcript/README.md ================================================ # Flexible Transcript Flexible Transcript is a crate offering: - `Transcript`, a trait offering functions transcripts should implement. - `DigestTranscript`, a competent transcript format instantiated against a provided hash function. - `MerlinTranscript`, a wrapper of `merlin` into the trait (available via the `merlin` feature). 
- `RecommendedTranscript`, a transcript recommended for usage in applications. Currently, this is `DigestTranscript` (available via the `recommended` feature). The trait was created while working on an IETF draft which defined an incredibly simple transcript format. Extensions of the protocol would quickly require a more competent format, yet implementing the one specified was mandatory to meet the specification. Accordingly, the library implementing the draft defined an `IetfTranscript`, dropping labels and not allowing successive challenges, yet thanks to the trait, allowed protocols building on top to provide their own transcript format as needed. `DigestTranscript` takes in any hash function implementing `Digest`, offering a secure transcript format around it. All items are prefixed by a flag, denoting their type, and their length. `MerlinTranscript` was used to justify the API, and if any issues existed with `DigestTranscript`, enable a fallback. It was also meant as a way to be compatible with existing Rust projects using `merlin`. This library was [audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf), culminating in commit [669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06). Any subsequent changes have not undergone auditing. This library is usable under no_std. 
================================================ FILE: crypto/transcript/src/lib.rs ================================================ #![cfg_attr(docsrs, feature(doc_cfg))] #![doc = include_str!("../README.md")] #![no_std] #[allow(unused_imports)] use std_shims::prelude::*; use zeroize::Zeroize; use digest::{ typenum::{ consts::U32, marker_traits::NonZero, type_operators::IsGreaterOrEqual, operator_aliases::GrEq, }, core_api::BlockSizeUser, Digest, Output, HashMarker, }; #[cfg(feature = "merlin")] mod merlin; #[cfg(feature = "merlin")] pub use crate::merlin::MerlinTranscript; /// Tests for a transcript. #[cfg(any(test, feature = "tests"))] pub mod tests; /// A transcript trait valid over a variety of transcript formats. pub trait Transcript: Send + Clone { type Challenge: Send + Sync + Clone + AsRef<[u8]>; /// Create a new transcript with the specified name. fn new(name: &'static [u8]) -> Self; /// Apply a domain separator to the transcript. fn domain_separate(&mut self, label: &'static [u8]); /// Append a message to the transcript. fn append_message>(&mut self, label: &'static [u8], message: M); /// Produce a challenge. /// /// Implementors MUST update the transcript as it does so, preventing the same challenge from /// being generated multiple times. fn challenge(&mut self, label: &'static [u8]) -> Self::Challenge; /// Produce a RNG seed. /// /// Helper function for parties needing to generate random data from an agreed upon state. /// /// Implementors MAY internally call the challenge function for the needed bytes, and accordingly /// produce a transcript conflict between two transcripts, one which called challenge(label) and /// one which called rng_seed(label) at the same point. 
fn rng_seed(&mut self, label: &'static [u8]) -> [u8; 32]; } #[derive(Clone, Copy)] enum DigestTranscriptMember { Name, Domain, Label, Value, Challenge, Continued, Challenged, } impl DigestTranscriptMember { fn as_u8(&self) -> u8 { match self { DigestTranscriptMember::Name => 0, DigestTranscriptMember::Domain => 1, DigestTranscriptMember::Label => 2, DigestTranscriptMember::Value => 3, DigestTranscriptMember::Challenge => 4, DigestTranscriptMember::Continued => 5, DigestTranscriptMember::Challenged => 6, } } } /// A trait defining cryptographic Digests with at least a 256-bit output size, assuming at least a /// 128-bit level of security accordingly. pub trait SecureDigest: Digest + HashMarker {} impl SecureDigest for D where // This just lets us perform the comparison D::OutputSize: IsGreaterOrEqual, // Perform the comparison and make sure it's true (not zero), meaning D::OutputSize is >= U32 // This should be U32 as it's length in bytes, not bits GrEq: NonZero, { } /// A simple transcript format constructed around the specified hash algorithm. 
#[derive(Clone, Debug)] pub struct DigestTranscript(D); impl DigestTranscript { fn append(&mut self, kind: DigestTranscriptMember, value: &[u8]) { self.0.update([kind.as_u8()]); // Assumes messages don't exceed 16 exabytes self.0.update(u64::try_from(value.len()).unwrap().to_le_bytes()); self.0.update(value); } } impl Transcript for DigestTranscript { type Challenge = Output; fn new(name: &'static [u8]) -> Self { let mut res = DigestTranscript(D::new()); res.append(DigestTranscriptMember::Name, name); res } fn domain_separate(&mut self, label: &'static [u8]) { self.append(DigestTranscriptMember::Domain, label); } fn append_message>(&mut self, label: &'static [u8], message: M) { self.append(DigestTranscriptMember::Label, label); self.append(DigestTranscriptMember::Value, message.as_ref()); } fn challenge(&mut self, label: &'static [u8]) -> Self::Challenge { self.append(DigestTranscriptMember::Challenge, label); let mut cloned = self.0.clone(); // Explicitly fork these transcripts to prevent length extension attacks from being possible // (at least, without the additional ability to remove a byte from a finalized hash) self.0.update([DigestTranscriptMember::Continued.as_u8()]); cloned.update([DigestTranscriptMember::Challenged.as_u8()]); cloned.finalize() } fn rng_seed(&mut self, label: &'static [u8]) -> [u8; 32] { let mut seed = [0; 32]; seed.copy_from_slice(&self.challenge(label)[.. 
32]); seed } } // Digest doesn't implement Zeroize // Implement Zeroize for DigestTranscript by writing twice the block size to the digest in an // attempt to overwrite the internal hash state/any leftover bytes impl Zeroize for DigestTranscript where D: BlockSizeUser, { fn zeroize(&mut self) { // Update in 4-byte chunks to reduce call quantity and enable word-level update optimizations const WORD_SIZE: usize = 4; // block_size returns the block_size in bytes // Use a ceil div in case the block size isn't evenly divisible by our word size let words = D::block_size().div_ceil(WORD_SIZE); for _ in 0 .. (2 * words) { self.0.update([255; WORD_SIZE]); } // Hopefully, the hash state is now overwritten to the point no data is recoverable // These writes may be optimized out if they're never read // Attempt to get them marked as read fn mark_read(transcript: &DigestTranscript) { // Just get a challenge from the state let mut challenge = core::hint::black_box(transcript.0.clone().finalize()); challenge.as_mut().zeroize(); } mark_read(self) } } /// The recommended transcript, guaranteed to be secure against length-extension attacks. #[cfg(feature = "recommended")] pub type RecommendedTranscript = DigestTranscript; ================================================ FILE: crypto/transcript/src/merlin.rs ================================================ use core::fmt::{Debug, Formatter}; use crate::Transcript; /// A wrapper around a Merlin transcript which satisfiees the Transcript API. /// /// Challenges are fixed to 64 bytes, despite Merlin supporting variable length challenges. /// /// This implementation is intended to remain in the spirit of Merlin more than it's intended to be /// in the spirit of the provided DigestTranscript. While DigestTranscript uses flags for each of /// its different field types, the domain_separate function simply appends a message with a label /// of "dom-sep", Merlin's preferred domain separation label. 
Since this could introduce transcript /// conflicts between a domain separation and a message with a label of "dom-sep", the /// append_message function uses an assertion to prevent such labels. #[derive(Clone)] pub struct MerlinTranscript(merlin::Transcript); // Merlin doesn't implement Debug so provide a stub which won't panic impl Debug for MerlinTranscript { fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), core::fmt::Error> { fmt.debug_struct("MerlinTranscript").finish_non_exhaustive() } } impl Transcript for MerlinTranscript { // Uses a challenge length of 64 bytes to support wide reduction on commonly used EC scalars // From a security level standpoint (Merlin targets 128-bits), this should just be 32 bytes // From a Merlin standpoint, this should be variable per call // From a practical standpoint, this should be practical type Challenge = [u8; 64]; fn new(name: &'static [u8]) -> Self { MerlinTranscript(merlin::Transcript::new(name)) } fn domain_separate(&mut self, label: &'static [u8]) { self.0.append_message(b"dom-sep", label); } fn append_message>(&mut self, label: &'static [u8], message: M) { assert!( label != "dom-sep".as_bytes(), "\"dom-sep\" is reserved for the domain_separate function", ); self.0.append_message(label, message.as_ref()); } fn challenge(&mut self, label: &'static [u8]) -> Self::Challenge { let mut challenge = [0; 64]; self.0.challenge_bytes(label, &mut challenge); challenge } fn rng_seed(&mut self, label: &'static [u8]) -> [u8; 32] { let mut seed = [0; 32]; seed.copy_from_slice(&self.challenge(label)[.. 32]); seed } } ================================================ FILE: crypto/transcript/src/tests.rs ================================================ use crate::Transcript; /// Test the sanity of a transcript. /// /// This will panic if sanity checks fail. 
pub fn test_transcript>() { // Ensure distinct names cause distinct challenges { let mut t1 = T::new(b"1"); let mut t2 = T::new(b"2"); assert!(t1.challenge(b"c") != t2.challenge(b"c")); } // Ensure names can't lead into labels { let mut t1 = T::new(b"12"); let c1 = t1.challenge(b"c"); let mut t2 = T::new(b"1"); let c2 = t2.challenge(b"2c"); assert!(c1 != c2); } let t = || T::new(b"name"); let c = |mut t: T| t.challenge(b"c"); // Ensure domain separators do something { let mut t1 = t(); t1.domain_separate(b"d"); assert!(c(t1) != c(t())); } // Ensure distinct domain separators create distinct challenges { let mut t1 = t(); let mut t2 = t(); t1.domain_separate(b"d1"); t2.domain_separate(b"d2"); assert!(c(t1) != c(t2)); } // Ensure distinct messages create distinct challenges { // By label { let mut t1 = t(); let mut t2 = t(); t1.append_message(b"msg", b"a"); t2.append_message(b"msg", b"b"); assert!(c(t1) != c(t2)); } // By value { let mut t1 = t(); let mut t2 = t(); t1.append_message(b"a", b"val"); t2.append_message(b"b", b"val"); assert!(c(t1) != c(t2)); } } // Ensure challenges advance the transcript { let mut t = t(); let c1 = t.challenge(b"c"); let c2 = t.challenge(b"c"); assert!(c1 != c2); } // Ensure distinct challenge labels produce distinct challenges assert!(t().challenge(b"a") != t().challenge(b"b")); // Ensure RNG seed calls advance the transcript { let mut t = t(); let s1 = t.rng_seed(b"s"); let s2 = t.rng_seed(b"s"); assert!(s1 != s2); } // Ensure distinct RNG seed labels produce distinct seeds assert!(t().rng_seed(b"a") != t().rng_seed(b"b")); } #[test] fn test_digest() { test_transcript::>(); test_transcript::>(); } #[cfg(feature = "recommended")] #[test] fn test_recommended() { test_transcript::(); } #[cfg(feature = "merlin")] #[test] fn test_merlin() { test_transcript::(); } ================================================ FILE: deny.toml ================================================ [advisories] version = 2 db-path = "~/.cargo/advisory-db" db-urls 
= ["https://github.com/rustsec/advisory-db"] yanked = "deny" ignore = [ "RUSTSEC-2022-0061", # https://github.com/serai-dex/serai/227 "RUSTSEC-2024-0370", # proc-macro-error is unmaintained "RUSTSEC-2024-0436", # paste is unmaintained "RUSTSEC-2024-0384", # instant is unmaintained, fixed on `next` "RUSTSEC-2025-0057", # fxhash is unmaintained, fixed with bytecodealliance/wasmtime/pull/11634 ] [licenses] version = 2 allow = [ # Effective public domain "CC0-1.0", "Unlicense", # Attribution required "MIT", "MITNFA", "BSD-2-Clause", "BSD-3-Clause", "ISC", "Zlib", "Unicode-3.0", "CDLA-Permissive-2.0", # Non-invasive copyleft # "MPL-2.0", # Commented as it's not currently in-use within the Serai tree "Apache-2.0", "Apache-2.0 WITH LLVM-exception", "GPL-3.0-or-later WITH Classpath-exception-2.0", ] exceptions = [ { allow = ["AGPL-3.0-only"], name = "serai-env" }, { allow = ["AGPL-3.0-only"], name = "ethereum-serai" }, { allow = ["AGPL-3.0-only"], name = "serai-ethereum-relayer" }, { allow = ["AGPL-3.0-only"], name = "serai-message-queue" }, { allow = ["AGPL-3.0-only"], name = "serai-processor-messages" }, { allow = ["AGPL-3.0-only"], name = "serai-processor" }, { allow = ["AGPL-3.0-only"], name = "tributary-chain" }, { allow = ["AGPL-3.0-only"], name = "serai-coordinator" }, { allow = ["AGPL-3.0-only"], name = "pallet-session" }, { allow = ["AGPL-3.0-only"], name = "serai-coins-pallet" }, { allow = ["AGPL-3.0-only"], name = "serai-dex-pallet" }, { allow = ["AGPL-3.0-only"], name = "serai-genesis-liquidity-pallet" }, { allow = ["AGPL-3.0-only"], name = "serai-emissions-pallet" }, { allow = ["AGPL-3.0-only"], name = "serai-economic-security-pallet" }, { allow = ["AGPL-3.0-only"], name = "serai-in-instructions-pallet" }, { allow = ["AGPL-3.0-only"], name = "serai-validator-sets-pallet" }, { allow = ["AGPL-3.0-only"], name = "serai-signals-pallet" }, { allow = ["AGPL-3.0-only"], name = "serai-runtime" }, { allow = ["AGPL-3.0-only"], name = "serai-node" }, { allow = 
["AGPL-3.0-only"], name = "serai-orchestrator" }, { allow = ["AGPL-3.0-only"], name = "mini-serai" }, { allow = ["AGPL-3.0-only"], name = "serai-docker-tests" }, { allow = ["AGPL-3.0-only"], name = "serai-message-queue-tests" }, { allow = ["AGPL-3.0-only"], name = "serai-processor-tests" }, { allow = ["AGPL-3.0-only"], name = "serai-coordinator-tests" }, { allow = ["AGPL-3.0-only"], name = "serai-full-stack-tests" }, { allow = ["AGPL-3.0-only"], name = "serai-reproducible-runtime-tests" }, ] [[licenses.clarify]] name = "ring" version = "*" expression = "MIT AND ISC AND OpenSSL" license-files = [ { path = "LICENSE", hash = 0xbd0eed23 } ] [bans] multiple-versions = "warn" wildcards = "warn" highlight = "all" deny = [ { name = "serde_derive", version = ">=1.0.172, <1.0.185" }, { name = "hashbrown", version = "=0.15.0" }, # Legacy which _no one_ should use anymore { name = "is-terminal", version = "*" }, # Stop introduction into the tree without realizing it { name = "once_cell_polyfill", version = "*" }, ] [sources] unknown-registry = "deny" unknown-git = "deny" allow-registry = ["https://github.com/rust-lang/crates.io-index"] allow-git = [ "https://github.com/rust-lang-nursery/lazy-static.rs", "https://github.com/monero-oxide/monero-oxide", "https://github.com/serai-dex/patch-polkadot-sdk", ] ================================================ FILE: docs/.gitignore ================================================ _site/ .sass-cache/ .jekyll-cache/ .jekyll-metadata .bundle/ vendor/ ================================================ FILE: docs/.ruby-version ================================================ 3.3.4 ================================================ FILE: docs/Gemfile ================================================ source 'https://rubygems.org' gem "jekyll", "~> 4.3.3" gem "just-the-docs", "0.8.2" ================================================ FILE: docs/_config.yml ================================================ title: Serai Documentation description: 
Documentation for the Serai protocol. theme: just-the-docs url: https://docs.serai.exchange callouts: warning: title: Warning color: red definition: title: Definition color: blue ================================================ FILE: docs/amm/index.md ================================================ --- title: Automatic Market Makers layout: default nav_order: 2 --- # Automatic Market Makers *text on how AMMs work* Serai uses a symmetric liquidity pool with the `xy=k` formula. Concentrated liquidity would presumably offer less slippage on swaps, and there are [discussions to evolve to a concentrated liquidity/order book environment](https://github.com/serai-dex/serai/issues/420). Unfortunately, it effectively requires active management of provided liquidity. This disenfranchises small liquidity providers who may not have the knowledge and resources necessary to perform such management. Since Serai is expected to have a community-bootstrapped start, starting with concentrated liquidity would accordingly be contradictory. ================================================ FILE: docs/cross_chain/index.md ================================================ --- title: Cross-Chain Architecture layout: default nav_order: 3 --- # Cross-Chain Architecture ================================================ FILE: docs/economics/genesis.md ================================================ --- title: Genesis layout: default nav_order: 1 parent: Economics --- ================================================ FILE: docs/economics/index.md ================================================ --- title: Economics layout: default nav_order: 4 has_children: true --- # Economics Serai's economics change depending on which of three eras is currently occurring. ## Genesis Era The network starts with the "Genesis" era, where the goal of the network is to attract the liquidity necessary to facilitate swaps. This period will last for 30 days and will let anyone add liquidity to the protocol. 
Only with its conclusion will SRI start being distributed. After the Genesis era, the network enters the "Pre-Economic Security" era. ## Pre-Economic Security {: .definition-title } > Definition: Economic Security > > Economic security is derived from it being unprofitable to misbehave. > This is by the economic penalty which is presumed to occur upon misbehavior > exceeding the value which would presumably be gained. > Accordingly, rational actors would behave properly, causing the protocol to > maintain its integrity. > > For Serai specifically, the stake required to produce unintended signatures > must exceed the value accessible via producing unintended signatures. With liquidity provided, and swaps enabled, the goal is to have validators stake sufficiently for economic security to be achieved. This is primarily via offering freshly minted, staked SRI to would-be validators who decide to swap external coins for their stake. ## Post-Economic Security Having achieved economic security, the protocol changes its economics one last time (barring future upgrades to the protocol) to a 'normal' state of operations. ================================================ FILE: docs/economics/post.md ================================================ --- title: Post-Economic Security layout: default nav_order: 3 parent: Economics --- ================================================ FILE: docs/economics/pre.md ================================================ --- title: Pre-Economic Security layout: default nav_order: 2 parent: Economics --- ================================================ FILE: docs/index.md ================================================ --- title: Home layout: home nav_order: 1 --- {: .warning } This documentation site is still under active development and may have missing sections, errors, and typos. Even once this documentation site is 'complete', it may become out-of-date (as Serai is an evolving protocol yet to release) or have minor errors. 
# Serai Serai is a fairly launched cross-chain decentralized exchange, integrating Bitcoin (BTC), Ethereum (ETH, DAI), and Monero (XMR). The Serai mainnet has yet to launch, and until then, all details are subject to change. Prior to the Serai mainnet launching, SRI, Serai's native coin, will not exist. As a fairly launched project, SRI will have no ICO, no IEO, no presale, no developers' tax/fund, and no airdrop for out-of-mainnet activity. Out-of-mainnet activity includes: - Being a community member (such as on Discord or on Twitter) - Participating in testnets - Contributing to the GitHub None of these will be awarded any airdrop. All distributions of SRI will happen on-chain per the protocols' defined rules, based on on-chain activity. ================================================ FILE: docs/infrastructure/coordinator.md ================================================ --- title: Coordinator layout: default nav_order: 3 parent: Infrastructure --- # Coordinator The coordinator is a local service which communicates with other validators' coordinators. It provides a verifiable broadcast layer for various consensus messages, such as agreement on external blockchains, key generation and signing protocols, and the latest Serai block. The verifiable broadcast layer is implemented via a blockchain, referred to as a Tributary, which is agreed upon using Tendermint consensus. This consensus is not as offered by Tendermint Core/CometBFT, as used in the Cosmos SDK (historically/presently), yet by our own implementation designed to be used as a library and not as another daemon. Tributaries are ephemeral, only used by the current validators, and deleted upon the next epoch. All of the results from it are verifiable via the external network and the Serai blockchain alone. 
================================================ FILE: docs/infrastructure/index.md ================================================ --- title: Infrastructure layout: default nav_order: 6 has_children: true --- ================================================ FILE: docs/infrastructure/message_queue.md ================================================ --- title: Message Queue layout: default nav_order: 1 parent: Infrastructure --- # Message Queue The Message Queue is a microservice to authenticate and relay messages between services. It offers just three functions: 1) Queue a message. 2) Receive the next message. 3) Acknowledge a message, removing it from the queue. This ensures messages are delivered between services, with their order preserved. This also ensures that if a service reboots while handling a message, it'll still handle the message once rebooted (and the message will not be lost). The Message Queue also aims to offer increased liveliness and performance. If services directly communicated, the rate at which one service could operate would always be bottlenecked by the service it communicates with. If the receiving service ever went offline, the sending service wouldn't be able to deliver messages until the receiver came back online, halting its own work. By defining a dedicated microservice, with a lack of complex logic, it's much less likely to go offline or suffer from degraded performance. ================================================ FILE: docs/infrastructure/processor.md ================================================ --- title: Processor layout: default nav_order: 2 parent: Infrastructure --- # Processor The processor performs several important tasks with regards to the external network. Each of them are documented in the following sections. 
## Key Generation ## Scanning ## Signing Batches ## Planning Transactions ## Cosigning ================================================ FILE: docs/infrastructure/serai.md ================================================ --- title: Serai layout: default nav_order: 4 parent: Infrastructure --- ================================================ FILE: docs/integrating/index.md ================================================ --- title: Integrating with Serai layout: default nav_order: 7 has_children: true --- ================================================ FILE: docs/protocol_changes/index.md ================================================ --- title: Protocol Changes layout: default nav_order: 5 --- # Protocol Changes The protocol has no central authority nor organization nor actors (such as liquidity providers/validators) who can compel new protocol rules. The Serai protocol is as-written with all granted functionality and declared rules present. Validators are explicitly granted the ability to signal for two things to occur: ### 1) Halt another validator set. This will presumably occur if another validator set turns malicious and is the expected incident response in order to apply an economic penalty of ideally greater value than damage wreaked. Halting a validator set prevents further publication of `Batch`s, preventing improper actions on the Serai blockchain, and preventing validators from unstaking (as unstaking only occurs once future validator sets have accepted responsibility, and accepting responsibility requires `Batch` publication). This effectively burns the malicious validators' stake. ### 2) Retire the protocol. A supermajority of validators may favor a signal (an opaque 32-byte ID). A common signal gaining sufficient favor will cause the protocol to stop producing blocks in two weeks. Nodes will presumably, as individual entities, hard fork to new consensus rules. 
These rules presumably will remove the rule to stop producing blocks in two weeks, they may declare new validators, and they may declare new functionality entirely. While nodes individually hard fork, across every hard fork the state of the various `sriXYZ` coins (such as `sriBTC`, `sriETH`, `sriDAI`, and `sriXMR`) remains intact (unless the new rules modify such state). These coins can still be burned with instructions (unless the new rules prevent that) and if a validator set doesn't send `XYZ` as expected, they can be halted (effectively burning their `SRI` stake). Accordingly, every node decides if and how to participate in the future, with the abilities and powers they declare themselves to have. ================================================ FILE: docs/validator/index.md ================================================ --- title: Running a Validator layout: default nav_order: 8 has_children: true --- ================================================ FILE: message-queue/Cargo.toml ================================================ [package] name = "serai-message-queue" version = "0.1.0" description = "A message queue for Serai focused on consistency" license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/message-queue" authors = ["Luke Parker "] keywords = [] edition = "2021" publish = false [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] # Macros once_cell = { version = "1", default-features = false } # Encoders hex = { version = "0.4", default-features = false, features = ["std"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } # Libs zeroize = { version = "1", default-features = false, features = ["std"] } rand_core = { version = "0.6", default-features = false, features = ["std"] } # Cryptography transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, 
features = ["std", "recommended"] } dalek-ff-group = { path = "../crypto/dalek-ff-group", default-features = false, features = ["std"] } ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std"] } schnorr-signatures = { path = "../crypto/schnorr", default-features = false, features = ["std"] } # Application log = { version = "0.4", default-features = false, features = ["std"] } env_logger = { version = "0.10", default-features = false, features = ["humantime"] } # Uses a single threaded runtime since this shouldn't ever be CPU-bound tokio = { version = "1", default-features = false, features = ["rt", "time", "io-util", "net", "macros"] } zalloc = { path = "../common/zalloc" } serai-db = { path = "../common/db", optional = true } serai-env = { path = "../common/env" } serai-primitives = { path = "../substrate/primitives", features = ["borsh"] } [features] parity-db = ["serai-db/parity-db"] rocksdb = ["serai-db/rocksdb"] ================================================ FILE: message-queue/LICENSE ================================================ AGPL-3.0-only license Copyright (c) 2023 Luke Parker This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License Version 3 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see . ================================================ FILE: message-queue/README.md ================================================ # Message Log A message log for various services to communicate over. Each message is checked to be of the claimed origin. Then, it's added to the recipient's message queue. 
This queue is sequentially handled, FIFO, only dropping messages once the recipient acknowledges it's been handled. A client which publishes an event specifies its own ID for the publication. If multiple publications with the same ID occur, they are assumed repeats and dropped. This library always panics as its error-cases should be unreachable, given its intranet status. ================================================ FILE: message-queue/src/client.rs ================================================ use core::ops::Deref; use zeroize::{Zeroize, Zeroizing}; use rand_core::OsRng; use dalek_ff_group::Ristretto; use ciphersuite::{ group::ff::{Field, PrimeField}, Ciphersuite, }; use schnorr_signatures::SchnorrSignature; use tokio::{ io::{AsyncReadExt, AsyncWriteExt}, net::TcpStream, }; use serai_env as env; #[rustfmt::skip] use crate::{Service, Metadata, QueuedMessage, MessageQueueRequest, message_challenge, ack_challenge}; pub struct MessageQueue { pub service: Service, priv_key: Zeroizing<::F>, pub_key: ::G, url: String, } impl MessageQueue { pub fn new( service: Service, mut url: String, priv_key: Zeroizing<::F>, ) -> MessageQueue { // Allow MESSAGE_QUEUE_RPC to either be a full URL or just a hostname // While we could stitch together multiple variables, our control over this service makes this // fine if !url.contains(':') { url += ":2287"; } MessageQueue { service, pub_key: Ristretto::generator() * priv_key.deref(), priv_key, url } } pub fn from_env(service: Service) -> MessageQueue { let url = env::var("MESSAGE_QUEUE_RPC").expect("message-queue RPC wasn't specified"); let priv_key: Zeroizing<::F> = { let key_str = Zeroizing::new(env::var("MESSAGE_QUEUE_KEY").expect("message-queue key wasn't specified")); let key_bytes = Zeroizing::new( hex::decode(&key_str).expect("invalid message-queue key specified (wasn't hex)"), ); let mut bytes = <::F as PrimeField>::Repr::default(); bytes.copy_from_slice(&key_bytes); let key = Zeroizing::new( Option::from(<::F as 
PrimeField>::from_repr(bytes)) .expect("invalid message-queue key specified"), ); bytes.zeroize(); key }; Self::new(service, url, priv_key) } #[must_use] async fn send(socket: &mut TcpStream, msg: MessageQueueRequest) -> bool { let msg = borsh::to_vec(&msg).unwrap(); let Ok(()) = socket.write_all(&u32::try_from(msg.len()).unwrap().to_le_bytes()).await else { log::warn!("couldn't send the message len"); return false; }; let Ok(()) = socket.write_all(&msg).await else { log::warn!("couldn't write the message"); return false; }; true } pub async fn queue(&self, metadata: Metadata, msg: Vec) { // TODO: Should this use OsRng? Deterministic or deterministic + random may be better. let nonce = Zeroizing::new(::F::random(&mut OsRng)); let nonce_pub = Ristretto::generator() * nonce.deref(); let sig = SchnorrSignature::::sign( &self.priv_key, nonce, message_challenge( metadata.from, self.pub_key, metadata.to, &metadata.intent, &msg, nonce_pub, ), ) .serialize(); let msg = MessageQueueRequest::Queue { meta: metadata, msg, sig }; let mut first = true; loop { // Sleep, so we don't hammer re-attempts if !first { tokio::time::sleep(core::time::Duration::from_secs(5)).await; } first = false; let Ok(mut socket) = TcpStream::connect(&self.url).await else { continue }; if !Self::send(&mut socket, msg.clone()).await { continue; } if socket.read_u8().await.ok() != Some(1) { continue; } break; } } pub async fn next(&self, from: Service) -> QueuedMessage { let msg = MessageQueueRequest::Next { from, to: self.service }; let mut first = true; 'outer: loop { if !first { tokio::time::sleep(core::time::Duration::from_secs(5)).await; } first = false; log::trace!("opening socket to message-queue for next"); let mut socket = match TcpStream::connect(&self.url).await { Ok(socket) => socket, Err(e) => { log::warn!("couldn't connect to message-queue server: {e:?}"); continue; } }; log::trace!("opened socket for next"); loop { if !Self::send(&mut socket, msg.clone()).await { continue 'outer; } let 
status = match socket.read_u8().await { Ok(status) => status, Err(e) => { log::warn!("couldn't read status u8: {e:?}"); continue 'outer; } }; // If there wasn't a message, check again in 1s // TODO: Use a notification system here if status == 0 { tokio::time::sleep(core::time::Duration::from_secs(1)).await; continue; } assert_eq!(status, 1); break; } // Timeout after 5 seconds in case there's an issue with the length handling let Ok(msg) = tokio::time::timeout(core::time::Duration::from_secs(5), async { // Read the message length let len = match socket.read_u32_le().await { Ok(len) => len, Err(e) => { log::warn!("couldn't read len: {e:?}"); return vec![]; } }; let mut buf = vec![0; usize::try_from(len).unwrap()]; // Read the message let Ok(_) = socket.read_exact(&mut buf).await else { log::warn!("couldn't read the message"); return vec![]; }; buf }) .await else { continue; }; if msg.is_empty() { continue; } let msg: QueuedMessage = borsh::from_slice(msg.as_slice()).unwrap(); // Verify the message // Verify the sender is sane if matches!(self.service, Service::Processor(_)) { assert_eq!( msg.from, Service::Coordinator, "non-coordinator sent us (a processor) a message" ); } else { assert!( matches!(msg.from, Service::Processor(_)), "non-processor sent us (coordinator) a message" ); } // TODO: Verify the sender's signature return msg; } } pub async fn ack(&self, from: Service, id: u64) { // TODO: Should this use OsRng? Deterministic or deterministic + random may be better. 
let nonce = Zeroizing::new(::F::random(&mut OsRng)); let nonce_pub = Ristretto::generator() * nonce.deref(); let sig = SchnorrSignature::::sign( &self.priv_key, nonce, ack_challenge(self.service, self.pub_key, from, id, nonce_pub), ) .serialize(); let msg = MessageQueueRequest::Ack { from, to: self.service, id, sig }; let mut first = true; loop { if !first { tokio::time::sleep(core::time::Duration::from_secs(5)).await; } first = false; let Ok(mut socket) = TcpStream::connect(&self.url).await else { continue }; if !Self::send(&mut socket, msg.clone()).await { continue; } if socket.read_u8().await.ok() != Some(1) { continue; } break; } } } ================================================ FILE: message-queue/src/lib.rs ================================================ mod messages; pub use messages::*; pub mod client; ================================================ FILE: message-queue/src/main.rs ================================================ pub(crate) use std::{ sync::{Arc, RwLock}, collections::HashMap, }; use dalek_ff_group::Ristretto; pub(crate) use ciphersuite::{group::GroupEncoding, Ciphersuite}; pub(crate) use schnorr_signatures::SchnorrSignature; pub(crate) use serai_primitives::ExternalNetworkId; pub(crate) use tokio::{ io::{AsyncReadExt, AsyncWriteExt}, net::TcpListener, }; use serai_db::{Get, DbTxn, Db as DbTrait}; pub(crate) use crate::messages::*; pub(crate) use crate::queue::Queue; #[cfg(all(feature = "parity-db", not(feature = "rocksdb")))] pub(crate) type Db = Arc; #[cfg(feature = "rocksdb")] pub(crate) type Db = serai_db::RocksDB; #[allow(clippy::type_complexity)] mod clippy { use super::*; use once_cell::sync::Lazy; pub(crate) static KEYS: Lazy::G>>>> = Lazy::new(|| Arc::new(RwLock::new(HashMap::new()))); pub(crate) static QUEUES: Lazy>>>>> = Lazy::new(|| Arc::new(RwLock::new(HashMap::new()))); } pub(crate) use self::clippy::*; mod messages; mod queue; #[global_allocator] static ALLOCATOR: zalloc::ZeroizingAlloc = 
zalloc::ZeroizingAlloc(std::alloc::System); // queue RPC method /* Queues a message to be delivered from a processor to a coordinator, or vice versa. Messages are authenticated to be coming from the claimed service. Recipient services SHOULD independently verify signatures. The metadata specifies an intent. Only one message, for a specified intent, will be delivered. This allows services to safely send messages multiple times without them being delivered multiple times. The message will be ordered by this service, with the order having no guarantees other than successful ordering by the time this call returns. */ pub(crate) fn queue_message( db: &mut Db, meta: &Metadata, msg: Vec, sig: SchnorrSignature, ) { { let from = KEYS.read().unwrap()[&meta.from]; assert!( sig.verify(from, message_challenge(meta.from, from, meta.to, &meta.intent, &msg, sig.R)) ); } // Assert one, and only one of these, is the coordinator assert!(matches!(meta.from, Service::Coordinator) ^ matches!(meta.to, Service::Coordinator)); // Verify (from, to, intent) hasn't been prior seen fn key(domain: &'static [u8], key: impl AsRef<[u8]>) -> Vec { [&[u8::try_from(domain.len()).unwrap()], domain, key.as_ref()].concat() } fn intent_key(from: Service, to: Service, intent: &[u8]) -> Vec { key(b"intent_seen", borsh::to_vec(&(from, to, intent)).unwrap()) } let mut txn = db.txn(); let intent_key = intent_key(meta.from, meta.to, &meta.intent); if Get::get(&txn, &intent_key).is_some() { log::warn!( "Prior queued message attempted to be queued again. From: {:?} To: {:?} Intent: {}", meta.from, meta.to, hex::encode(&meta.intent) ); return; } DbTxn::put(&mut txn, intent_key, []); // Queue it let id = QUEUES.read().unwrap()[&(meta.from, meta.to)].write().unwrap().queue_message( &mut txn, QueuedMessage { from: meta.from, // Temporary value which queue_message will override id: u64::MAX, msg, sig: sig.serialize(), }, ); log::info!("Queued message. 
From: {:?} To: {:?} ID: {id}", meta.from, meta.to); DbTxn::commit(txn); } // next RPC method /* Gets the next message in queue for the named services. This is not authenticated due to the fact every nonce would have to be saved to prevent replays, or a challenge-response protocol implemented. Neither are worth doing when there should be no sensitive data on this server. */ pub(crate) fn get_next_message(from: Service, to: Service) -> Option { let queue_outer = QUEUES.read().unwrap(); let queue = queue_outer[&(from, to)].read().unwrap(); let next = queue.last_acknowledged().map_or(0, |i| i + 1); queue.get_message(next) } // ack RPC method /* Acknowledges a message as received and handled, meaning it'll no longer be returned as the next message. */ pub(crate) fn ack_message(from: Service, to: Service, id: u64, sig: SchnorrSignature) { { let to_key = KEYS.read().unwrap()[&to]; assert!(sig.verify(to_key, ack_challenge(to, to_key, from, id, sig.R))); } // Is it: // The acknowledged message should be > last acknowledged OR // The acknowledged message should be >= // It's the first if we save messages as acknowledged before acknowledging them // It's the second if we acknowledge messages before saving them as acknowledged // TODO: Check only a proper message is being acked log::info!("Acknowledging From: {:?} To: {:?} ID: {}", from, to, id); QUEUES.read().unwrap()[&(from, to)].write().unwrap().ack_message(id) } #[tokio::main(flavor = "current_thread")] async fn main() { // Override the panic handler with one which will panic if any tokio task panics { let existing = std::panic::take_hook(); std::panic::set_hook(Box::new(move |panic| { existing(panic); const MSG: &str = "exiting the process due to a task panicking"; println!("{MSG}"); log::error!("{MSG}"); std::process::exit(1); })); } if std::env::var("RUST_LOG").is_err() { std::env::set_var("RUST_LOG", serai_env::var("RUST_LOG").unwrap_or_else(|| "info".to_string())); } env_logger::init(); log::info!("Starting 
message-queue service..."); // Open the DB #[allow(unused_variables, unreachable_code)] let db = { #[cfg(all(feature = "parity-db", feature = "rocksdb"))] panic!("built with parity-db and rocksdb"); #[cfg(all(feature = "parity-db", not(feature = "rocksdb")))] let db = serai_db::new_parity_db(&serai_env::var("DB_PATH").expect("path to DB wasn't specified")); #[cfg(feature = "rocksdb")] let db = serai_db::new_rocksdb(&serai_env::var("DB_PATH").expect("path to DB wasn't specified")); db }; let read_key = |str| { let key = serai_env::var(str)?; let mut repr = <::G as GroupEncoding>::Repr::default(); repr.as_mut().copy_from_slice(&hex::decode(key).unwrap()); Some(::G::from_bytes(&repr).unwrap()) }; let register_service = |service, key| { KEYS.write().unwrap().insert(service, key); let mut queues = QUEUES.write().unwrap(); if service == Service::Coordinator { for network in serai_primitives::EXTERNAL_NETWORKS { queues.insert( (service, Service::Processor(network)), RwLock::new(Queue(db.clone(), service, Service::Processor(network))), ); } } else { queues.insert( (service, Service::Coordinator), RwLock::new(Queue(db.clone(), service, Service::Coordinator)), ); } }; // Make queues for each ExternalNetworkId for network in serai_primitives::EXTERNAL_NETWORKS { // Use a match so we error if the list of NetworkIds changes let Some(key) = read_key(match network { ExternalNetworkId::Bitcoin => "BITCOIN_KEY", ExternalNetworkId::Ethereum => "ETHEREUM_KEY", ExternalNetworkId::Monero => "MONERO_KEY", }) else { continue; }; register_service(Service::Processor(network), key); } // And the coordinator's register_service(Service::Coordinator, read_key("COORDINATOR_KEY").unwrap()); // Start server // 5132 ^ ((b'M' << 8) | b'Q') let server = TcpListener::bind("0.0.0.0:2287").await.unwrap(); loop { let (mut socket, _) = server.accept().await.unwrap(); // TODO: Add a magic value with a key at the start of the connection to make this authed let mut db = db.clone(); tokio::spawn(async move { 
while let Ok(msg_len) = socket.read_u32_le().await { let mut buf = vec![0; usize::try_from(msg_len).unwrap()]; let Ok(_) = socket.read_exact(&mut buf).await else { break }; let msg = borsh::from_slice(&buf).unwrap(); match msg { MessageQueueRequest::Queue { meta, msg, sig } => { queue_message( &mut db, &meta, msg, SchnorrSignature::::read(&mut sig.as_slice()).unwrap(), ); let Ok(()) = socket.write_all(&[1]).await else { break }; } MessageQueueRequest::Next { from, to } => match get_next_message(from, to) { Some(msg) => { let Ok(()) = socket.write_all(&[1]).await else { break }; let msg = borsh::to_vec(&msg).unwrap(); let len = u32::try_from(msg.len()).unwrap(); let Ok(()) = socket.write_all(&len.to_le_bytes()).await else { break }; let Ok(()) = socket.write_all(&msg).await else { break }; } None => { let Ok(()) = socket.write_all(&[0]).await else { break }; } }, MessageQueueRequest::Ack { from, to, id, sig } => { ack_message( from, to, id, SchnorrSignature::::read(&mut sig.as_slice()).unwrap(), ); let Ok(()) = socket.write_all(&[1]).await else { break }; } } } }); } } ================================================ FILE: message-queue/src/messages.rs ================================================ use transcript::{Transcript, RecommendedTranscript}; use dalek_ff_group::Ristretto; use ciphersuite::{group::GroupEncoding, Ciphersuite}; use borsh::{BorshSerialize, BorshDeserialize}; use serai_primitives::ExternalNetworkId; #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, BorshSerialize, BorshDeserialize)] pub enum Service { Processor(ExternalNetworkId), Coordinator, } #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub struct QueuedMessage { pub from: Service, pub id: u64, pub msg: Vec, pub sig: Vec, } #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub struct Metadata { pub from: Service, pub to: Service, pub intent: Vec, } #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum 
MessageQueueRequest { Queue { meta: Metadata, msg: Vec, sig: Vec }, Next { from: Service, to: Service }, Ack { from: Service, to: Service, id: u64, sig: Vec }, } pub fn message_challenge( from: Service, from_key: ::G, to: Service, intent: &[u8], msg: &[u8], nonce: ::G, ) -> ::F { let mut transcript = RecommendedTranscript::new(b"Serai Message Queue v0.1 Message"); transcript.domain_separate(b"metadata"); transcript.append_message(b"from", borsh::to_vec(&from).unwrap()); transcript.append_message(b"from_key", from_key.to_bytes()); transcript.append_message(b"to", borsh::to_vec(&to).unwrap()); transcript.append_message(b"intent", intent); transcript.domain_separate(b"message"); transcript.append_message(b"msg", msg); transcript.domain_separate(b"signature"); transcript.append_message(b"nonce", nonce.to_bytes()); ::hash_to_F(b"message_challenge", &transcript.challenge(b"challenge")) } pub fn ack_challenge( to: Service, to_key: ::G, from: Service, id: u64, nonce: ::G, ) -> ::F { let mut transcript = RecommendedTranscript::new(b"Serai Message Queue v0.1 Acknowledgement"); transcript.domain_separate(b"metadata"); transcript.append_message(b"to", borsh::to_vec(&to).unwrap()); transcript.append_message(b"to_key", to_key.to_bytes()); transcript.append_message(b"from", borsh::to_vec(&from).unwrap()); transcript.domain_separate(b"message"); transcript.append_message(b"id", id.to_le_bytes()); transcript.domain_separate(b"signature"); transcript.append_message(b"nonce", nonce.to_bytes()); ::hash_to_F(b"ack_challenge", &transcript.challenge(b"challenge")) } ================================================ FILE: message-queue/src/queue.rs ================================================ use serai_db::{DbTxn, Db}; use crate::messages::*; #[derive(Clone, Debug)] pub(crate) struct Queue(pub(crate) D, pub(crate) Service, pub(crate) Service); impl Queue { fn key(domain: &'static [u8], key: impl AsRef<[u8]>) -> Vec { [&[u8::try_from(domain.len()).unwrap()], domain, 
key.as_ref()].concat() } fn message_count_key(&self) -> Vec { Self::key(b"message_count", borsh::to_vec(&(self.1, self.2)).unwrap()) } pub(crate) fn message_count(&self) -> u64 { self .0 .get(self.message_count_key()) .map_or(0, |bytes| u64::from_le_bytes(bytes.try_into().unwrap())) } fn last_acknowledged_key(&self) -> Vec { Self::key(b"last_acknowledged", borsh::to_vec(&(self.1, self.2)).unwrap()) } pub(crate) fn last_acknowledged(&self) -> Option { self .0 .get(self.last_acknowledged_key()) .map(|bytes| u64::from_le_bytes(bytes.try_into().unwrap())) } fn message_key(&self, id: u64) -> Vec { Self::key(b"message", borsh::to_vec(&(self.1, self.2, id)).unwrap()) } // TODO: This is fine as-used, yet gets from the DB while having a txn. It should get from the // txn pub(crate) fn queue_message( &mut self, txn: &mut D::Transaction<'_>, mut msg: QueuedMessage, ) -> u64 { let id = self.message_count(); msg.id = id; let msg_key = self.message_key(id); let msg_count_key = self.message_count_key(); txn.put(msg_key, borsh::to_vec(&msg).unwrap()); txn.put(msg_count_key, (id + 1).to_le_bytes()); id } pub(crate) fn get_message(&self, id: u64) -> Option { let msg: Option = self.0.get(self.message_key(id)).map(|bytes| borsh::from_slice(&bytes).unwrap()); if let Some(msg) = msg.as_ref() { assert_eq!(msg.id, id, "message stored at {id} has ID {}", msg.id); } msg } pub(crate) fn ack_message(&mut self, id: u64) { let ack_key = self.last_acknowledged_key(); let mut txn = self.0.txn(); txn.put(ack_key, id.to_le_bytes()); txn.commit(); } } ================================================ FILE: mini/Cargo.toml ================================================ [package] name = "mini-serai" version = "0.1.0" description = "A miniature version of Serai used to test for race conditions" license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/mini" authors = ["Luke Parker "] keywords = [] edition = "2021" publish = false [package.metadata.docs.rs] all-features = 
true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] loom = "0.7" ================================================ FILE: mini/LICENSE ================================================ AGPL-3.0-only license Copyright (c) 2023 Luke Parker This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License Version 3 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see . ================================================ FILE: mini/README.md ================================================ # Mini Serai A miniature version of the Serai stack, intended to demonstrate a lack of system-wide race conditions in the officially stated flows. ### Why When working on multiple multisigs, a race condition was noted. Originally, the documentation stated that the activation block of the new multisig would be the block after the next `Batch`'s block. This introduced a race condition, where since multiple `Batch`s can be signed at the same time, multiple `Batch`s can exist in the mempool at the same time. This could cause `Batch`s [1, 2] to exist in the mempool, 1 to be published (causing 2 to be the activation block of the new multisig), yet then the already signed 2 to be published (despite no longer being accurate as it only had events for a subset of keys). This effort initially modeled and tested this single race condition, yet aims to grow to the entire system. Then we just have to prove the actual Serai stack's flow reduces to the miniature flow modeled here. 
While further efforts are needed to prove Serai's implementation of the flow is itself free of race conditions, this is a layer of defense over the theory. ### How [loom](https://docs.rs/loom) is a library which will execute a block of code with every possible combination of orders in order to test results aren't invalidated by order of execution. ================================================ FILE: mini/src/lib.rs ================================================ use std::sync::{Arc as StdArc, RwLock as StdRwLock}; use loom::{ thread::{self, JoinHandle}, sync::{Arc, RwLock, mpsc}, }; #[cfg(test)] mod tests; #[derive(Clone, PartialEq, Eq, Hash, Debug)] pub struct Batch { block: u64, keys: Vec, } #[derive(Clone, PartialEq, Eq, Hash, Debug)] pub enum Event { IncludedBatch(Batch), // Allows if let else on this without clippy believing it's redundant __Ignore, } // The amount of blocks to scan after we publish a batch, before confirming the batch was // included. // Prevents race conditions on rotation regarding when the new keys activate. 
const BATCH_FTL: u64 = 3; #[derive(Debug)] pub struct Serai { handle: JoinHandle<()>, remaining_ticks: Arc>, // Activation block, ID pub active_keys: Arc>>, pub mempool_batches: Arc>>, pub events: mpsc::Receiver, all_events_unsafe: StdArc>>, } impl Serai { #[allow(clippy::new_without_default)] pub fn new(ticks: usize, mut queued_key: bool) -> Serai { let remaining_ticks = Arc::new(RwLock::new(ticks)); let active_keys = Arc::new(RwLock::new(vec![(0, 0)])); let mempool_batches = Arc::new(RwLock::new(vec![])); let (events_sender, events_receiver) = mpsc::channel(); let all_events_unsafe = StdArc::new(StdRwLock::new(vec![])); let handle = thread::spawn({ let remaining_ticks = remaining_ticks.clone(); let active_keys = active_keys.clone(); let mempool_batches = mempool_batches.clone(); let all_events_unsafe = all_events_unsafe.clone(); move || { while { let mut remaining_ticks = remaining_ticks.write().unwrap(); let ticking = *remaining_ticks != 0; *remaining_ticks = remaining_ticks.saturating_sub(1); ticking } { let mut batches = mempool_batches.write().unwrap(); if !batches.is_empty() { let batch: Batch = batches.remove(0); // Activate keys after the FTL if queued_key { let mut active_keys = active_keys.write().unwrap(); let len = active_keys.len().try_into().unwrap(); // TODO: active_keys is under Serai, yet the processor is the one actually with the // context on when it activates // This should be re-modeled as an event active_keys.push((batch.block + BATCH_FTL, len)); } queued_key = false; let event = Event::IncludedBatch(batch); events_sender.send(event.clone()).unwrap(); all_events_unsafe.write().unwrap().push(event); } } } }); Serai { handle, remaining_ticks, mempool_batches, active_keys, events: events_receiver, all_events_unsafe, } } pub fn exhausted(&self) -> bool { *self.remaining_ticks.read().unwrap() == 0 } pub fn join(self) -> Vec { self.handle.join().unwrap(); self.all_events_unsafe.read().unwrap().clone() } } #[derive(Debug)] pub struct Processor { 
handle: JoinHandle, } impl Processor { pub fn new(serai: Serai, blocks: u64) -> Processor { let handle = thread::spawn(move || { let mut last_finalized_block = 0; for b in 0 .. blocks { // If this block is too far ahead of Serai's last block, wait for Serai to process // Note this wait only has to occur if we have a Batch which has yet to be included // mini just publishes a Batch for every Block at this point in time, meaning it always has // to wait while b >= (last_finalized_block + BATCH_FTL) { if serai.exhausted() { return serai; } let Ok(event) = serai.events.recv() else { return serai }; if let Event::IncludedBatch(Batch { block, .. }) = event { last_finalized_block = block; } } serai.mempool_batches.write().unwrap().push(Batch { block: b, keys: serai .active_keys .read() .unwrap() .iter() .filter_map(|(activation_block, id)| Some(*id).filter(|_| b >= *activation_block)) .collect(), }); } serai }); Processor { handle } } pub fn join(self) -> Serai { self.handle.join().unwrap() } } ================================================ FILE: mini/src/tests/activation_race/mod.rs ================================================ use std::{ collections::HashSet, sync::{Arc as StdArc, RwLock as StdRwLock}, }; use crate::*; #[test] fn activation_race() { #[derive(Debug)] struct EagerProcessor { handle: JoinHandle, } impl EagerProcessor { fn new(serai: Serai, batches: u64) -> EagerProcessor { let handle = thread::spawn(move || { for b in 0 .. 
batches { serai.mempool_batches.write().unwrap().push(Batch { block: b, keys: serai .active_keys .read() .unwrap() .iter() .filter_map(|(activation_block, id)| Some(*id).filter(|_| b >= *activation_block)) .collect(), }); } serai }); EagerProcessor { handle } } fn join(self) -> Serai { self.handle.join().unwrap() } } let results = StdArc::new(StdRwLock::new(HashSet::new())); loom::model({ let results = results.clone(); move || { let serai = Serai::new(4, true); let processor = EagerProcessor::new(serai, 4); let serai = processor.join(); let events = serai.join(); results.write().unwrap().insert(events); } }); let results: HashSet<_> = results.read().unwrap().clone(); assert_eq!(results.len(), 6); for result in results { for (b, batch) in result.into_iter().enumerate() { if b < 3 { assert_eq!( batch, Event::IncludedBatch(Batch { block: b.try_into().unwrap(), keys: vec![0] }) ); } else { let Event::IncludedBatch(batch) = batch else { panic!("unexpected event") }; assert_eq!(batch.block, b.try_into().unwrap()); assert!((batch.keys == vec![0]) || (batch.keys == vec![0, 1])); } } } } #[test] fn sequential_solves_activation_race() { #[derive(Debug)] struct DelayedProcessor { handle: JoinHandle, } impl DelayedProcessor { fn new(serai: Serai, batches: u64) -> DelayedProcessor { let handle = thread::spawn(move || { for b in 0 .. 
batches { let batch = { let mut batches = serai.mempool_batches.write().unwrap(); let batch = Batch { block: b, keys: serai .active_keys .read() .unwrap() .iter() .filter_map(|(activation_block, id)| Some(*id).filter(|_| b >= *activation_block)) .collect(), }; batches.push(batch.clone()); batch }; while (!serai.exhausted()) && (serai.events.recv().unwrap() != Event::IncludedBatch(batch.clone())) { loom::thread::yield_now(); } } serai }); DelayedProcessor { handle } } fn join(self) -> Serai { self.handle.join().unwrap() } } let results = StdArc::new(StdRwLock::new(HashSet::new())); loom::model({ let results = results.clone(); move || { let serai = Serai::new(4, true); let processor = DelayedProcessor::new(serai, 4); let serai = processor.join(); let events = serai.join(); results.write().unwrap().insert(events); } }); let results: HashSet<_> = results.read().unwrap().clone(); assert_eq!(results.len(), 5); for result in results { for (b, batch) in result.into_iter().enumerate() { assert_eq!( batch, Event::IncludedBatch(Batch { block: b.try_into().unwrap(), keys: if b < 3 { vec![0] } else { vec![0, 1] } }), ); } } } #[test] fn ftl_solves_activation_race() { let results = StdArc::new(StdRwLock::new(HashSet::new())); loom::model({ let results = results.clone(); move || { let serai = Serai::new(4, true); // Uses Processor since this Processor has this algorithm implemented let processor = Processor::new(serai, 4); let serai = processor.join(); let events = serai.join(); results.write().unwrap().insert(events); } }); let results: HashSet<_> = results.read().unwrap().clone(); assert_eq!(results.len(), 5); for result in results { for (b, batch) in result.into_iter().enumerate() { assert_eq!( batch, Event::IncludedBatch(Batch { block: b.try_into().unwrap(), keys: if b < 3 { vec![0] } else { vec![0, 1] } }), ); } } } ================================================ FILE: mini/src/tests/mod.rs ================================================ mod activation_race; 
================================================ FILE: networks/bitcoin/Cargo.toml ================================================ [package] name = "bitcoin-serai" version = "0.4.0" description = "A Bitcoin library for FROST-signing transactions" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/networks/bitcoin" authors = ["Luke Parker ", "Vrx "] edition = "2021" rust-version = "1.80" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] std-shims = { version = "0.1.1", path = "../../common/std-shims", default-features = false } thiserror = { version = "1", default-features = false, optional = true } subtle = { version = "2", default-features = false } zeroize = { version = "^1.5", default-features = false } rand_core = { version = "0.6", default-features = false } bitcoin = { version = "0.32", default-features = false } k256 = { version = "^0.13.1", default-features = false, features = ["arithmetic", "bits"] } frost = { package = "modular-frost", path = "../../crypto/frost", version = "0.10", default-features = false, features = ["secp256k1"], optional = true } hex = { version = "0.4", default-features = false, optional = true } serde = { version = "1", default-features = false, features = ["derive"], optional = true } serde_json = { version = "1", default-features = false, optional = true } simple-request = { path = "../../common/request", version = "0.1", default-features = false, features = ["tls", "basic-auth"], optional = true } [dev-dependencies] secp256k1 = { version = "0.29", default-features = false, features = ["std"] } frost = { package = "modular-frost", path = "../../crypto/frost", features = ["tests"] } tokio = { version = "1", features = ["macros"] } [features] std = [ "std-shims/std", "thiserror", "subtle/std", "zeroize/std", "rand_core/std", "bitcoin/std", "bitcoin/serde", "k256/std", "frost", "hex/std", "serde/std", "serde_json/std", 
"simple-request", ] hazmat = [] default = ["std"] ================================================ FILE: networks/bitcoin/LICENSE ================================================ MIT License Copyright (c) 2022-2023 Luke Parker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: networks/bitcoin/README.md ================================================ # bitcoin-serai An application of [modular-frost](https://docs.rs/modular-frost) to Bitcoin transactions, enabling extremely-efficient multisigs. ================================================ FILE: networks/bitcoin/src/crypto.rs ================================================ use subtle::{Choice, ConstantTimeEq, ConditionallySelectable}; use k256::{ elliptic_curve::sec1::{Tag, ToEncodedPoint}, ProjectivePoint, }; use bitcoin::key::XOnlyPublicKey; /// Get the x coordinate of a non-infinity point. /// /// Panics on invalid input. 
fn x(key: &ProjectivePoint) -> [u8; 32] { let encoded = key.to_encoded_point(true); (*encoded.x().expect("point at infinity")).into() } /// Convert a non-infinity point to a XOnlyPublicKey (dropping its sign). /// /// Panics on invalid input. pub(crate) fn x_only(key: &ProjectivePoint) -> XOnlyPublicKey { XOnlyPublicKey::from_slice(&x(key)).expect("x_only was passed a point which was infinity or odd") } /// Return if a point must be negated to have an even Y coordinate and be eligible for use. pub(crate) fn needs_negation(key: &ProjectivePoint) -> Choice { u8::from(key.to_encoded_point(true).tag()).ct_eq(&u8::from(Tag::CompressedOddY)) } #[cfg(feature = "std")] mod frost_crypto { use core::fmt::Debug; use std_shims::{vec::Vec, io}; use zeroize::Zeroizing; use rand_core::{RngCore, CryptoRng}; use bitcoin::hashes::{HashEngine, Hash, sha256::Hash as Sha256}; use k256::{elliptic_curve::ops::Reduce, U256, Scalar}; use frost::{ curve::{Ciphersuite, Secp256k1}, Participant, ThresholdKeys, ThresholdView, FrostError, algorithm::{Hram as HramTrait, Algorithm, IetfSchnorr as FrostSchnorr}, }; use super::*; /// A BIP-340 compatible HRAm for use with the modular-frost Schnorr Algorithm. /// /// If passed an odd nonce, the challenge will be negated. /// /// If either `R` or `A` is the point at infinity, this will panic. 
#[derive(Clone, Copy, Debug)] pub struct Hram; #[allow(non_snake_case)] impl HramTrait for Hram { fn hram(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar { const TAG_HASH: Sha256 = Sha256::const_hash(b"BIP0340/challenge"); let mut data = Sha256::engine(); data.input(TAG_HASH.as_ref()); data.input(TAG_HASH.as_ref()); data.input(&x(R)); data.input(&x(A)); data.input(m); let c = Scalar::reduce(U256::from_be_slice(Sha256::from_engine(data).as_ref())); // If the nonce was odd, sign `r - cx` instead of `r + cx`, allowing us to negate `s` at the // end to sign as `-r + cx` <_>::conditional_select(&c, &-c, needs_negation(R)) } } /// BIP-340 Schnorr signature algorithm. /// /// This may panic if called with nonces/a group key which are the point at infinity (which have /// a negligible probability for a well-reasoned caller, even with malicious participants /// present). /// /// `verify`, `verify_share` MUST be called after `sign_share` is called. Otherwise, this library /// MAY panic. #[derive(Clone)] pub struct Schnorr(FrostSchnorr); impl Schnorr { /// Construct a Schnorr algorithm continuing the specified transcript. 
#[allow(clippy::new_without_default)] pub fn new() -> Schnorr { Schnorr(FrostSchnorr::ietf()) } } impl Algorithm for Schnorr { type Transcript = as Algorithm>::Transcript; type Addendum = (); type Signature = [u8; 64]; fn transcript(&mut self) -> &mut Self::Transcript { self.0.transcript() } fn nonces(&self) -> Vec> { self.0.nonces() } fn preprocess_addendum( &mut self, rng: &mut R, keys: &ThresholdKeys, ) { self.0.preprocess_addendum(rng, keys) } fn read_addendum(&self, reader: &mut R) -> io::Result { self.0.read_addendum(reader) } fn process_addendum( &mut self, view: &ThresholdView, i: Participant, addendum: (), ) -> Result<(), FrostError> { self.0.process_addendum(view, i, addendum) } fn sign_share( &mut self, params: &ThresholdView, nonce_sums: &[Vec<::G>], nonces: Vec::F>>, msg: &[u8], ) -> ::F { self.0.sign_share(params, nonce_sums, nonces, msg) } #[must_use] fn verify( &self, group_key: ProjectivePoint, nonces: &[Vec], sum: Scalar, ) -> Option { self.0.verify(group_key, nonces, sum).map(|mut sig| { sig.s = <_>::conditional_select(&sum, &-sum, needs_negation(&sig.R)); // Convert to a Bitcoin signature by dropping the byte for the point's sign bit sig.serialize()[1 ..].try_into().unwrap() }) } fn verify_share( &self, verification_share: ProjectivePoint, nonces: &[Vec], share: Scalar, ) -> Result, ()> { self.0.verify_share(verification_share, nonces, share) } } } #[cfg(feature = "std")] pub use frost_crypto::*; ================================================ FILE: networks/bitcoin/src/lib.rs ================================================ #![cfg_attr(docsrs, feature(doc_cfg))] #![doc = include_str!("../README.md")] #![cfg_attr(not(feature = "std"), no_std)] #[cfg(not(feature = "std"))] extern crate alloc; /// The bitcoin Rust library. pub use bitcoin; /// Cryptographic helpers. #[cfg(feature = "hazmat")] pub mod crypto; #[cfg(not(feature = "hazmat"))] pub(crate) mod crypto; /// Wallet functionality to create transactions. 
pub mod wallet; /// A minimal asynchronous Bitcoin RPC client. #[cfg(feature = "std")] pub mod rpc; #[cfg(test)] mod tests; ================================================ FILE: networks/bitcoin/src/rpc.rs ================================================ use core::fmt::Debug; use std::collections::HashSet; use thiserror::Error; use serde::{Deserialize, de::DeserializeOwned}; use serde_json::json; use simple_request::{hyper, Request, Client}; use bitcoin::{ hashes::{Hash, hex::FromHex}, consensus::encode, Txid, Transaction, BlockHash, Block, }; #[derive(Clone, PartialEq, Eq, Debug, Deserialize)] pub struct Error { code: isize, message: String, } #[derive(Clone, Debug, Deserialize)] #[serde(untagged)] enum RpcResponse { Ok { result: T }, Err { error: Error }, } /// A minimal asynchronous Bitcoin RPC client. #[derive(Clone, Debug)] pub struct Rpc { client: Client, url: String, } #[derive(Clone, PartialEq, Eq, Debug, Error)] pub enum RpcError { #[error("couldn't connect to node")] ConnectionError, #[error("request had an error: {0:?}")] RequestError(Error), #[error("node replied with invalid JSON")] InvalidJson(serde_json::error::Category), #[error("node sent an invalid response ({0})")] InvalidResponse(&'static str), #[error("node was missing expected methods")] MissingMethods(HashSet<&'static str>), } impl Rpc { /// Create a new connection to a Bitcoin RPC. /// /// An RPC call is performed to ensure the node is reachable (and that an invalid URL wasn't /// provided). /// /// Additionally, a set of expected methods is checked to be offered by the Bitcoin RPC. If these /// methods aren't provided, an error with the missing methods is returned. This ensures all RPC /// routes explicitly provided by this library are at least possible. /// /// Each individual RPC route may still fail at time-of-call, regardless of the arguments /// provided to this library, if the RPC has an incompatible argument layout. That is not checked /// at time of RPC creation. 
pub async fn new(url: String) -> Result { let rpc = Rpc { client: Client::with_connection_pool(), url }; // Make an RPC request to verify the node is reachable and sane let res: String = rpc.rpc_call("help", json!([])).await?; // Verify all methods we expect are present // If we had a more expanded RPC, due to differences in RPC versions, it wouldn't make sense to // error if all methods weren't present // We only provide a very minimal set of methods which have been largely consistent, hence why // this is sane let mut expected_methods = HashSet::from([ "help", "getblockcount", "getblockhash", "getblockheader", "getblock", "sendrawtransaction", "getrawtransaction", ]); for line in res.split('\n') { // This doesn't check if the arguments are as expected // This is due to Bitcoin supporting a large amount of optional arguments, which // occasionally change, with their own mechanism of text documentation, making matching off // it a quite involved task // Instead, once we've confirmed the methods are present, we assume our arguments are aligned // Else we'll error at time of call if expected_methods.remove(line.split(' ').next().unwrap_or("")) && expected_methods.is_empty() { break; } } if !expected_methods.is_empty() { Err(RpcError::MissingMethods(expected_methods))?; }; Ok(rpc) } /// Perform an arbitrary RPC call. pub async fn rpc_call( &self, method: &str, params: serde_json::Value, ) -> Result { let mut request = Request::from( hyper::Request::post(&self.url) .header("Content-Type", "application/json") .body( serde_json::to_vec(&json!({ "jsonrpc": "2.0", "method": method, "params": params })) .unwrap() .into(), ) .unwrap(), ); request.with_basic_auth(); let mut res = self .client .request(request) .await .map_err(|_| RpcError::ConnectionError)? 
.body() .await .map_err(|_| RpcError::ConnectionError)?; let res: RpcResponse = serde_json::from_reader(&mut res).map_err(|e| RpcError::InvalidJson(e.classify()))?; match res { RpcResponse::Ok { result } => Ok(result), RpcResponse::Err { error } => Err(RpcError::RequestError(error)), } } /// Get the latest block's number. /// /// The genesis block's 'number' is zero. They increment from there. pub async fn get_latest_block_number(&self) -> Result { // getblockcount doesn't return the amount of blocks on the current chain, yet the "height" // of the current chain. The "height" of the current chain is defined as the "height" of the // tip block of the current chain. The "height" of a block is defined as the amount of blocks // present when the block was created. Accordingly, the genesis block has height 0, and // getblockcount will return 0 when it's only the only block, despite their being one block. self.rpc_call("getblockcount", json!([])).await } /// Get the hash of a block by the block's number. pub async fn get_block_hash(&self, number: usize) -> Result<[u8; 32], RpcError> { let mut hash = self .rpc_call::("getblockhash", json!([number])) .await? .as_raw_hash() .to_byte_array(); // bitcoin stores the inner bytes in reverse order. hash.reverse(); Ok(hash) } /// Get a block's number by its hash. pub async fn get_block_number(&self, hash: &[u8; 32]) -> Result { #[derive(Deserialize, Debug)] struct Number { height: usize, } Ok(self.rpc_call::("getblockheader", json!([hex::encode(hash)])).await?.height) } /// Get a block by its hash. 
pub async fn get_block(&self, hash: &[u8; 32]) -> Result { let hex = self.rpc_call::("getblock", json!([hex::encode(hash), 0])).await?; let bytes: Vec = FromHex::from_hex(&hex) .map_err(|_| RpcError::InvalidResponse("node didn't use hex to encode the block"))?; let block: Block = encode::deserialize(&bytes) .map_err(|_| RpcError::InvalidResponse("node sent an improperly serialized block"))?; let mut block_hash = *block.block_hash().as_raw_hash().as_byte_array(); block_hash.reverse(); if hash != &block_hash { Err(RpcError::InvalidResponse("node replied with a different block"))?; } Ok(block) } /// Publish a transaction. pub async fn send_raw_transaction(&self, tx: &Transaction) -> Result { let txid = match self.rpc_call("sendrawtransaction", json!([encode::serialize_hex(tx)])).await { Ok(txid) => txid, Err(e) => { // A const from Bitcoin's bitcoin/src/rpc/protocol.h const RPC_VERIFY_ALREADY_IN_CHAIN: isize = -27; // If this was already successfully published, consider this having succeeded if let RpcError::RequestError(Error { code, .. }) = e { if code == RPC_VERIFY_ALREADY_IN_CHAIN { return Ok(tx.compute_txid()); } } Err(e)? } }; if txid != tx.compute_txid() { Err(RpcError::InvalidResponse("returned TX ID inequals calculated TX ID"))?; } Ok(txid) } /// Get a transaction by its hash. 
pub async fn get_transaction(&self, hash: &[u8; 32]) -> Result { let hex = self.rpc_call::("getrawtransaction", json!([hex::encode(hash)])).await?; let bytes: Vec = FromHex::from_hex(&hex) .map_err(|_| RpcError::InvalidResponse("node didn't use hex to encode the transaction"))?; let tx: Transaction = encode::deserialize(&bytes) .map_err(|_| RpcError::InvalidResponse("node sent an improperly serialized transaction"))?; let mut tx_hash = *tx.compute_txid().as_raw_hash().as_byte_array(); tx_hash.reverse(); if hash != &tx_hash { Err(RpcError::InvalidResponse("node replied with a different transaction"))?; } Ok(tx) } } ================================================ FILE: networks/bitcoin/src/tests/crypto.rs ================================================ use rand_core::OsRng; use secp256k1::{Secp256k1 as BContext, Message, schnorr::Signature}; use frost::{ curve::Secp256k1, Participant, tests::{algorithm_machines, key_gen, sign}, }; use crate::{ bitcoin::hashes::{Hash as HashTrait, sha256::Hash}, crypto::{x_only, Schnorr}, wallet::tweak_keys, }; #[test] fn test_algorithm() { let mut keys = key_gen::<_, Secp256k1>(&mut OsRng); const MESSAGE: &[u8] = b"Hello, World!"; for keys in keys.values_mut() { *keys = tweak_keys(keys.clone()); } let algo = Schnorr::new(); let sig = sign( &mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), Hash::hash(MESSAGE).as_ref(), ); BContext::new() .verify_schnorr( &Signature::from_slice(&sig) .expect("couldn't convert produced signature to secp256k1::Signature"), &Message::from_digest_slice(Hash::hash(MESSAGE).as_ref()).unwrap(), &x_only(&keys[&Participant::new(1).unwrap()].group_key()), ) .unwrap() } ================================================ FILE: networks/bitcoin/src/tests/mod.rs ================================================ mod crypto; ================================================ FILE: networks/bitcoin/src/wallet/mod.rs ================================================ use std_shims::{ vec::Vec, 
collections::HashMap, io::{self, Write}, }; #[cfg(feature = "std")] use std::io::{Read, BufReader}; use k256::{ elliptic_curve::sec1::{Tag, ToEncodedPoint}, Scalar, ProjectivePoint, }; #[cfg(feature = "std")] use frost::{ curve::{Ciphersuite, Secp256k1}, ThresholdKeys, }; use bitcoin::{ consensus::encode::serialize, key::TweakedPublicKey, OutPoint, ScriptBuf, TxOut, Transaction, Block, }; #[cfg(feature = "std")] use bitcoin::{hashes::Hash, consensus::encode::Decodable, TapTweakHash}; use crate::crypto::x_only; #[cfg(feature = "std")] use crate::crypto::needs_negation; #[cfg(feature = "std")] mod send; #[cfg(feature = "std")] pub use send::*; /// Tweak keys to ensure they're usable with Bitcoin's Taproot upgrade. /// /// This adds an unspendable script path to the key, preventing any outputs received to this key /// from being spent via a script. To have keys which have spendable script paths, further offsets /// from this position must be used. /// /// After adding an unspendable script path, the key is negated if odd. /// /// This has a neligible probability of returning keys whose group key is the point at infinity. #[cfg(feature = "std")] pub fn tweak_keys(keys: ThresholdKeys) -> ThresholdKeys { // Adds the unspendable script path per // https://github.com/bitcoin/bips/blob/master/bip-0341.mediawiki#cite_note-23 let keys = { use k256::elliptic_curve::{ bigint::{Encoding, U256}, ops::Reduce, group::GroupEncoding, }; let tweak_hash = TapTweakHash::hash(&keys.group_key().to_bytes().as_slice()[1 ..]); /* https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki#cite_ref-13-0 states how the bias is negligible. This reduction shouldn't ever occur, yet if it did, the script path would be unusable due to a check the script path hash is less than the order. That doesn't impact us as we don't want the script path to be usable. 
*/ keys.offset(::F::reduce(U256::from_be_bytes( *tweak_hash.to_raw_hash().as_ref(), ))) }; let needs_negation = needs_negation(&keys.group_key()); keys .scale(<_ as subtle::ConditionallySelectable>::conditional_select( &Scalar::ONE, &-Scalar::ONE, needs_negation, )) .expect("scaling keys by 1 or -1 yet interpreted as 0?") } /// Return the Taproot address payload for a public key. /// /// If the key is odd, this will return None. pub fn p2tr_script_buf(key: ProjectivePoint) -> Option { if key.to_encoded_point(true).tag() != Tag::CompressedEvenY { return None; } Some(ScriptBuf::new_p2tr_tweaked(TweakedPublicKey::dangerous_assume_tweaked(x_only(&key)))) } /// A spendable output. #[derive(Clone, PartialEq, Eq, Debug)] pub struct ReceivedOutput { // The scalar offset to obtain the key usable to spend this output. offset: Scalar, // The output to spend. output: TxOut, // The TX ID and vout of the output to spend. outpoint: OutPoint, } impl ReceivedOutput { /// The offset for this output. pub fn offset(&self) -> Scalar { self.offset } /// The Bitcoin output for this output. pub fn output(&self) -> &TxOut { &self.output } /// The outpoint for this output. pub fn outpoint(&self) -> &OutPoint { &self.outpoint } /// The value of this output. pub fn value(&self) -> u64 { self.output.value.to_sat() } /// Read a ReceivedOutput from a generic satisfying Read. #[cfg(feature = "std")] pub fn read(r: &mut R) -> io::Result { let offset = Secp256k1::read_F(r)?; let output; let outpoint; { let mut buf_r = BufReader::with_capacity(0, r); output = TxOut::consensus_decode(&mut buf_r).map_err(|_| io::Error::other("invalid TxOut"))?; outpoint = OutPoint::consensus_decode(&mut buf_r).map_err(|_| io::Error::other("invalid OutPoint"))?; } Ok(ReceivedOutput { offset, output, outpoint }) } /// Write a ReceivedOutput to a generic satisfying Write. 
pub fn write(&self, w: &mut W) -> io::Result<()> { w.write_all(&self.offset.to_bytes())?; w.write_all(&serialize(&self.output))?; w.write_all(&serialize(&self.outpoint)) } /// Serialize a ReceivedOutput to a `Vec`. pub fn serialize(&self) -> Vec { let mut res = Vec::new(); self.write(&mut res).unwrap(); res } } /// A transaction scanner capable of being used with HDKD schemes. #[derive(Clone, Debug)] pub struct Scanner { key: ProjectivePoint, scripts: HashMap, } impl Scanner { /// Construct a Scanner for a key. /// /// Returns None if this key can't be scanned for. pub fn new(key: ProjectivePoint) -> Option { let mut scripts = HashMap::new(); scripts.insert(p2tr_script_buf(key)?, Scalar::ZERO); Some(Scanner { key, scripts }) } /// Register an offset to scan for. /// /// Due to Bitcoin's requirement that points are even, not every offset may be used. /// If an offset isn't usable, it will be incremented until it is. If this offset is already /// present, None is returned. Else, Some(offset) will be, with the used offset. /// /// This means offsets are surjective, not bijective, and the order offsets are registered in /// may determine the validity of future offsets. /// /// The offsets registered must be securely generated. Arbitrary offsets may introduce a script /// path into the output, allowing the output to be spent by satisfaction of an arbitrary script /// (not by the signature of the key). pub fn register_offset(&mut self, mut offset: Scalar) -> Option { // This loop will terminate as soon as an even point is found, with any point having a ~50% // chance of being even // That means this should terminate within a very small amount of iterations loop { match p2tr_script_buf(self.key + (ProjectivePoint::GENERATOR * offset)) { Some(script) => { if self.scripts.contains_key(&script) { None?; } self.scripts.insert(script, offset); return Some(offset); } None => offset += Scalar::ONE, } } } /// Scan a transaction. 
pub fn scan_transaction(&self, tx: &Transaction) -> Vec { let mut res = Vec::new(); for (vout, output) in tx.output.iter().enumerate() { // If the vout index exceeds 2**32, stop scanning outputs let Ok(vout) = u32::try_from(vout) else { break }; if let Some(offset) = self.scripts.get(&output.script_pubkey) { res.push(ReceivedOutput { offset: *offset, output: output.clone(), outpoint: OutPoint::new(tx.compute_txid(), vout), }); } } res } /// Scan a block. /// /// This will also scan the coinbase transaction which is bound by maturity. If received outputs /// must be immediately spendable, a post-processing pass is needed to remove those outputs. /// Alternatively, scan_transaction can be called on `block.txdata[1 ..]`. pub fn scan_block(&self, block: &Block) -> Vec { let mut res = Vec::new(); for tx in &block.txdata { res.extend(self.scan_transaction(tx)); } res } } ================================================ FILE: networks/bitcoin/src/wallet/send.rs ================================================ use std_shims::{ io::{self, Read}, collections::HashMap, }; use thiserror::Error; use rand_core::{RngCore, CryptoRng}; use k256::Scalar; use frost::{curve::Secp256k1, Participant, ThresholdKeys, FrostError, sign::*}; use bitcoin::{ hashes::Hash, sighash::{TapSighashType, SighashCache, Prevouts}, absolute::LockTime, script::{PushBytesBuf, ScriptBuf}, transaction::{Version, Transaction}, OutPoint, Sequence, Witness, TxIn, Amount, TxOut, }; use crate::{ crypto::Schnorr, wallet::{ReceivedOutput, p2tr_script_buf}, }; #[rustfmt::skip] // https://github.com/bitcoin/bitcoin/blob/306ccd4927a2efe325c8d84be1bdb79edeb29b04/src/policy/policy.cpp#L26-L63 // As the above notes, a lower amount may not be considered dust if contained in a SegWit output // This doesn't bother with delineation due to how marginal these values are, and because it isn't // worth the complexity to implement differentation pub const DUST: u64 = 546; #[derive(Clone, PartialEq, Eq, Debug, Error)] pub enum 
TransactionError { #[error("no inputs were specified")] NoInputs, #[error("no outputs were created")] NoOutputs, #[error("a specified payment's amount was less than bitcoin's required minimum")] DustPayment, #[error("too much data was specified")] TooMuchData, #[error("fee was too low to pass the default minimum fee rate")] TooLowFee, #[error("not enough funds for these payments")] NotEnoughFunds { inputs: u64, payments: u64, fee: u64 }, #[error("transaction was too large")] TooLargeTransaction, } /// A signable transaction, clone-able across attempts. #[derive(Clone, PartialEq, Eq, Debug)] pub struct SignableTransaction { tx: Transaction, offsets: Vec, prevouts: Vec, needed_fee: u64, } impl SignableTransaction { fn calculate_weight_vbytes( inputs: usize, payments: &[(ScriptBuf, u64)], change: Option<&ScriptBuf>, ) -> (u64, u64) { // Expand this a full transaction in order to use the bitcoin library's weight function let mut tx = Transaction { version: Version(2), lock_time: LockTime::ZERO, input: vec![ TxIn { // This is a fixed size // See https://developer.bitcoin.org/reference/transactions.html#raw-transaction-format previous_output: OutPoint::default(), // This is empty for a Taproot spend script_sig: ScriptBuf::new(), // This is fixed size, yet we do use Sequence::MAX sequence: Sequence::MAX, // Our witnesses contains a single 64-byte signature witness: Witness::from_slice(&[vec![0; 64]]) }; inputs ], output: payments .iter() // The payment is a fixed size so we don't have to use it here // The script pub key is not of a fixed size and does have to be used here .map(|payment| TxOut { value: Amount::from_sat(payment.1), script_pubkey: payment.0.clone(), }) .collect(), }; if let Some(change) = change { // Use a 0 value since we're currently unsure what the change amount will be, and since // the value is fixed size (so any value could be used here) tx.output.push(TxOut { value: Amount::ZERO, script_pubkey: change.clone() }); } let weight = tx.weight(); // Now 
calculate the size in vbytes /* "Virtual transaction size" is weight ceildiv 4 per https://github.com/bitcoin/bips/blob/master/bip-0141.mediawiki https://github.com/bitcoin/bitcoin/blob/306ccd4927a2efe325c8d84be1bdb79edeb29b04 /src/policy/policy.cpp#L295-L298 implements this almost as expected, with an additional consideration to signature operations Signature operations (the second argument of the following call) do not count Taproot signatures per https://github.com/bitcoin/bips/blob/master/bip-0342.mediawiki#cite_ref-11-0 We don't risk running afoul of the Taproot signature limit as it allows at least one per input, which is all we use */ ( weight.to_wu(), u64::try_from(bitcoin::policy::get_virtual_tx_size( i64::try_from(weight.to_wu()).unwrap(), 0i64, )) .unwrap(), ) } /// Returns the fee necessary for this transaction to achieve the fee rate specified at /// construction. /// /// The actual fee this transaction will use is `sum(inputs) - sum(outputs)`. pub fn needed_fee(&self) -> u64 { self.needed_fee } /// Returns the fee this transaction will use. pub fn fee(&self) -> u64 { self.prevouts.iter().map(|prevout| prevout.value.to_sat()).sum::() - self.tx.output.iter().map(|prevout| prevout.value.to_sat()).sum::() } /// Create a new SignableTransaction. /// /// If a change address is specified, any leftover funds will be sent to it if the leftover funds /// exceed the minimum output amount. If a change address isn't specified, all leftover funds /// will become part of the paid fee. /// /// If data is specified, an OP_RETURN output will be added with it. 
pub fn new( mut inputs: Vec, payments: &[(ScriptBuf, u64)], change: Option, data: Option>, fee_per_vbyte: u64, ) -> Result { if inputs.is_empty() { Err(TransactionError::NoInputs)?; } if payments.is_empty() && change.is_none() && data.is_none() { Err(TransactionError::NoOutputs)?; } for (_, amount) in payments { if *amount < DUST { Err(TransactionError::DustPayment)?; } } if data.as_ref().map_or(0, Vec::len) > 80 { Err(TransactionError::TooMuchData)?; } let input_sat = inputs.iter().map(|input| input.output.value.to_sat()).sum::(); let offsets = inputs.iter().map(|input| input.offset).collect(); let tx_ins = inputs .iter() .map(|input| TxIn { previous_output: input.outpoint, script_sig: ScriptBuf::new(), sequence: Sequence::MAX, witness: Witness::new(), }) .collect::>(); let payment_sat = payments.iter().map(|payment| payment.1).sum::(); let mut tx_outs = payments .iter() .map(|payment| TxOut { value: Amount::from_sat(payment.1), script_pubkey: payment.0.clone() }) .collect::>(); // Add the OP_RETURN output if let Some(data) = data { tx_outs.push(TxOut { value: Amount::ZERO, script_pubkey: ScriptBuf::new_op_return( PushBytesBuf::try_from(data) .expect("data didn't fit into PushBytes depsite being checked"), ), }) } let (mut weight, vbytes) = Self::calculate_weight_vbytes(tx_ins.len(), payments, None); let mut needed_fee = fee_per_vbyte * vbytes; // Technically, if there isn't change, this TX may still pay enough of a fee to pass the // minimum fee. 
Such edge cases aren't worth programming when they go against intent, as the // specified fee rate is too low to be valid // bitcoin::policy::DEFAULT_MIN_RELAY_TX_FEE is in sats/kilo-vbyte if needed_fee < ((u64::from(bitcoin::policy::DEFAULT_MIN_RELAY_TX_FEE) * vbytes) / 1000) { Err(TransactionError::TooLowFee)?; } if input_sat < (payment_sat + needed_fee) { Err(TransactionError::NotEnoughFunds { inputs: input_sat, payments: payment_sat, fee: needed_fee, })?; } // If there's a change address, check if there's change to give it if let Some(change) = change { let (weight_with_change, vbytes_with_change) = Self::calculate_weight_vbytes(tx_ins.len(), payments, Some(&change)); let fee_with_change = fee_per_vbyte * vbytes_with_change; if let Some(value) = input_sat.checked_sub(payment_sat + fee_with_change) { if value >= DUST { tx_outs.push(TxOut { value: Amount::from_sat(value), script_pubkey: change }); weight = weight_with_change; needed_fee = fee_with_change; } } } if tx_outs.is_empty() { Err(TransactionError::NoOutputs)?; } if weight > u64::from(bitcoin::policy::MAX_STANDARD_TX_WEIGHT) { Err(TransactionError::TooLargeTransaction)?; } Ok(SignableTransaction { tx: Transaction { version: Version(2), lock_time: LockTime::ZERO, input: tx_ins, output: tx_outs, }, offsets, prevouts: inputs.drain(..).map(|input| input.output).collect(), needed_fee, }) } /// Returns the TX ID of the transaction this will create. pub fn txid(&self) -> [u8; 32] { let mut res = self.tx.compute_txid().to_byte_array(); res.reverse(); res } /// Returns the transaction, sans witness, this will create if signed. pub fn transaction(&self) -> &Transaction { &self.tx } /// Create a multisig machine for this transaction. /// /// Returns None if the wrong keys are used. pub fn multisig(self, keys: &ThresholdKeys) -> Option { let mut sigs = vec![]; for i in 0 .. self.tx.input.len() { let offset = keys.clone().offset(self.offsets[i]); if p2tr_script_buf(offset.group_key())? 
!= self.prevouts[i].script_pubkey { None?; } sigs.push(AlgorithmMachine::new(Schnorr::new(), keys.clone().offset(self.offsets[i]))); } Some(TransactionMachine { tx: self, sigs }) } } /// A FROST signing machine to produce a Bitcoin transaction. /// /// This does not support caching its preprocess. When sign is called, the message must be empty. /// This will panic if either `cache`, `from_cache` is called or the message isn't empty. pub struct TransactionMachine { tx: SignableTransaction, sigs: Vec>, } impl PreprocessMachine for TransactionMachine { type Preprocess = Vec>; type Signature = Transaction; type SignMachine = TransactionSignMachine; fn preprocess( mut self, rng: &mut R, ) -> (Self::SignMachine, Self::Preprocess) { let mut preprocesses = Vec::with_capacity(self.sigs.len()); let sigs = self .sigs .drain(..) .map(|sig| { let (sig, preprocess) = sig.preprocess(rng); preprocesses.push(preprocess); sig }) .collect(); (TransactionSignMachine { tx: self.tx, sigs }, preprocesses) } } pub struct TransactionSignMachine { tx: SignableTransaction, sigs: Vec>, } impl SignMachine for TransactionSignMachine { type Params = (); type Keys = ThresholdKeys; type Preprocess = Vec>; type SignatureShare = Vec>; type SignatureMachine = TransactionSignatureMachine; fn cache(self) -> CachedPreprocess { unimplemented!( "Bitcoin transactions don't support caching their preprocesses due to {}", "being already bound to a specific transaction" ); } fn from_cache( (): (), _: ThresholdKeys, _: CachedPreprocess, ) -> (Self, Self::Preprocess) { unimplemented!( "Bitcoin transactions don't support caching their preprocesses due to {}", "being already bound to a specific transaction" ); } fn read_preprocess(&self, reader: &mut R) -> io::Result { self.sigs.iter().map(|sig| sig.read_preprocess(reader)).collect() } fn sign( mut self, commitments: HashMap, msg: &[u8], ) -> Result<(TransactionSignatureMachine, Self::SignatureShare), FrostError> { if !msg.is_empty() { panic!("message was passed 
to the TransactionSignMachine when it generates its own"); } let commitments = (0 .. self.sigs.len()) .map(|c| { commitments .iter() .map(|(l, commitments)| (*l, commitments[c].clone())) .collect::>() }) .collect::>(); let mut cache = SighashCache::new(&self.tx.tx); // Sign committing to all inputs let prevouts = Prevouts::All(&self.tx.prevouts); let mut shares = Vec::with_capacity(self.sigs.len()); let sigs = self .sigs .drain(..) .enumerate() .map(|(i, sig)| { let (sig, share) = sig.sign( commitments[i].clone(), cache .taproot_key_spend_signature_hash(i, &prevouts, TapSighashType::Default) // This should never happen since the inputs align with the TX the cache was // constructed with, and because i is always < prevouts.len() .expect("taproot_key_spend_signature_hash failed to return a hash") .as_ref(), )?; shares.push(share); Ok(sig) }) .collect::>()?; Ok((TransactionSignatureMachine { tx: self.tx.tx, sigs }, shares)) } } pub struct TransactionSignatureMachine { tx: Transaction, sigs: Vec>, } impl SignatureMachine for TransactionSignatureMachine { type SignatureShare = Vec>; fn read_share(&self, reader: &mut R) -> io::Result { self.sigs.iter().map(|sig| sig.read_share(reader)).collect() } fn complete( mut self, mut shares: HashMap, ) -> Result { for (input, schnorr) in self.tx.input.iter_mut().zip(self.sigs.drain(..)) { let sig = schnorr.complete( shares.iter_mut().map(|(l, shares)| (*l, shares.remove(0))).collect::>(), )?; let mut witness = Witness::new(); witness.push(sig); input.witness = witness; } Ok(self.tx) } } ================================================ FILE: networks/bitcoin/tests/rpc.rs ================================================ use bitcoin_serai::{bitcoin::hashes::Hash as HashTrait, rpc::RpcError}; mod runner; use runner::rpc; async_sequential! 
{
  async fn test_rpc() {
    let rpc = rpc().await;

    // Test get_latest_block_number and get_block_hash by round tripping them
    let latest = rpc.get_latest_block_number().await.unwrap();
    let hash = rpc.get_block_hash(latest).await.unwrap();
    assert_eq!(rpc.get_block_number(&hash).await.unwrap(), latest);

    // Test this actually is the latest block number by checking asking for the next block's errors
    assert!(matches!(rpc.get_block_hash(latest + 1).await, Err(RpcError::RequestError(_))));

    // Test get_block by checking the received block's hash matches the request
    let block = rpc.get_block(&hash).await.unwrap();
    // Hashes are stored in reverse. It's bs from Satoshi
    let mut block_hash = *block.block_hash().as_raw_hash().as_byte_array();
    block_hash.reverse();
    assert_eq!(hash, block_hash);
  }
}

================================================
FILE: networks/bitcoin/tests/runner.rs
================================================
use std::sync::LazyLock;

use bitcoin_serai::rpc::Rpc;

use tokio::sync::Mutex;

// Lock used to serialize tests which interact with the shared regtest node
#[allow(dead_code)]
pub(crate) static SEQUENTIAL: LazyLock<Mutex<()>> = LazyLock::new(|| Mutex::new(()));

// Connect to the local regtest node, resetting its chain if it was previously used
#[allow(dead_code)]
pub(crate) async fn rpc() -> Rpc {
  let rpc = Rpc::new("http://serai:seraidex@127.0.0.1:8332".to_string()).await.unwrap();

  // If this node has already been interacted with, clear its chain
  if rpc.get_latest_block_number().await.unwrap() > 0 {
    rpc
      .rpc_call(
        "invalidateblock",
        serde_json::json!([hex::encode(rpc.get_block_hash(1).await.unwrap())]),
      )
      .await
      .unwrap()
  }

  rpc
}

// Declares #[tokio::test] functions which run sequentially (guarded by SEQUENTIAL) on a LocalSet
#[macro_export]
macro_rules! async_sequential {
  ($(async fn $name: ident() $body: block)*) => {
    $(
      #[tokio::test]
      async fn $name() {
        let guard = runner::SEQUENTIAL.lock().await;
        let local = tokio::task::LocalSet::new();
        local.run_until(async move {
          if let Err(err) = tokio::task::spawn_local(async move { $body }).await {
            // Drop the lock before propagating the panic so later tests aren't poisoned
            drop(guard);
            Err(err).unwrap()
          }
        }).await;
      }
    )*
  }
}

================================================
FILE: networks/bitcoin/tests/wallet.rs
================================================
use std::collections::HashMap;

use rand_core::{RngCore, OsRng};

use k256::{
  elliptic_curve::{
    group::{ff::Field, Group},
    sec1::{Tag, ToEncodedPoint},
  },
  Scalar, ProjectivePoint,
};

use frost::{
  curve::Secp256k1,
  Participant, ThresholdKeys,
  tests::{THRESHOLD, key_gen, sign_without_caching},
};

use bitcoin_serai::{
  bitcoin::{
    hashes::Hash as HashTrait,
    blockdata::opcodes::all::OP_RETURN,
    script::{PushBytesBuf, Instruction, Instructions, Script},
    OutPoint, Amount, TxOut, Transaction, Network, Address,
  },
  wallet::{
    tweak_keys, p2tr_script_buf, ReceivedOutput, Scanner, TransactionError, SignableTransaction,
  },
  rpc::Rpc,
};

mod runner;
use runner::rpc;

const FEE: u64 = 20;

// Whether this point has an even y-coordinate, as required of Taproot output keys
fn is_even(key: ProjectivePoint) -> bool {
  key.to_encoded_point(true).tag() == Tag::CompressedEvenY
}

// Mine a block paying `key`, mine to maturity, then scan and return the received output
async fn send_and_get_output(rpc: &Rpc, scanner: &Scanner, key: ProjectivePoint) -> ReceivedOutput {
  let block_number = rpc.get_latest_block_number().await.unwrap() + 1;
  rpc
    .rpc_call::<Vec<String>>(
      "generatetoaddress",
      serde_json::json!([
        1,
        Address::from_script(&p2tr_script_buf(key).unwrap(), Network::Regtest).unwrap()
      ]),
    )
    .await
    .unwrap();

  // Mine until maturity
  rpc
    .rpc_call::<Vec<String>>(
      "generatetoaddress",
      serde_json::json!([100, Address::p2sh(Script::new(), Network::Regtest).unwrap()]),
    )
    .await
    .unwrap();

  let block = rpc.get_block(&rpc.get_block_hash(block_number).await.unwrap()).await.unwrap();
  let mut outputs = scanner.scan_block(&block);
  assert_eq!(outputs, scanner.scan_transaction(&block.txdata[0]));
  assert_eq!(outputs.len(), 1);
assert_eq!(outputs[0].outpoint(), &OutPoint::new(block.txdata[0].compute_txid(), 0)); assert_eq!(outputs[0].value(), block.txdata[0].output[0].value.to_sat()); assert_eq!( ReceivedOutput::read::<&[u8]>(&mut outputs[0].serialize().as_ref()).unwrap(), outputs[0] ); outputs.swap_remove(0) } fn keys() -> (HashMap>, ProjectivePoint) { let mut keys = key_gen(&mut OsRng); for keys in keys.values_mut() { *keys = tweak_keys(keys.clone()); } let key = keys.values().next().unwrap().group_key(); (keys, key) } fn sign( keys: &HashMap>, tx: &SignableTransaction, ) -> Transaction { let mut machines = HashMap::new(); for i in (1 ..= THRESHOLD).map(|i| Participant::new(i).unwrap()) { machines.insert(i, tx.clone().multisig(&keys[&i].clone()).unwrap()); } sign_without_caching(&mut OsRng, machines, &[]) } async_sequential! { async fn test_scanner() { // Test Scanners are creatable for even keys. for _ in 0 .. 128 { let key = ProjectivePoint::random(&mut OsRng); assert_eq!(Scanner::new(key).is_some(), is_even(key)); } let mut key = ProjectivePoint::random(&mut OsRng); while !is_even(key) { key += ProjectivePoint::GENERATOR; } { let mut scanner = Scanner::new(key).unwrap(); for _ in 0 .. 
128 { let mut offset = Scalar::random(&mut OsRng); let registered = scanner.register_offset(offset).unwrap(); // Registering this again should return None assert!(scanner.register_offset(offset).is_none()); // We can only register offsets resulting in even keys // Make this even while !is_even(key + (ProjectivePoint::GENERATOR * offset)) { offset += Scalar::ONE; } // Ensure it matches the registered offset assert_eq!(registered, offset); // Assert registering this again fails assert!(scanner.register_offset(offset).is_none()); } } let rpc = rpc().await; let mut scanner = Scanner::new(key).unwrap(); assert_eq!(send_and_get_output(&rpc, &scanner, key).await.offset(), Scalar::ZERO); // Register an offset and test receiving to it let offset = scanner.register_offset(Scalar::random(&mut OsRng)).unwrap(); assert_eq!( send_and_get_output(&rpc, &scanner, key + (ProjectivePoint::GENERATOR * offset)) .await .offset(), offset ); } async fn test_transaction_errors() { let (_, key) = keys(); let rpc = rpc().await; let scanner = Scanner::new(key).unwrap(); let output = send_and_get_output(&rpc, &scanner, key).await; assert_eq!(output.offset(), Scalar::ZERO); let inputs = vec![output]; let addr = || p2tr_script_buf(key).unwrap(); let payments = vec![(addr(), 1000)]; assert!(SignableTransaction::new(inputs.clone(), &payments, None, None, FEE).is_ok()); assert_eq!( SignableTransaction::new(vec![], &payments, None, None, FEE), Err(TransactionError::NoInputs) ); // No change assert!(SignableTransaction::new(inputs.clone(), &[(addr(), 1000)], None, None, FEE).is_ok()); // Consolidation TX assert!(SignableTransaction::new(inputs.clone(), &[], Some(addr()), None, FEE).is_ok()); // Data assert!(SignableTransaction::new(inputs.clone(), &[], None, Some(vec![]), FEE).is_ok()); // No outputs assert_eq!( SignableTransaction::new(inputs.clone(), &[], None, None, FEE), Err(TransactionError::NoOutputs), ); assert_eq!( SignableTransaction::new(inputs.clone(), &[(addr(), 1)], None, None, FEE), 
Err(TransactionError::DustPayment), ); assert!( SignableTransaction::new(inputs.clone(), &payments, None, Some(vec![0; 80]), FEE).is_ok() ); assert_eq!( SignableTransaction::new(inputs.clone(), &payments, None, Some(vec![0; 81]), FEE), Err(TransactionError::TooMuchData), ); assert_eq!( SignableTransaction::new(inputs.clone(), &[], Some(addr()), None, 0), Err(TransactionError::TooLowFee), ); assert!(matches!( SignableTransaction::new(inputs.clone(), &[(addr(), inputs[0].value() * 2)], None, None, FEE), Err(TransactionError::NotEnoughFunds { .. }), )); assert_eq!( SignableTransaction::new(inputs, &vec![(addr(), 1000); 10000], None, None, FEE), Err(TransactionError::TooLargeTransaction), ); } async fn test_send() { let (keys, key) = keys(); let rpc = rpc().await; let mut scanner = Scanner::new(key).unwrap(); // Get inputs, one not offset and one offset let output = send_and_get_output(&rpc, &scanner, key).await; assert_eq!(output.offset(), Scalar::ZERO); let offset = scanner.register_offset(Scalar::random(&mut OsRng)).unwrap(); let offset_key = key + (ProjectivePoint::GENERATOR * offset); let offset_output = send_and_get_output(&rpc, &scanner, offset_key).await; assert_eq!(offset_output.offset(), offset); // Declare payments, change, fee let payments = [ (p2tr_script_buf(key).unwrap(), 1005), (p2tr_script_buf(offset_key).unwrap(), 1007) ]; let change_offset = scanner.register_offset(Scalar::random(&mut OsRng)).unwrap(); let change_key = key + (ProjectivePoint::GENERATOR * change_offset); let change_addr = p2tr_script_buf(change_key).unwrap(); // Create and sign the TX let tx = SignableTransaction::new( vec![output.clone(), offset_output.clone()], &payments, Some(change_addr.clone()), None, FEE ).unwrap(); let needed_fee = tx.needed_fee(); let expected_id = tx.txid(); let tx = sign(&keys, &tx); assert_eq!(tx.output.len(), 3); // Ensure we can scan it let outputs = scanner.scan_transaction(&tx); for (o, output) in outputs.iter().enumerate() { 
assert_eq!(output.outpoint(), &OutPoint::new(tx.compute_txid(), u32::try_from(o).unwrap())); assert_eq!(&ReceivedOutput::read::<&[u8]>(&mut output.serialize().as_ref()).unwrap(), output); } assert_eq!(outputs[0].offset(), Scalar::ZERO); assert_eq!(outputs[1].offset(), offset); assert_eq!(outputs[2].offset(), change_offset); // Make sure the payments were properly created for ((output, scanned), payment) in tx.output.iter().zip(outputs.iter()).zip(payments.iter()) { assert_eq!( output, &TxOut { script_pubkey: payment.0.clone(), value: Amount::from_sat(payment.1) }, ); assert_eq!(scanned.value(), payment.1 ); } // Make sure the change is correct assert_eq!(needed_fee, u64::try_from(tx.vsize()).unwrap() * FEE); let input_value = output.value() + offset_output.value(); let output_value = tx.output.iter().map(|output| output.value.to_sat()).sum::(); assert_eq!(input_value - output_value, needed_fee); let change_amount = input_value - payments.iter().map(|payment| payment.1).sum::() - needed_fee; assert_eq!( tx.output[2], TxOut { script_pubkey: change_addr, value: Amount::from_sat(change_amount) }, ); // This also tests send_raw_transaction and get_transaction, which the RPC test can't // effectively test rpc.send_raw_transaction(&tx).await.unwrap(); let mut hash = *tx.compute_txid().as_raw_hash().as_byte_array(); hash.reverse(); assert_eq!(tx, rpc.get_transaction(&hash).await.unwrap()); assert_eq!(expected_id, hash); } async fn test_data() { let (keys, key) = keys(); let rpc = rpc().await; let scanner = Scanner::new(key).unwrap(); let output = send_and_get_output(&rpc, &scanner, key).await; assert_eq!(output.offset(), Scalar::ZERO); let data_len = 60 + usize::try_from(OsRng.next_u64() % 21).unwrap(); let mut data = vec![0; data_len]; OsRng.fill_bytes(&mut data); let tx = sign( &keys, &SignableTransaction::new( vec![output], &[], Some(p2tr_script_buf(key).unwrap()), Some(data.clone()), FEE ).unwrap() ); assert!(tx.output[0].script_pubkey.is_op_return()); let check = |mut 
instructions: Instructions| { assert_eq!(instructions.next().unwrap().unwrap(), Instruction::Op(OP_RETURN)); assert_eq!( instructions.next().unwrap().unwrap(), Instruction::PushBytes(&PushBytesBuf::try_from(data.clone()).unwrap()), ); assert!(instructions.next().is_none()); }; check(tx.output[0].script_pubkey.instructions()); check(tx.output[0].script_pubkey.instructions_minimal()); } } ================================================ FILE: networks/ethereum/.gitignore ================================================ # Solidity build outputs cache artifacts ================================================ FILE: networks/ethereum/Cargo.toml ================================================ [package] name = "ethereum-serai" version = "0.1.0" description = "An Ethereum library supporting Schnorr signing and on-chain verification" license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/networks/ethereum" authors = ["Luke Parker ", "Elizabeth Binks "] edition = "2021" publish = false rust-version = "1.79" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] thiserror = { version = "1", default-features = false } rand_core = { version = "0.6", default-features = false, features = ["std"] } transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["recommended"] } group = { version = "0.13", default-features = false } k256 = { version = "^0.13.1", default-features = false, features = ["std", "ecdsa", "arithmetic"] } frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["secp256k1"] } alloy-core = { version = "0.8", default-features = false } alloy-sol-types = { version = "0.8", default-features = false, features = ["json"] } alloy-consensus = { version = "0.4", default-features = false, features = ["k256"] } alloy-network = { version = "0.4", default-features = 
false } alloy-rpc-types-eth = { version = "0.4", default-features = false } alloy-rpc-client = { version = "0.4", default-features = false } alloy-simple-request-transport = { path = "./alloy-simple-request-transport", default-features = false } alloy-provider = { version = "0.4", default-features = false } alloy-node-bindings = { version = "0.4", default-features = false, optional = true } [dev-dependencies] frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["tests"] } tokio = { version = "1", features = ["macros"] } alloy-node-bindings = { version = "0.4", default-features = false } [features] tests = ["alloy-node-bindings", "frost/tests"] ================================================ FILE: networks/ethereum/LICENSE ================================================ AGPL-3.0-only license Copyright (c) 2022-2023 Luke Parker This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License Version 3 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see . ================================================ FILE: networks/ethereum/README.md ================================================ # Ethereum This package contains Ethereum-related functionality, specifically deploying and interacting with Serai contracts. While `bitcoin-serai` is a general purpose library, `ethereum-serai` is Serai specific. If any of the utilities are generally desired, please fork and maintain your own copy to ensure the desired functionality is preserved, or open an issue to request we make this library general purpose. 
### Dependencies - solc - [Foundry](https://github.com/foundry-rs/foundry) ================================================ FILE: networks/ethereum/alloy-simple-request-transport/Cargo.toml ================================================ [package] name = "alloy-simple-request-transport" version = "0.1.0" description = "A transport for alloy based off simple-request" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/networks/ethereum/alloy-simple-request-transport" authors = ["Luke Parker "] edition = "2021" rust-version = "1.74" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] tower = "0.5" serde_json = { version = "1", default-features = false } simple-request = { path = "../../../common/request", default-features = false } alloy-json-rpc = { version = "0.4", default-features = false } alloy-transport = { version = "0.4", default-features = false } [features] default = ["tls"] tls = ["simple-request/tls"] ================================================ FILE: networks/ethereum/alloy-simple-request-transport/LICENSE ================================================ MIT License Copyright (c) 2024 Luke Parker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: networks/ethereum/alloy-simple-request-transport/README.md ================================================ # Alloy Simple Request Transport A transport for alloy based on simple-request, a small HTTP client built around hyper. ================================================ FILE: networks/ethereum/alloy-simple-request-transport/src/lib.rs ================================================ #![cfg_attr(docsrs, feature(doc_cfg))] #![doc = include_str!("../README.md")] use core::task; use std::io; use alloy_json_rpc::{RequestPacket, ResponsePacket}; use alloy_transport::{TransportError, TransportErrorKind, TransportFut}; use simple_request::{hyper, Request, Client}; use tower::Service; #[derive(Clone, Debug)] pub struct SimpleRequest { client: Client, url: String, } impl SimpleRequest { pub fn new(url: String) -> Self { Self { client: Client::with_connection_pool(), url } } } impl Service for SimpleRequest { type Response = ResponsePacket; type Error = TransportError; type Future = TransportFut<'static>; #[inline] fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> task::Poll> { task::Poll::Ready(Ok(())) } #[inline] fn call(&mut self, req: RequestPacket) -> Self::Future { let inner = self.clone(); Box::pin(async move { let packet = req.serialize().map_err(TransportError::SerError)?; let request = Request::from( hyper::Request::post(&inner.url) .header("Content-Type", "application/json") .body(serde_json::to_vec(&packet).map_err(TransportError::SerError)?.into()) .unwrap(), ); let mut res = inner .client .request(request) .await .map_err(|e| TransportErrorKind::custom(io::Error::other(format!("{e:?}"))))? 
.body() .await .map_err(|e| TransportErrorKind::custom(io::Error::other(format!("{e:?}"))))?; serde_json::from_reader(&mut res).map_err(|e| TransportError::deser_err(e, "")) }) } } ================================================ FILE: networks/ethereum/build.rs ================================================ use std::process::Command; fn main() { println!("cargo:rerun-if-changed=contracts/*"); println!("cargo:rerun-if-changed=artifacts/*"); for line in String::from_utf8(Command::new("solc").args(["--version"]).output().unwrap().stdout) .unwrap() .lines() { if let Some(version) = line.strip_prefix("Version: ") { let version = version.split('+').next().unwrap(); assert_eq!(version, "0.8.26"); } } #[rustfmt::skip] let args = [ "--base-path", ".", "-o", "./artifacts", "--overwrite", "--bin", "--abi", "--via-ir", "--optimize", "./contracts/IERC20.sol", "./contracts/Schnorr.sol", "./contracts/Deployer.sol", "./contracts/Sandbox.sol", "./contracts/Router.sol", "./src/tests/contracts/Schnorr.sol", "./src/tests/contracts/ERC20.sol", "--no-color", ]; let solc = Command::new("solc").args(args).output().unwrap(); assert!(solc.status.success()); for line in String::from_utf8(solc.stderr).unwrap().lines() { assert!(!line.starts_with("Error:")); } } ================================================ FILE: networks/ethereum/contracts/Deployer.sol ================================================ // SPDX-License-Identifier: AGPLv3 pragma solidity ^0.8.0; /* The expected deployment process of the Router is as follows: 1) A transaction deploying Deployer is made. Then, a deterministic signature is created such that an account with an unknown private key is the creator of the contract. Anyone can fund this address, and once anyone does, the transaction deploying Deployer can be published by anyone. No other transaction may be made from that account. 2) Anyone deploys the Router through the Deployer. 
This uses a sequential nonce such that meet-in-the-middle attacks, with complexity 2**80, aren't feasible. While such attacks would still be feasible if the Deployer's address was controllable, the usage of a deterministic signature with a NUMS method prevents that. This doesn't have any denial-of-service risks and will resolve once anyone steps forward as deployer. This does fail to guarantee an identical address across every chain, though it enables letting anyone efficiently ask the Deployer for the address (with the Deployer having an identical address on every chain). Unfortunately, guaranteeing identical addresses isn't feasible. We'd need the Deployer contract to use a consistent salt for the Router, yet the Router must be deployed with a specific public key for Serai. Since Ethereum isn't able to determine a valid public key (one that is the result of a Serai DKG) from a dishonest public key, we have to allow multiple deployments with Serai being the one to determine which to use. The alternative would be to have a council publish the Serai key on-Ethereum, with Serai verifying the published result. This would introduce a DoS risk in the council not publishing the correct key/not publishing any key.
*/ contract Deployer { event Deployment(bytes32 indexed init_code_hash, address created); error DeploymentFailed(); function deploy(bytes memory init_code) external { address created; assembly { created := create(0, add(init_code, 0x20), mload(init_code)) } if (created == address(0)) { revert DeploymentFailed(); } // These may be emitted out of order upon re-entrancy emit Deployment(keccak256(init_code), created); } } ================================================ FILE: networks/ethereum/contracts/IERC20.sol ================================================ // SPDX-License-Identifier: CC0 pragma solidity ^0.8.0; interface IERC20 { event Transfer(address indexed from, address indexed to, uint256 value); event Approval(address indexed owner, address indexed spender, uint256 value); function name() external view returns (string memory); function symbol() external view returns (string memory); function decimals() external view returns (uint8); function totalSupply() external view returns (uint256); function balanceOf(address owner) external view returns (uint256); function transfer(address to, uint256 value) external returns (bool); function transferFrom(address from, address to, uint256 value) external returns (bool); function approve(address spender, uint256 value) external returns (bool); function allowance(address owner, address spender) external view returns (uint256); } ================================================ FILE: networks/ethereum/contracts/Router.sol ================================================ // SPDX-License-Identifier: AGPLv3 pragma solidity ^0.8.0; import "./IERC20.sol"; import "./Schnorr.sol"; import "./Sandbox.sol"; contract Router { // Nonce is incremented for each batch of transactions executed/key update uint256 public nonce; // Current public key's x-coordinate // This key must always have the parity defined within the Schnorr contract bytes32 public seraiKey; struct OutInstruction { address to; Call[] calls; uint256 value; } struct 
Signature { bytes32 c; bytes32 s; } event SeraiKeyUpdated( uint256 indexed nonce, bytes32 indexed key, Signature signature ); event InInstruction( address indexed from, address indexed coin, uint256 amount, bytes instruction ); // success is a uint256 representing a bitfield of transaction successes event Executed( uint256 indexed nonce, bytes32 indexed batch, uint256 success, Signature signature ); // error types error InvalidKey(); error InvalidSignature(); error InvalidAmount(); error FailedTransfer(); error TooManyTransactions(); modifier _updateSeraiKeyAtEndOfFn( uint256 _nonce, bytes32 key, Signature memory sig ) { if ( (key == bytes32(0)) || ((bytes32(uint256(key) % Schnorr.Q)) != key) ) { revert InvalidKey(); } _; seraiKey = key; emit SeraiKeyUpdated(_nonce, key, sig); } constructor(bytes32 _seraiKey) _updateSeraiKeyAtEndOfFn( 0, _seraiKey, Signature({ c: bytes32(0), s: bytes32(0) }) ) { nonce = 1; } // updateSeraiKey validates the given Schnorr signature against the current // public key, and if successful, updates the contract's public key to the // given one. 
function updateSeraiKey( bytes32 _seraiKey, Signature calldata sig ) external _updateSeraiKeyAtEndOfFn(nonce, _seraiKey, sig) { bytes memory message = abi.encodePacked("updateSeraiKey", block.chainid, nonce, _seraiKey); nonce++; if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) { revert InvalidSignature(); } } function inInstruction( address coin, uint256 amount, bytes memory instruction ) external payable { if (coin == address(0)) { if (amount != msg.value) { revert InvalidAmount(); } } else { (bool success, bytes memory res) = address(coin).call( abi.encodeWithSelector( IERC20.transferFrom.selector, msg.sender, address(this), amount ) ); // Require there was nothing returned, which is done by some non-standard // tokens, or that the ERC20 contract did in fact return true bool nonStandardResOrTrue = (res.length == 0) || abi.decode(res, (bool)); if (!(success && nonStandardResOrTrue)) { revert FailedTransfer(); } } /* Due to fee-on-transfer tokens, emitting the amount directly is frowned upon. The amount instructed to transfer may not actually be the amount transferred. If we add nonReentrant to every single function which can effect the balance, we can check the amount exactly matches. This prevents transfers of less value than expected occurring, at least, not without an additional transfer to top up the difference (which isn't routed through this contract and accordingly isn't trying to artificially create events). If we don't add nonReentrant, a transfer can be started, and then a new transfer for the difference can follow it up (again and again until a rounding error is reached). This contract would believe all transfers were done in full, despite each only being done in part (except for the last one). 
Given fee-on-transfer tokens aren't intended to be supported, the only token planned to be supported is Dai and it doesn't have any fee-on-transfer logic, fee-on-transfer tokens aren't even able to be supported at this time, we simply classify this entire class of tokens as non-standard implementations which induce undefined behavior. It is the Serai network's role not to add support for any non-standard implementations. */ emit InInstruction(msg.sender, coin, amount, instruction); } // execute accepts a list of transactions to execute as well as a signature. // if signature verification passes, the given transactions are executed. // if signature verification fails, this function will revert. function execute( OutInstruction[] calldata transactions, Signature calldata sig ) external { if (transactions.length > 256) { revert TooManyTransactions(); } bytes memory message = abi.encode("execute", block.chainid, nonce, transactions); uint256 executed_with_nonce = nonce; // This prevents re-entrancy from causing double spends yet does allow // out-of-order execution via re-entrancy nonce++; if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) { revert InvalidSignature(); } uint256 successes; for (uint256 i = 0; i < transactions.length; i++) { bool success; // If there are no calls, send to `to` the value if (transactions[i].calls.length == 0) { (success, ) = transactions[i].to.call{ value: transactions[i].value, gas: 5_000 }(""); } else { // If there are calls, ignore `to`. 
Deploy a new Sandbox and proxy the // calls through that // // We could use a single sandbox in order to reduce gas costs, yet that // risks one person creating an approval that's hooked before another // user's intended action executes, in order to drain their coins // // While technically, that would be a flaw in the sandboxed flow, this // is robust and prevents such flaws from being possible // // We also don't want people to set state via the Sandbox and expect it // future available when anyone else could set a distinct value Sandbox sandbox = new Sandbox(); (success, ) = address(sandbox).call{ value: transactions[i].value, // TODO: Have the Call specify the gas up front gas: 350_000 }( abi.encodeWithSelector( Sandbox.sandbox.selector, transactions[i].calls ) ); } assembly { successes := or(successes, shl(i, success)) } } emit Executed( executed_with_nonce, keccak256(message), successes, sig ); } } ================================================ FILE: networks/ethereum/contracts/Sandbox.sol ================================================ // SPDX-License-Identifier: AGPLv3 pragma solidity ^0.8.24; struct Call { address to; uint256 value; bytes data; } // A minimal sandbox focused on gas efficiency. // // The first call is executed if any of the calls fail, making it a fallback. // All other calls are executed sequentially. 
contract Sandbox { error AlreadyCalled(); error CallsFailed(); function sandbox(Call[] calldata calls) external payable { // Prevent re-entrancy due to this executing arbitrary calls from anyone // and anywhere bool called; assembly { called := tload(0) } if (called) { revert AlreadyCalled(); } assembly { tstore(0, 1) } // Execute the calls, starting from 1 for (uint256 i = 1; i < calls.length; i++) { (bool success, ) = calls[i].to.call{ value: calls[i].value }(calls[i].data); // If this call failed, execute the fallback (call 0) if (!success) { (success, ) = calls[0].to.call{ value: address(this).balance }(calls[0].data); // If this call also failed, revert entirely if (!success) { revert CallsFailed(); } return; } } // We don't clear the re-entrancy guard as this contract should never be // called again, so there's no reason to spend the effort } } ================================================ FILE: networks/ethereum/contracts/Schnorr.sol ================================================ // SPDX-License-Identifier: AGPLv3 pragma solidity ^0.8.0; // see https://github.com/noot/schnorr-verify for implementation details library Schnorr { // secp256k1 group order uint256 constant public Q = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141; // Fixed parity for the public keys used in this contract // This avoids spending a word passing the parity in a similar style to // Bitcoin's Taproot uint8 constant public KEY_PARITY = 27; error InvalidSOrA(); error MalformedSignature(); // px := public key x-coord, where the public key has a parity of KEY_PARITY // message := 32-byte hash of the message // c := schnorr signature challenge // s := schnorr signature function verify( bytes32 px, bytes memory message, bytes32 c, bytes32 s ) internal pure returns (bool) { // ecrecover = (m, v, r, s) -> key // We instead pass the following to obtain the nonce (not the key) // Then we hash it and verify it matches the challenge bytes32 sa = bytes32(Q - 
mulmod(uint256(s), uint256(px), Q)); bytes32 ca = bytes32(Q - mulmod(uint256(c), uint256(px), Q)); // For safety, we want each input to ecrecover to be non-zero (sa, px, ca) // The ecrecover precompile checks `r` and `s` (`px` and `ca`) are non-zero // That leaves us to check `sa` is non-zero if (sa == 0) revert InvalidSOrA(); address R = ecrecover(sa, KEY_PARITY, px, ca); if (R == address(0)) revert MalformedSignature(); // Check the signature is correct by rebuilding the challenge return c == keccak256(abi.encodePacked(R, px, message)); } } ================================================ FILE: networks/ethereum/relayer/Cargo.toml ================================================ [package] name = "serai-ethereum-relayer" version = "0.1.0" description = "A relayer for Serai's Ethereum transactions" license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/networks/ethereum/relayer" authors = ["Luke Parker "] keywords = [] edition = "2021" publish = false [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] log = { version = "0.4", default-features = false, features = ["std"] } env_logger = { version = "0.10", default-features = false, features = ["humantime"] } tokio = { version = "1", default-features = false, features = ["rt", "time", "io-util", "net", "macros"] } serai-env = { path = "../../../common/env" } serai-db = { path = "../../../common/db" } [features] parity-db = ["serai-db/parity-db"] rocksdb = ["serai-db/rocksdb"] ================================================ FILE: networks/ethereum/relayer/LICENSE ================================================ AGPL-3.0-only license Copyright (c) 2023-2024 Luke Parker This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License Version 3 as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see . ================================================ FILE: networks/ethereum/relayer/README.md ================================================ # Ethereum Transaction Relayer This server collects Ethereum router commands to be published, offering an RPC to fetch them. ================================================ FILE: networks/ethereum/relayer/src/main.rs ================================================ pub(crate) use tokio::{ io::{AsyncReadExt, AsyncWriteExt}, net::TcpListener, }; use serai_db::{Get, DbTxn, Db as DbTrait}; #[tokio::main(flavor = "current_thread")] async fn main() { // Override the panic handler with one which will panic if any tokio task panics { let existing = std::panic::take_hook(); std::panic::set_hook(Box::new(move |panic| { existing(panic); const MSG: &str = "exiting the process due to a task panicking"; println!("{MSG}"); log::error!("{MSG}"); std::process::exit(1); })); } if std::env::var("RUST_LOG").is_err() { std::env::set_var("RUST_LOG", serai_env::var("RUST_LOG").unwrap_or_else(|| "info".to_string())); } env_logger::init(); log::info!("Starting Ethereum relayer server..."); // Open the DB #[allow(unused_variables, unreachable_code)] let db = { #[cfg(all(feature = "parity-db", feature = "rocksdb"))] panic!("built with parity-db and rocksdb"); #[cfg(all(feature = "parity-db", not(feature = "rocksdb")))] let db = serai_db::new_parity_db(&serai_env::var("DB_PATH").expect("path to DB wasn't specified")); #[cfg(feature = "rocksdb")] let db = serai_db::new_rocksdb(&serai_env::var("DB_PATH").expect("path to DB wasn't specified")); db }; // Start command recipience server // This should not be 
publicly exposed // TODO: Add auth tokio::spawn({ let db = db.clone(); async move { // 5132 ^ ((b'E' << 8) | b'R') let server = TcpListener::bind("0.0.0.0:20830").await.unwrap(); loop { let (mut socket, _) = server.accept().await.unwrap(); let db = db.clone(); tokio::spawn(async move { let mut db = db.clone(); while let Ok(msg_len) = socket.read_u32_le().await { let mut buf = vec![0; usize::try_from(msg_len).unwrap()]; let Ok(_) = socket.read_exact(&mut buf).await else { break }; if buf.len() < 5 { break; } let nonce = u32::from_le_bytes(buf[.. 4].try_into().unwrap()); let mut txn = db.txn(); txn.put(nonce.to_le_bytes(), &buf[4 ..]); txn.commit(); let Ok(()) = socket.write_all(&[1]).await else { break }; log::info!("received signed command #{nonce}"); } }); } } }); // Start command fetch server // 5132 ^ ((b'E' << 8) | b'R') + 1 let server = TcpListener::bind("0.0.0.0:20831").await.unwrap(); loop { let (mut socket, _) = server.accept().await.unwrap(); let db = db.clone(); tokio::spawn(async move { let db = db.clone(); loop { // Nonce to get the router command for let mut buf = vec![0; 4]; let Ok(_) = socket.read_exact(&mut buf).await else { break }; let command = db.get(&buf[..
4]).unwrap_or(vec![]); let Ok(()) = socket.write_all(&u32::try_from(command.len()).unwrap().to_le_bytes()).await else { break; }; let Ok(()) = socket.write_all(&command).await else { break }; } }); } } ================================================ FILE: networks/ethereum/src/abi/mod.rs ================================================ use alloy_sol_types::sol; #[rustfmt::skip] #[allow(warnings)] #[allow(needless_pass_by_value)] #[allow(clippy::all)] #[allow(clippy::ignored_unit_patterns)] #[allow(clippy::redundant_closure_for_method_calls)] mod erc20_container { use super::*; sol!("contracts/IERC20.sol"); } pub use erc20_container::IERC20 as erc20; #[rustfmt::skip] #[allow(warnings)] #[allow(needless_pass_by_value)] #[allow(clippy::all)] #[allow(clippy::ignored_unit_patterns)] #[allow(clippy::redundant_closure_for_method_calls)] mod deployer_container { use super::*; sol!("contracts/Deployer.sol"); } pub use deployer_container::Deployer as deployer; #[rustfmt::skip] #[allow(warnings)] #[allow(needless_pass_by_value)] #[allow(clippy::all)] #[allow(clippy::ignored_unit_patterns)] #[allow(clippy::redundant_closure_for_method_calls)] mod router_container { use super::*; sol!(Router, "artifacts/Router.abi"); } pub use router_container::Router as router; ================================================ FILE: networks/ethereum/src/crypto.rs ================================================ #![allow(deprecated)] use group::ff::PrimeField; use k256::{ elliptic_curve::{ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint}, ProjectivePoint, Scalar, U256 as KU256, }; #[cfg(test)] use k256::{elliptic_curve::point::DecompressPoint, AffinePoint}; use frost::{ algorithm::{Hram, SchnorrSignature}, curve::{Ciphersuite, Secp256k1}, }; use alloy_core::primitives::{Parity, Signature as AlloySignature}; use alloy_consensus::{SignableTransaction, Signed, TxLegacy}; use crate::abi::router::{Signature as AbiSignature}; pub(crate) fn keccak256(data: &[u8]) -> [u8; 32] { 
alloy_core::primitives::keccak256(data).into() } pub(crate) fn hash_to_scalar(data: &[u8]) -> Scalar { >::reduce_bytes(&keccak256(data).into()) } pub fn address(point: &ProjectivePoint) -> [u8; 20] { let encoded_point = point.to_encoded_point(false); // Last 20 bytes of the hash of the concatenated x and y coordinates // We obtain the concatenated x and y coordinates via the uncompressed encoding of the point keccak256(&encoded_point.as_ref()[1 .. 65])[12 ..].try_into().unwrap() } /// Deterministically sign a transaction. /// /// This function panics if passed a transaction with a non-None chain ID. pub fn deterministically_sign(tx: &TxLegacy) -> Signed { assert!( tx.chain_id.is_none(), "chain ID was Some when deterministically signing a TX (causing a non-deterministic signer)" ); let sig_hash = tx.signature_hash().0; let mut r = hash_to_scalar(&[sig_hash.as_slice(), b"r"].concat()); let mut s = hash_to_scalar(&[sig_hash.as_slice(), b"s"].concat()); loop { let r_bytes: [u8; 32] = r.to_repr().into(); let s_bytes: [u8; 32] = s.to_repr().into(); let v = Parity::NonEip155(false); let signature = AlloySignature::from_scalars_and_parity(r_bytes.into(), s_bytes.into(), v).unwrap(); let tx = tx.clone().into_signed(signature); if tx.recover_signer().is_ok() { return tx; } // Re-hash until valid r = hash_to_scalar(r_bytes.as_ref()); s = hash_to_scalar(s_bytes.as_ref()); } } /// The public key for a Schnorr-signing account. #[allow(non_snake_case)] #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub struct PublicKey { pub(crate) A: ProjectivePoint, pub(crate) px: Scalar, } impl PublicKey { /// Construct a new `PublicKey`. /// /// This will return None if the provided point isn't eligible to be a public key (due to /// bounds such as parity). 
#[allow(non_snake_case)] pub fn new(A: ProjectivePoint) -> Option { let affine = A.to_affine(); // Only allow even keys to save a word within Ethereum let is_odd = bool::from(affine.y_is_odd()); if is_odd { None?; } let x_coord = affine.x(); let x_coord_scalar = >::reduce_bytes(&x_coord); // Return None if a reduction would occur // Reductions would be incredibly unlikely and shouldn't be an issue, yet it's one less // headache/concern to have // This does ban a trivial amount of public keys if x_coord_scalar.to_repr() != x_coord { None?; } Some(PublicKey { A, px: x_coord_scalar }) } pub fn point(&self) -> ProjectivePoint { self.A } pub(crate) fn eth_repr(&self) -> [u8; 32] { self.px.to_repr().into() } #[cfg(test)] pub(crate) fn from_eth_repr(repr: [u8; 32]) -> Option { #[allow(non_snake_case)] let A = Option::::from(AffinePoint::decompress(&repr.into(), 0.into()))?.into(); Option::from(Scalar::from_repr(repr.into())).map(|px| PublicKey { A, px }) } } /// The HRAm to use for the Schnorr contract. #[derive(Clone, Default)] pub struct EthereumHram {} impl Hram for EthereumHram { #[allow(non_snake_case)] fn hram(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar { let x_coord = A.to_affine().x(); let mut data = address(R).to_vec(); data.extend(x_coord.as_slice()); data.extend(m); >::reduce_bytes(&keccak256(&data).into()) } } /// A signature for the Schnorr contract. #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub struct Signature { pub(crate) c: Scalar, pub(crate) s: Scalar, } impl Signature { pub fn verify(&self, public_key: &PublicKey, message: &[u8]) -> bool { #[allow(non_snake_case)] let R = (Secp256k1::generator() * self.s) - (public_key.A * self.c); EthereumHram::hram(&R, &public_key.A, message) == self.c } /// Construct a new `Signature`. /// /// This will return None if the signature is invalid.
pub fn new( public_key: &PublicKey, message: &[u8], signature: SchnorrSignature, ) -> Option { let c = EthereumHram::hram(&signature.R, &public_key.A, message); if !signature.verify(public_key.A, c) { None?; } let res = Signature { c, s: signature.s }; assert!(res.verify(public_key, message)); Some(res) } pub fn c(&self) -> Scalar { self.c } pub fn s(&self) -> Scalar { self.s } pub fn to_bytes(&self) -> [u8; 64] { let mut res = [0; 64]; res[.. 32].copy_from_slice(self.c.to_repr().as_ref()); res[32 ..].copy_from_slice(self.s.to_repr().as_ref()); res } pub fn from_bytes(bytes: [u8; 64]) -> std::io::Result { let mut reader = bytes.as_slice(); let c = Secp256k1::read_F(&mut reader)?; let s = Secp256k1::read_F(&mut reader)?; Ok(Signature { c, s }) } } impl From<&Signature> for AbiSignature { fn from(sig: &Signature) -> AbiSignature { let c: [u8; 32] = sig.c.to_repr().into(); let s: [u8; 32] = sig.s.to_repr().into(); AbiSignature { c: c.into(), s: s.into() } } } ================================================ FILE: networks/ethereum/src/deployer.rs ================================================ use std::sync::Arc; use alloy_core::primitives::{hex::FromHex, Address, B256, U256, Bytes, TxKind}; use alloy_consensus::{Signed, TxLegacy}; use alloy_sol_types::{SolCall, SolEvent}; use alloy_rpc_types_eth::{BlockNumberOrTag, Filter}; use alloy_simple_request_transport::SimpleRequest; use alloy_provider::{Provider, RootProvider}; use crate::{ Error, crypto::{self, keccak256, PublicKey}, router::Router, }; pub use crate::abi::deployer as abi; /// The Deployer contract for the Router contract. /// /// This Deployer has a deterministic address, letting it be immediately identified on any /// compatible chain. It then supports retrieving the Router contract's address (which isn't /// deterministic) using a single log query. #[derive(Clone, Debug)] pub struct Deployer; impl Deployer { /// Obtain the transaction to deploy this contract, already signed. 
/// /// The account this transaction is sent from (which is populated in `from`) must be sufficiently /// funded for this transaction to be submitted. This account has no known private key to anyone, /// so ETH sent can be neither misappropriated nor returned. pub fn deployment_tx() -> Signed { let bytecode = include_str!("../artifacts/Deployer.bin"); let bytecode = Bytes::from_hex(bytecode).expect("compiled-in Deployer bytecode wasn't valid hex"); let tx = TxLegacy { chain_id: None, nonce: 0, gas_price: 100_000_000_000u128, // TODO: Use a more accurate gas limit gas_limit: 1_000_000, to: TxKind::Create, value: U256::ZERO, input: bytecode, }; crypto::deterministically_sign(&tx) } /// Obtain the deterministic address for this contract. pub fn address() -> [u8; 20] { let deployer_deployer = Self::deployment_tx().recover_signer().expect("deployment_tx didn't have a valid signature"); **Address::create(&deployer_deployer, 0) } /// Construct a new view of the `Deployer`. pub async fn new(provider: Arc>) -> Result, Error> { let address = Self::address(); let code = provider.get_code_at(address.into()).await.map_err(|_| Error::ConnectionError)?; // Contract has yet to be deployed if code.is_empty() { return Ok(None); } Ok(Some(Self)) } /// Yield the `ContractCall` necessary to deploy the Router. pub fn deploy_router(&self, key: &PublicKey) -> TxLegacy { TxLegacy { to: TxKind::Call(Self::address().into()), input: abi::deployCall::new((Router::init_code(key).into(),)).abi_encode().into(), gas_limit: 1_000_000, ..Default::default() } } /// Find the first Router deployed with the specified key as its first key. /// /// This is the Router Serai will use, and is the only way to construct a `Router`. 
pub async fn find_router( &self, provider: Arc>, key: &PublicKey, ) -> Result, Error> { let init_code = Router::init_code(key); let init_code_hash = keccak256(&init_code); #[cfg(not(test))] let to_block = BlockNumberOrTag::Finalized; #[cfg(test)] let to_block = BlockNumberOrTag::Latest; // Find the first log using this init code (where the init code is binding to the key) // TODO: Make an abstraction for event filtering (de-duplicating common code) let filter = Filter::new().from_block(0).to_block(to_block).address(Address::from(Self::address())); let filter = filter.event_signature(abi::Deployment::SIGNATURE_HASH); let filter = filter.topic1(B256::from(init_code_hash)); let logs = provider.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; let Some(first_log) = logs.first() else { return Ok(None) }; let router = first_log .log_decode::() .map_err(|_| Error::ConnectionError)? .inner .data .created; Ok(Some(Router::new(provider, router))) } } ================================================ FILE: networks/ethereum/src/erc20.rs ================================================ use std::{sync::Arc, collections::HashSet}; use alloy_core::primitives::{Address, B256, U256}; use alloy_sol_types::{SolInterface, SolEvent}; use alloy_rpc_types_eth::Filter; use alloy_simple_request_transport::SimpleRequest; use alloy_provider::{Provider, RootProvider}; use crate::Error; pub use crate::abi::erc20 as abi; use abi::{IERC20Calls, Transfer, transferCall, transferFromCall}; #[derive(Clone, Debug)] pub struct TopLevelErc20Transfer { pub id: [u8; 32], pub from: [u8; 20], pub amount: U256, pub data: Vec, } /// A view for an ERC20 contract. #[derive(Clone, Debug)] pub struct Erc20(Arc>, Address); impl Erc20 { /// Construct a new view of the specified ERC20 contract. 
pub fn new(provider: Arc>, address: [u8; 20]) -> Self { Self(provider, Address::from(&address)) } pub async fn top_level_transfers( &self, block: u64, to: [u8; 20], ) -> Result, Error> { let filter = Filter::new().from_block(block).to_block(block).address(self.1); let filter = filter.event_signature(Transfer::SIGNATURE_HASH); let mut to_topic = [0; 32]; to_topic[12 ..].copy_from_slice(&to); let filter = filter.topic2(B256::from(to_topic)); let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; let mut handled = HashSet::new(); let mut top_level_transfers = vec![]; for log in logs { // Double check the address which emitted this log if log.address() != self.1 { Err(Error::ConnectionError)?; } let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?; let tx = self.0.get_transaction_by_hash(tx_id).await.ok().flatten().ok_or(Error::ConnectionError)?; // If this is a top-level call... if tx.to == Some(self.1) { // And we recognize the call... // Don't validate the encoding as this can't be re-encoded to an identical bytestring due // to the InInstruction appended if let Ok(call) = IERC20Calls::abi_decode(&tx.input, false) { // Extract the top-level call's from/to/value let (from, call_to, value) = match call { IERC20Calls::transfer(transferCall { to: call_to, value }) => (tx.from, call_to, value), IERC20Calls::transferFrom(transferFromCall { from, to: call_to, value }) => { (from, call_to, value) } // Treat any other function selectors as unrecognized _ => continue, }; let log = log.log_decode::().map_err(|_| Error::ConnectionError)?.inner.data; // Ensure the top-level transfer is equivalent, and this presumably isn't a log for an // internal transfer if (log.from != from) || (call_to != to) || (value != log.value) { continue; } // Now that the top-level transfer is confirmed to be equivalent to the log, ensure it's // the only log we handle if handled.contains(&tx_id) { continue; } handled.insert(tx_id); // Read the data appended after 
let encoded = call.abi_encode(); let data = tx.input.as_ref()[encoded.len() ..].to_vec(); // Push the transfer top_level_transfers.push(TopLevelErc20Transfer { // Since we'll only handle one log for this TX, set the ID to the TX ID id: *tx_id, from: *log.from.0, amount: log.value, data, }); } } } Ok(top_level_transfers) } } ================================================ FILE: networks/ethereum/src/lib.rs ================================================ use thiserror::Error; pub mod alloy { pub use alloy_core::primitives; pub use alloy_core as core; pub use alloy_sol_types as sol_types; pub use alloy_consensus as consensus; pub use alloy_network as network; pub use alloy_rpc_types_eth as rpc_types; pub use alloy_simple_request_transport as simple_request_transport; pub use alloy_rpc_client as rpc_client; pub use alloy_provider as provider; } pub mod crypto; pub(crate) mod abi; pub mod erc20; pub mod deployer; pub mod router; pub mod machine; #[cfg(any(test, feature = "tests"))] pub mod tests; #[derive(Clone, Copy, PartialEq, Eq, Debug, Error)] pub enum Error { #[error("failed to verify Schnorr signature")] InvalidSignature, #[error("couldn't make call/send TX")] ConnectionError, } ================================================ FILE: networks/ethereum/src/machine.rs ================================================ use std::{ io::{self, Read}, collections::HashMap, }; use rand_core::{RngCore, CryptoRng}; use transcript::{Transcript, RecommendedTranscript}; use group::GroupEncoding; use frost::{ curve::{Ciphersuite, Secp256k1}, Participant, ThresholdKeys, FrostError, algorithm::Schnorr, sign::*, }; use alloy_core::primitives::U256; use crate::{ crypto::{PublicKey, EthereumHram, Signature}, router::{ abi::{Call as AbiCall, OutInstruction as AbiOutInstruction}, Router, }, }; #[derive(Clone, PartialEq, Eq, Debug)] pub struct Call { pub to: [u8; 20], pub value: U256, pub data: Vec, } impl Call { pub fn read(reader: &mut R) -> io::Result { let mut to = [0; 20]; 
reader.read_exact(&mut to)?; let value = { let mut value_bytes = [0; 32]; reader.read_exact(&mut value_bytes)?; U256::from_le_slice(&value_bytes) }; let mut data_len = { let mut data_len = [0; 4]; reader.read_exact(&mut data_len)?; usize::try_from(u32::from_le_bytes(data_len)).expect("u32 couldn't fit within a usize") }; // A valid DoS would be to claim a 4 GB data is present for only 4 bytes // We read this in 1 KB chunks to only read data actually present (with a max DoS of 1 KB) let mut data = vec![]; while data_len > 0 { let chunk_len = data_len.min(1024); let mut chunk = vec![0; chunk_len]; reader.read_exact(&mut chunk)?; data.extend(&chunk); data_len -= chunk_len; } Ok(Call { to, value, data }) } fn write(&self, writer: &mut W) -> io::Result<()> { writer.write_all(&self.to)?; writer.write_all(&self.value.as_le_bytes())?; let data_len = u32::try_from(self.data.len()) .map_err(|_| io::Error::other("call data length exceeded 2**32"))?; writer.write_all(&data_len.to_le_bytes())?; writer.write_all(&self.data) } } impl From for AbiCall { fn from(call: Call) -> AbiCall { AbiCall { to: call.to.into(), value: call.value, data: call.data.into() } } } #[derive(Clone, PartialEq, Eq, Debug)] pub enum OutInstructionTarget { Direct([u8; 20]), Calls(Vec), } impl OutInstructionTarget { fn read(reader: &mut R) -> io::Result { let mut kind = [0xff]; reader.read_exact(&mut kind)?; match kind[0] { 0 => { let mut addr = [0; 20]; reader.read_exact(&mut addr)?; Ok(OutInstructionTarget::Direct(addr)) } 1 => { let mut calls_len = [0; 4]; reader.read_exact(&mut calls_len)?; let calls_len = u32::from_le_bytes(calls_len); let mut calls = vec![]; for _ in 0 .. 
calls_len { calls.push(Call::read(reader)?); } Ok(OutInstructionTarget::Calls(calls)) } _ => Err(io::Error::other("unrecognized OutInstructionTarget"))?, } } fn write(&self, writer: &mut W) -> io::Result<()> { match self { OutInstructionTarget::Direct(addr) => { writer.write_all(&[0])?; writer.write_all(addr)?; } OutInstructionTarget::Calls(calls) => { writer.write_all(&[1])?; let call_len = u32::try_from(calls.len()) .map_err(|_| io::Error::other("amount of calls exceeded 2**32"))?; writer.write_all(&call_len.to_le_bytes())?; for call in calls { call.write(writer)?; } } } Ok(()) } } #[derive(Clone, PartialEq, Eq, Debug)] pub struct OutInstruction { pub target: OutInstructionTarget, pub value: U256, } impl OutInstruction { fn read(reader: &mut R) -> io::Result { let target = OutInstructionTarget::read(reader)?; let value = { let mut value_bytes = [0; 32]; reader.read_exact(&mut value_bytes)?; U256::from_le_slice(&value_bytes) }; Ok(OutInstruction { target, value }) } fn write(&self, writer: &mut W) -> io::Result<()> { self.target.write(writer)?; writer.write_all(&self.value.as_le_bytes()) } } impl From for AbiOutInstruction { fn from(instruction: OutInstruction) -> AbiOutInstruction { match instruction.target { OutInstructionTarget::Direct(addr) => { AbiOutInstruction { to: addr.into(), calls: vec![], value: instruction.value } } OutInstructionTarget::Calls(calls) => AbiOutInstruction { to: [0; 20].into(), calls: calls.into_iter().map(Into::into).collect(), value: instruction.value, }, } } } #[derive(Clone, PartialEq, Eq, Debug)] pub enum RouterCommand { UpdateSeraiKey { chain_id: U256, nonce: U256, key: PublicKey }, Execute { chain_id: U256, nonce: U256, outs: Vec }, } impl RouterCommand { pub fn msg(&self) -> Vec { match self { RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => { Router::update_serai_key_message(*chain_id, *nonce, key) } RouterCommand::Execute { chain_id, nonce, outs } => Router::execute_message( *chain_id, *nonce, outs.iter().map(|out| 
out.clone().into()).collect(), ), } } pub fn read(reader: &mut R) -> io::Result { let mut kind = [0xff]; reader.read_exact(&mut kind)?; match kind[0] { 0 => { let mut chain_id = [0; 32]; reader.read_exact(&mut chain_id)?; let mut nonce = [0; 32]; reader.read_exact(&mut nonce)?; let key = PublicKey::new(Secp256k1::read_G(reader)?) .ok_or(io::Error::other("key for RouterCommand doesn't have an eth representation"))?; Ok(RouterCommand::UpdateSeraiKey { chain_id: U256::from_le_slice(&chain_id), nonce: U256::from_le_slice(&nonce), key, }) } 1 => { let mut chain_id = [0; 32]; reader.read_exact(&mut chain_id)?; let chain_id = U256::from_le_slice(&chain_id); let mut nonce = [0; 32]; reader.read_exact(&mut nonce)?; let nonce = U256::from_le_slice(&nonce); let mut outs_len = [0; 4]; reader.read_exact(&mut outs_len)?; let outs_len = u32::from_le_bytes(outs_len); let mut outs = vec![]; for _ in 0 .. outs_len { outs.push(OutInstruction::read(reader)?); } Ok(RouterCommand::Execute { chain_id, nonce, outs }) } _ => Err(io::Error::other("reading unknown type of RouterCommand"))?, } } pub fn write(&self, writer: &mut W) -> io::Result<()> { match self { RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => { writer.write_all(&[0])?; writer.write_all(&chain_id.as_le_bytes())?; writer.write_all(&nonce.as_le_bytes())?; writer.write_all(&key.A.to_bytes()) } RouterCommand::Execute { chain_id, nonce, outs } => { writer.write_all(&[1])?; writer.write_all(&chain_id.as_le_bytes())?; writer.write_all(&nonce.as_le_bytes())?; writer.write_all(&u32::try_from(outs.len()).unwrap().to_le_bytes())?; for out in outs { out.write(writer)?; } Ok(()) } } } pub fn serialize(&self) -> Vec { let mut res = vec![]; self.write(&mut res).unwrap(); res } } #[derive(Clone, PartialEq, Eq, Debug)] pub struct SignedRouterCommand { command: RouterCommand, signature: Signature, } impl SignedRouterCommand { pub fn new(key: &PublicKey, command: RouterCommand, signature: &[u8; 64]) -> Option { let c = 
Secp256k1::read_F(&mut &signature[.. 32]).ok()?; let s = Secp256k1::read_F(&mut &signature[32 ..]).ok()?; let signature = Signature { c, s }; if !signature.verify(key, &command.msg()) { None? } Some(SignedRouterCommand { command, signature }) } pub fn command(&self) -> &RouterCommand { &self.command } pub fn signature(&self) -> &Signature { &self.signature } pub fn read(reader: &mut R) -> io::Result { let command = RouterCommand::read(reader)?; let mut sig = [0; 64]; reader.read_exact(&mut sig)?; let signature = Signature::from_bytes(sig)?; Ok(SignedRouterCommand { command, signature }) } pub fn write(&self, writer: &mut W) -> io::Result<()> { self.command.write(writer)?; writer.write_all(&self.signature.to_bytes()) } } pub struct RouterCommandMachine { key: PublicKey, command: RouterCommand, machine: AlgorithmMachine>, } impl RouterCommandMachine { pub fn new(keys: ThresholdKeys, command: RouterCommand) -> Option { // The Schnorr algorithm should be fine without this, even when using the IETF variant // If this is better and more comprehensive, we should do it, even if not necessary let mut transcript = RecommendedTranscript::new(b"ethereum-serai RouterCommandMachine v0.1"); let key = keys.group_key(); transcript.append_message(b"key", key.to_bytes()); transcript.append_message(b"command", command.serialize()); Some(Self { key: PublicKey::new(key)?, command, machine: AlgorithmMachine::new(Schnorr::new(transcript), keys), }) } } impl PreprocessMachine for RouterCommandMachine { type Preprocess = Preprocess; type Signature = SignedRouterCommand; type SignMachine = RouterCommandSignMachine; fn preprocess( self, rng: &mut R, ) -> (Self::SignMachine, Self::Preprocess) { let (machine, preprocess) = self.machine.preprocess(rng); (RouterCommandSignMachine { key: self.key, command: self.command, machine }, preprocess) } } pub struct RouterCommandSignMachine { key: PublicKey, command: RouterCommand, machine: AlgorithmSignMachine>, } impl SignMachine for 
RouterCommandSignMachine { type Params = (); type Keys = ThresholdKeys; type Preprocess = Preprocess; type SignatureShare = SignatureShare; type SignatureMachine = RouterCommandSignatureMachine; fn cache(self) -> CachedPreprocess { unimplemented!( "RouterCommand machines don't support caching their preprocesses due to {}", "being already bound to a specific command" ); } fn from_cache( (): (), _: ThresholdKeys, _: CachedPreprocess, ) -> (Self, Self::Preprocess) { unimplemented!( "RouterCommand machines don't support caching their preprocesses due to {}", "being already bound to a specific command" ); } fn read_preprocess(&self, reader: &mut R) -> io::Result { self.machine.read_preprocess(reader) } fn sign( self, commitments: HashMap, msg: &[u8], ) -> Result<(RouterCommandSignatureMachine, Self::SignatureShare), FrostError> { if !msg.is_empty() { panic!("message was passed to a RouterCommand machine when it generates its own"); } let (machine, share) = self.machine.sign(commitments, &self.command.msg())?; Ok((RouterCommandSignatureMachine { key: self.key, command: self.command, machine }, share)) } } pub struct RouterCommandSignatureMachine { key: PublicKey, command: RouterCommand, machine: AlgorithmSignatureMachine>, } impl SignatureMachine for RouterCommandSignatureMachine { type SignatureShare = SignatureShare; fn read_share(&self, reader: &mut R) -> io::Result { self.machine.read_share(reader) } fn complete( self, shares: HashMap, ) -> Result { let sig = self.machine.complete(shares)?; let signature = Signature::new(&self.key, &self.command.msg(), sig) .expect("machine produced an invalid signature"); Ok(SignedRouterCommand { command: self.command, signature }) } } ================================================ FILE: networks/ethereum/src/router.rs ================================================ use std::{sync::Arc, io, collections::HashSet}; use k256::{ elliptic_curve::{group::GroupEncoding, sec1}, ProjectivePoint, }; use 
alloy_core::primitives::{hex::FromHex, Address, U256, Bytes, TxKind}; #[cfg(test)] use alloy_core::primitives::B256; use alloy_consensus::TxLegacy; use alloy_sol_types::{SolValue, SolConstructor, SolCall, SolEvent}; use alloy_rpc_types_eth::Filter; #[cfg(test)] use alloy_rpc_types_eth::{BlockId, TransactionRequest, TransactionInput}; use alloy_simple_request_transport::SimpleRequest; use alloy_provider::{Provider, RootProvider}; pub use crate::{ Error, crypto::{PublicKey, Signature}, abi::{erc20::Transfer, router as abi}, }; use abi::{SeraiKeyUpdated, InInstruction as InInstructionEvent, Executed as ExecutedEvent}; #[derive(Clone, PartialEq, Eq, Debug)] pub enum Coin { Ether, Erc20([u8; 20]), } impl Coin { pub fn read(reader: &mut R) -> io::Result { let mut kind = [0xff]; reader.read_exact(&mut kind)?; Ok(match kind[0] { 0 => Coin::Ether, 1 => { let mut address = [0; 20]; reader.read_exact(&mut address)?; Coin::Erc20(address) } _ => Err(io::Error::other("unrecognized Coin type"))?, }) } pub fn write(&self, writer: &mut W) -> io::Result<()> { match self { Coin::Ether => writer.write_all(&[0]), Coin::Erc20(token) => { writer.write_all(&[1])?; writer.write_all(token) } } } } #[derive(Clone, PartialEq, Eq, Debug)] pub struct InInstruction { pub id: ([u8; 32], u64), pub from: [u8; 20], pub coin: Coin, pub amount: U256, pub data: Vec, pub key_at_end_of_block: ProjectivePoint, } impl InInstruction { pub fn read(reader: &mut R) -> io::Result { let id = { let mut id_hash = [0; 32]; reader.read_exact(&mut id_hash)?; let mut id_pos = [0; 8]; reader.read_exact(&mut id_pos)?; let id_pos = u64::from_le_bytes(id_pos); (id_hash, id_pos) }; let mut from = [0; 20]; reader.read_exact(&mut from)?; let coin = Coin::read(reader)?; let mut amount = [0; 32]; reader.read_exact(&mut amount)?; let amount = U256::from_le_slice(&amount); let mut data_len = [0; 4]; reader.read_exact(&mut data_len)?; let data_len = usize::try_from(u32::from_le_bytes(data_len)) .map_err(|_| 
io::Error::other("InInstruction data exceeded 2**32 in length"))?; let mut data = vec![0; data_len]; reader.read_exact(&mut data)?; let mut key_at_end_of_block = ::Repr::default(); reader.read_exact(&mut key_at_end_of_block)?; let key_at_end_of_block = Option::from(ProjectivePoint::from_bytes(&key_at_end_of_block)) .ok_or(io::Error::other("InInstruction had key at end of block which wasn't valid"))?; Ok(InInstruction { id, from, coin, amount, data, key_at_end_of_block }) } pub fn write(&self, writer: &mut W) -> io::Result<()> { writer.write_all(&self.id.0)?; writer.write_all(&self.id.1.to_le_bytes())?; writer.write_all(&self.from)?; self.coin.write(writer)?; writer.write_all(&self.amount.as_le_bytes())?; writer.write_all( &u32::try_from(self.data.len()) .map_err(|_| { io::Error::other("InInstruction being written had data exceeding 2**32 in length") })? .to_le_bytes(), )?; writer.write_all(&self.data)?; writer.write_all(&self.key_at_end_of_block.to_bytes()) } } #[derive(Clone, PartialEq, Eq, Debug)] pub struct Executed { pub tx_id: [u8; 32], pub nonce: u64, pub signature: [u8; 64], } /// The contract Serai uses to manage its state. #[derive(Clone, Debug)] pub struct Router(Arc>, Address); impl Router { pub(crate) fn code() -> Vec { let bytecode = include_str!("../artifacts/Router.bin"); Bytes::from_hex(bytecode).expect("compiled-in Router bytecode wasn't valid hex").to_vec() } pub(crate) fn init_code(key: &PublicKey) -> Vec { let mut bytecode = Self::code(); // Append the constructor arguments bytecode.extend((abi::constructorCall { _seraiKey: key.eth_repr().into() }).abi_encode()); bytecode } // This isn't pub in order to force users to use `Deployer::find_router`. pub(crate) fn new(provider: Arc>, address: Address) -> Self { Self(provider, address) } pub fn address(&self) -> [u8; 20] { **self.1 } /// Get the key for Serai at the specified block. 
#[cfg(test)]
pub async fn serai_key(&self, at: [u8; 32]) -> Result {
  // Build an eth_call against the Router's `seraiKey()` view function
  let call = TransactionRequest::default()
    .to(self.1)
    .input(TransactionInput::new(abi::seraiKeyCall::new(()).abi_encode().into()));
  // Pin the call to the specified block hash so historical state can be queried
  let bytes = self
    .0
    .call(&call)
    .block(BlockId::Hash(B256::from(at).into()))
    .await
    .map_err(|_| Error::ConnectionError)?;
  let res =
    abi::seraiKeyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?;
  // A key without a valid eth representation is also mapped to ConnectionError here
  PublicKey::from_eth_repr(res._0.0).ok_or(Error::ConnectionError)
}

/// Get the message to be signed in order to update the key for Serai.
pub(crate) fn update_serai_key_message(chain_id: U256, nonce: U256, key: &PublicKey) -> Vec {
  // Domain separator, then big-endian chain ID and nonce, then the key's eth representation
  let mut buffer = b"updateSeraiKey".to_vec();
  buffer.extend(&chain_id.to_be_bytes::<32>());
  buffer.extend(&nonce.to_be_bytes::<32>());
  buffer.extend(&key.eth_repr());
  buffer
}

/// Update the key representing Serai.
pub fn update_serai_key(&self, public_key: &PublicKey, sig: &Signature) -> TxLegacy {
  // TODO: Set a more accurate gas
  TxLegacy {
    to: TxKind::Call(self.1),
    input: abi::updateSeraiKeyCall::new((public_key.eth_repr().into(), sig.into()))
      .abi_encode()
      .into(),
    gas_limit: 100_000,
    ..Default::default()
  }
}

/// Get the current nonce for the published batches.
#[cfg(test)]
pub async fn nonce(&self, at: [u8; 32]) -> Result {
  // Same eth_call pattern as `serai_key`, reading the Router's `nonce()` view function
  let call = TransactionRequest::default()
    .to(self.1)
    .input(TransactionInput::new(abi::nonceCall::new(()).abi_encode().into()));
  let bytes = self
    .0
    .call(&call)
    .block(BlockId::Hash(B256::from(at).into()))
    .await
    .map_err(|_| Error::ConnectionError)?;
  let res = abi::nonceCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?;
  Ok(res._0)
}

// NOTE(review): this doc comment previously read "update the key for Serai" — a copy-paste of
// the comment on `update_serai_key_message`. This function builds the `execute` message.
/// Get the message to be signed in order to execute a batch of `OutInstruction`s.
pub(crate) fn execute_message(
  chain_id: U256,
  nonce: U256,
  outs: Vec,
) -> Vec {
  // ABI-encode ("execute", chain_id, nonce, outs) as the payload to be signed
  ("execute".to_string(), chain_id, nonce, outs).abi_encode_params()
}

/// Execute a batch of `OutInstruction`s.
pub fn execute(&self, outs: &[abi::OutInstruction], sig: &Signature) -> TxLegacy { TxLegacy { to: TxKind::Call(self.1), input: abi::executeCall::new((outs.to_vec(), sig.into())).abi_encode().into(), // TODO gas_limit: 100_000 + ((200_000 + 10_000) * u64::try_from(outs.len()).unwrap()), ..Default::default() } } pub async fn key_at_end_of_block(&self, block: u64) -> Result, Error> { let filter = Filter::new().from_block(0).to_block(block).address(self.1); let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH); let all_keys = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; if all_keys.is_empty() { return Ok(None); }; let last_key_x_coordinate_log = all_keys.last().ok_or(Error::ConnectionError)?; let last_key_x_coordinate = last_key_x_coordinate_log .log_decode::() .map_err(|_| Error::ConnectionError)? .inner .data .key; let mut compressed_point = ::Repr::default(); compressed_point[0] = u8::from(sec1::Tag::CompressedEvenY); compressed_point[1 ..].copy_from_slice(last_key_x_coordinate.as_slice()); let key = Option::from(ProjectivePoint::from_bytes(&compressed_point)).ok_or(Error::ConnectionError)?; Ok(Some(key)) } pub async fn in_instructions( &self, block: u64, allowed_tokens: &HashSet<[u8; 20]>, ) -> Result, Error> { let Some(key_at_end_of_block) = self.key_at_end_of_block(block).await? 
else { return Ok(vec![]); }; let filter = Filter::new().from_block(block).to_block(block).address(self.1); let filter = filter.event_signature(InInstructionEvent::SIGNATURE_HASH); let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; let mut transfer_check = HashSet::new(); let mut in_instructions = vec![]; for log in logs { // Double check the address which emitted this log if log.address() != self.1 { Err(Error::ConnectionError)?; } let id = ( log.block_hash.ok_or(Error::ConnectionError)?.into(), log.log_index.ok_or(Error::ConnectionError)?, ); let tx_hash = log.transaction_hash.ok_or(Error::ConnectionError)?; let tx = self .0 .get_transaction_by_hash(tx_hash) .await .ok() .flatten() .ok_or(Error::ConnectionError)?; let log = log.log_decode::().map_err(|_| Error::ConnectionError)?.inner.data; let coin = if log.coin.0 == [0; 20] { Coin::Ether } else { let token = *log.coin.0; if !allowed_tokens.contains(&token) { continue; } // If this also counts as a top-level transfer via the token, drop it // // Necessary in order to handle a potential edge case with some theoretical token // implementations // // This will either let it be handled by the top-level transfer hook or will drop it // entirely on the side of caution if tx.to == Some(token.into()) { continue; } // Get all logs for this TX let receipt = self .0 .get_transaction_receipt(tx_hash) .await .map_err(|_| Error::ConnectionError)? 
.ok_or(Error::ConnectionError)?; let tx_logs = receipt.inner.logs(); // Find a matching transfer log let mut found_transfer = false; for tx_log in tx_logs { let log_index = tx_log.log_index.ok_or(Error::ConnectionError)?; // Ensure we didn't already use this transfer to check a distinct InInstruction event if transfer_check.contains(&log_index) { continue; } // Check if this log is from the token we expected to be transferred if tx_log.address().0 != token { continue; } // Check if this is a transfer log // https://github.com/alloy-rs/core/issues/589 if tx_log.topics()[0] != Transfer::SIGNATURE_HASH { continue; } let Ok(transfer) = Transfer::decode_log(&tx_log.inner.clone(), true) else { continue }; // Check if this is a transfer to us for the expected amount if (transfer.to == self.1) && (transfer.value == log.amount) { transfer_check.insert(log_index); found_transfer = true; break; } } if !found_transfer { // This shouldn't be a ConnectionError // This is an exploit, a non-conforming ERC20, or an invalid connection // This should halt the process which is sufficient, yet this is sub-optimal // TODO Err(Error::ConnectionError)?; } Coin::Erc20(token) }; in_instructions.push(InInstruction { id, from: *log.from.0, coin, amount: log.amount, data: log.instruction.as_ref().to_vec(), key_at_end_of_block, }); } Ok(in_instructions) } pub async fn executed_commands(&self, block: u64) -> Result, Error> { let mut res = vec![]; { let filter = Filter::new().from_block(block).to_block(block).address(self.1); let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH); let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; for log in logs { // Double check the address which emitted this log if log.address() != self.1 { Err(Error::ConnectionError)?; } let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?.into(); let log = log.log_decode::().map_err(|_| Error::ConnectionError)?.inner.data; let mut signature = [0; 64]; signature[.. 
32].copy_from_slice(log.signature.c.as_ref()); signature[32 ..].copy_from_slice(log.signature.s.as_ref()); res.push(Executed { tx_id, nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?, signature, }); } } { let filter = Filter::new().from_block(block).to_block(block).address(self.1); let filter = filter.event_signature(ExecutedEvent::SIGNATURE_HASH); let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; for log in logs { // Double check the address which emitted this log if log.address() != self.1 { Err(Error::ConnectionError)?; } let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?.into(); let log = log.log_decode::().map_err(|_| Error::ConnectionError)?.inner.data; let mut signature = [0; 64]; signature[.. 32].copy_from_slice(log.signature.c.as_ref()); signature[32 ..].copy_from_slice(log.signature.s.as_ref()); res.push(Executed { tx_id, nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?, signature, }); } } Ok(res) } #[cfg(feature = "tests")] pub fn key_updated_filter(&self) -> Filter { Filter::new().address(self.1).event_signature(SeraiKeyUpdated::SIGNATURE_HASH) } #[cfg(feature = "tests")] pub fn executed_filter(&self) -> Filter { Filter::new().address(self.1).event_signature(ExecutedEvent::SIGNATURE_HASH) } } ================================================ FILE: networks/ethereum/src/tests/abi/mod.rs ================================================ use alloy_sol_types::sol; #[rustfmt::skip] #[allow(warnings)] #[allow(needless_pass_by_value)] #[allow(clippy::all)] #[allow(clippy::ignored_unit_patterns)] #[allow(clippy::redundant_closure_for_method_calls)] mod schnorr_container { use super::*; sol!("src/tests/contracts/Schnorr.sol"); } pub(crate) use schnorr_container::TestSchnorr as schnorr; ================================================ FILE: networks/ethereum/src/tests/contracts/ERC20.sol ================================================ // SPDX-License-Identifier: AGPLv3 pragma solidity 
^0.8.0; contract TestERC20 { event Transfer(address indexed from, address indexed to, uint256 value); event Approval(address indexed owner, address indexed spender, uint256 value); function name() public pure returns (string memory) { return "Test ERC20"; } function symbol() public pure returns (string memory) { return "TEST"; } function decimals() public pure returns (uint8) { return 18; } function totalSupply() public pure returns (uint256) { return 1_000_000 * 10e18; } mapping(address => uint256) balances; mapping(address => mapping(address => uint256)) allowances; constructor() { balances[msg.sender] = totalSupply(); } function balanceOf(address owner) public view returns (uint256) { return balances[owner]; } function transfer(address to, uint256 value) public returns (bool) { balances[msg.sender] -= value; balances[to] += value; return true; } function transferFrom(address from, address to, uint256 value) public returns (bool) { allowances[from][msg.sender] -= value; balances[from] -= value; balances[to] += value; return true; } function approve(address spender, uint256 value) public returns (bool) { allowances[msg.sender][spender] = value; return true; } function allowance(address owner, address spender) public view returns (uint256) { return allowances[owner][spender]; } } ================================================ FILE: networks/ethereum/src/tests/contracts/Schnorr.sol ================================================ // SPDX-License-Identifier: AGPLv3 pragma solidity ^0.8.0; import "../../../contracts/Schnorr.sol"; contract TestSchnorr { function verify( bytes32 px, bytes calldata message, bytes32 c, bytes32 s ) external pure returns (bool) { return Schnorr.verify(px, message, c, s); } } ================================================ FILE: networks/ethereum/src/tests/crypto.rs ================================================ #![allow(deprecated)] use rand_core::OsRng; use group::ff::{Field, PrimeField}; use k256::{ ecdsa::{ self, 
hazmat::SignPrimitive, signature::hazmat::PrehashVerifier, SigningKey, VerifyingKey, }, Scalar, ProjectivePoint, }; use frost::{ curve::{Ciphersuite, Secp256k1}, algorithm::{Hram, IetfSchnorr}, tests::{algorithm_machines, sign}, }; use crate::{crypto::*, tests::key_gen}; // The ecrecover opcode, yet with parity replacing v pub(crate) fn ecrecover(message: Scalar, odd_y: bool, r: Scalar, s: Scalar) -> Option<[u8; 20]> { let sig = ecdsa::Signature::from_scalars(r, s).ok()?; let message: [u8; 32] = message.to_repr().into(); alloy_core::primitives::Signature::from_signature_and_parity( sig, alloy_core::primitives::Parity::Parity(odd_y), ) .ok()? .recover_address_from_prehash(&alloy_core::primitives::B256::from(message)) .ok() .map(Into::into) } #[test] fn test_ecrecover() { let private = SigningKey::random(&mut OsRng); let public = VerifyingKey::from(&private); // Sign the signature const MESSAGE: &[u8] = b"Hello, World!"; let (sig, recovery_id) = private .as_nonzero_scalar() .try_sign_prehashed( ::F::random(&mut OsRng), &keccak256(MESSAGE).into(), ) .unwrap(); // Sanity check the signature verifies #[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result { assert_eq!(public.verify_prehash(&keccak256(MESSAGE), &sig).unwrap(), ()); } // Perform the ecrecover assert_eq!( ecrecover( hash_to_scalar(MESSAGE), u8::from(recovery_id.unwrap().is_y_odd()) == 1, *sig.r(), *sig.s() ) .unwrap(), address(&ProjectivePoint::from(public.as_affine())) ); } // Run the sign test with the EthereumHram #[test] fn test_signing() { let (keys, _) = key_gen(); const MESSAGE: &[u8] = b"Hello, World!"; let algo = IetfSchnorr::::ietf(); let _sig = sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE); } #[allow(non_snake_case)] pub fn preprocess_signature_for_ecrecover( R: ProjectivePoint, public_key: &PublicKey, m: &[u8], s: Scalar, ) -> (Scalar, Scalar) { let c = EthereumHram::hram(&R, &public_key.A, m); let sa = -(s * public_key.px); 
let ca = -(c * public_key.px); (sa, ca) } #[test] fn test_ecrecover_hack() { let (keys, public_key) = key_gen(); const MESSAGE: &[u8] = b"Hello, World!"; let algo = IetfSchnorr::::ietf(); let sig = sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE); let (sa, ca) = preprocess_signature_for_ecrecover(sig.R, &public_key, MESSAGE, sig.s); let q = ecrecover(sa, false, public_key.px, ca).unwrap(); assert_eq!(q, address(&sig.R)); } ================================================ FILE: networks/ethereum/src/tests/mod.rs ================================================ #![allow(deprecated)] use std::{sync::Arc, collections::HashMap}; use rand_core::OsRng; use k256::{Scalar, ProjectivePoint}; use frost::{curve::Secp256k1, Participant, ThresholdKeys, tests::key_gen as frost_key_gen}; use alloy_core::{ primitives::{Address, U256, Bytes, Signature, TxKind}, hex::FromHex, }; use alloy_consensus::{SignableTransaction, TxLegacy}; use alloy_rpc_types_eth::TransactionReceipt; use alloy_simple_request_transport::SimpleRequest; use alloy_provider::{Provider, RootProvider}; use crate::crypto::{address, deterministically_sign, PublicKey}; #[cfg(test)] mod crypto; #[cfg(test)] mod abi; #[cfg(test)] mod schnorr; #[cfg(test)] mod router; pub fn key_gen() -> (HashMap>, PublicKey) { let mut keys = frost_key_gen::<_, Secp256k1>(&mut OsRng); let mut group_key = keys[&Participant::new(1).unwrap()].group_key(); let mut offset = Scalar::ZERO; while PublicKey::new(group_key).is_none() { offset += Scalar::ONE; group_key += ProjectivePoint::GENERATOR; } for keys in keys.values_mut() { *keys = keys.clone().offset(offset); } let public_key = PublicKey::new(group_key).unwrap(); (keys, public_key) } // TODO: Use a proper error here pub async fn send( provider: &RootProvider, wallet: &k256::ecdsa::SigningKey, mut tx: TxLegacy, ) -> Option { let verifying_key = *wallet.verifying_key().as_affine(); let address = Address::from(address(&verifying_key.into())); // 
https://github.com/alloy-rs/alloy/issues/539 // let chain_id = provider.get_chain_id().await.unwrap(); // tx.chain_id = Some(chain_id); tx.chain_id = None; tx.nonce = provider.get_transaction_count(address).await.unwrap(); // 100 gwei tx.gas_price = 100_000_000_000u128; let sig = wallet.sign_prehash_recoverable(tx.signature_hash().as_ref()).unwrap(); assert_eq!(address, tx.clone().into_signed(sig.into()).recover_signer().unwrap()); assert!( provider.get_balance(address).await.unwrap() > ((U256::from(tx.gas_price) * U256::from(tx.gas_limit)) + tx.value) ); let mut bytes = vec![]; tx.encode_with_signature_fields(&Signature::from(sig), &mut bytes); let pending_tx = provider.send_raw_transaction(&bytes).await.ok()?; pending_tx.get_receipt().await.ok() } pub async fn fund_account( provider: &RootProvider, wallet: &k256::ecdsa::SigningKey, to_fund: Address, value: U256, ) -> Option<()> { let funding_tx = TxLegacy { to: TxKind::Call(to_fund), gas_limit: 21_000, value, ..Default::default() }; assert!(send(provider, wallet, funding_tx).await.unwrap().status()); Some(()) } // TODO: Use a proper error here pub async fn deploy_contract( client: Arc>, wallet: &k256::ecdsa::SigningKey, name: &str, ) -> Option

{ let hex_bin_buf = std::fs::read_to_string(format!("./artifacts/{name}.bin")).unwrap(); let hex_bin = if let Some(stripped) = hex_bin_buf.strip_prefix("0x") { stripped } else { &hex_bin_buf }; let bin = Bytes::from_hex(hex_bin).unwrap(); let deployment_tx = TxLegacy { chain_id: None, nonce: 0, // 100 gwei gas_price: 100_000_000_000u128, gas_limit: 1_000_000, to: TxKind::Create, value: U256::ZERO, input: bin, }; let deployment_tx = deterministically_sign(&deployment_tx); // Fund the deployer address fund_account( &client, wallet, deployment_tx.recover_signer().unwrap(), U256::from(deployment_tx.tx().gas_limit) * U256::from(deployment_tx.tx().gas_price), ) .await?; let (deployment_tx, sig, _) = deployment_tx.into_parts(); let mut bytes = vec![]; deployment_tx.encode_with_signature_fields(&sig, &mut bytes); let pending_tx = client.send_raw_transaction(&bytes).await.ok()?; let receipt = pending_tx.get_receipt().await.ok()?; assert!(receipt.status()); Some(receipt.contract_address.unwrap()) } ================================================ FILE: networks/ethereum/src/tests/router.rs ================================================ use std::{convert::TryFrom, sync::Arc, collections::HashMap}; use rand_core::OsRng; use group::Group; use k256::ProjectivePoint; use frost::{ curve::Secp256k1, Participant, ThresholdKeys, algorithm::IetfSchnorr, tests::{algorithm_machines, sign}, }; use alloy_core::primitives::{Address, U256}; use alloy_simple_request_transport::SimpleRequest; use alloy_rpc_types_eth::BlockTransactionsKind; use alloy_rpc_client::ClientBuilder; use alloy_provider::{Provider, RootProvider}; use alloy_node_bindings::{Anvil, AnvilInstance}; use crate::{ crypto::*, deployer::Deployer, router::{Router, abi as router}, tests::{key_gen, send, fund_account}, }; async fn setup_test() -> ( AnvilInstance, Arc>, u64, Router, HashMap>, PublicKey, ) { let anvil = Anvil::new().spawn(); let provider = RootProvider::new( 
ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true), ); let chain_id = provider.get_chain_id().await.unwrap(); let wallet = anvil.keys()[0].clone().into(); let client = Arc::new(provider); // Make sure the Deployer constructor returns None, as it doesn't exist yet assert!(Deployer::new(client.clone()).await.unwrap().is_none()); // Deploy the Deployer let tx = Deployer::deployment_tx(); fund_account( &client, &wallet, tx.recover_signer().unwrap(), U256::from(tx.tx().gas_limit) * U256::from(tx.tx().gas_price), ) .await .unwrap(); let (tx, sig, _) = tx.into_parts(); let mut bytes = vec![]; tx.encode_with_signature_fields(&sig, &mut bytes); let pending_tx = client.send_raw_transaction(&bytes).await.unwrap(); let receipt = pending_tx.get_receipt().await.unwrap(); assert!(receipt.status()); let deployer = Deployer::new(client.clone()).await.expect("network error").expect("deployer wasn't deployed"); let (keys, public_key) = key_gen(); // Verify the Router constructor returns None, as it doesn't exist yet assert!(deployer.find_router(client.clone(), &public_key).await.unwrap().is_none()); // Deploy the router let receipt = send(&client, &anvil.keys()[0].clone().into(), deployer.deploy_router(&public_key)) .await .unwrap(); assert!(receipt.status()); let contract = deployer.find_router(client.clone(), &public_key).await.unwrap().unwrap(); (anvil, client, chain_id, contract, keys, public_key) } async fn latest_block_hash(client: &RootProvider) -> [u8; 32] { client .get_block(client.get_block_number().await.unwrap().into(), BlockTransactionsKind::Hashes) .await .unwrap() .unwrap() .header .hash .0 } #[tokio::test] async fn test_deploy_contract() { let (_anvil, client, _, router, _, public_key) = setup_test().await; let block_hash = latest_block_hash(&client).await; assert_eq!(router.serai_key(block_hash).await.unwrap(), public_key); assert_eq!(router.nonce(block_hash).await.unwrap(), U256::try_from(1u64).unwrap()); // TODO: Check it emitted 
SeraiKeyUpdated(public_key) at its genesis } pub fn hash_and_sign( keys: &HashMap>, public_key: &PublicKey, message: &[u8], ) -> Signature { let algo = IetfSchnorr::::ietf(); let sig = sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, keys), message); Signature::new(public_key, message, sig).unwrap() } #[tokio::test] async fn test_router_update_serai_key() { let (anvil, client, chain_id, contract, keys, public_key) = setup_test().await; let next_key = loop { let point = ProjectivePoint::random(&mut OsRng); let Some(next_key) = PublicKey::new(point) else { continue }; break next_key; }; let message = Router::update_serai_key_message( U256::try_from(chain_id).unwrap(), U256::try_from(1u64).unwrap(), &next_key, ); let sig = hash_and_sign(&keys, &public_key, &message); let first_block_hash = latest_block_hash(&client).await; assert_eq!(contract.serai_key(first_block_hash).await.unwrap(), public_key); let receipt = send(&client, &anvil.keys()[0].clone().into(), contract.update_serai_key(&next_key, &sig)) .await .unwrap(); assert!(receipt.status()); let second_block_hash = latest_block_hash(&client).await; assert_eq!(contract.serai_key(second_block_hash).await.unwrap(), next_key); // Check this does still offer the historical state assert_eq!(contract.serai_key(first_block_hash).await.unwrap(), public_key); // TODO: Check logs println!("gas used: {:?}", receipt.gas_used); // println!("logs: {:?}", receipt.logs); } #[tokio::test] async fn test_router_execute() { let (anvil, client, chain_id, contract, keys, public_key) = setup_test().await; let to = Address::from([0; 20]); let value = U256::ZERO; let tx = router::OutInstruction { to, value, calls: vec![] }; let txs = vec![tx]; let first_block_hash = latest_block_hash(&client).await; let nonce = contract.nonce(first_block_hash).await.unwrap(); assert_eq!(nonce, U256::try_from(1u64).unwrap()); let message = Router::execute_message(U256::try_from(chain_id).unwrap(), nonce, txs.clone()); let sig = 
hash_and_sign(&keys, &public_key, &message); let receipt = send(&client, &anvil.keys()[0].clone().into(), contract.execute(&txs, &sig)).await.unwrap(); assert!(receipt.status()); let second_block_hash = latest_block_hash(&client).await; assert_eq!(contract.nonce(second_block_hash).await.unwrap(), U256::try_from(2u64).unwrap()); // Check this does still offer the historical state assert_eq!(contract.nonce(first_block_hash).await.unwrap(), U256::try_from(1u64).unwrap()); // TODO: Check logs println!("gas used: {:?}", receipt.gas_used); // println!("logs: {:?}", receipt.logs); } ================================================ FILE: networks/ethereum/src/tests/schnorr.rs ================================================ use std::sync::Arc; use rand_core::OsRng; use group::ff::PrimeField; use k256::Scalar; use frost::{ curve::Secp256k1, algorithm::IetfSchnorr, tests::{algorithm_machines, sign}, }; use alloy_core::primitives::Address; use alloy_sol_types::SolCall; use alloy_rpc_types_eth::{TransactionInput, TransactionRequest}; use alloy_simple_request_transport::SimpleRequest; use alloy_rpc_client::ClientBuilder; use alloy_provider::{Provider, RootProvider}; use alloy_node_bindings::{Anvil, AnvilInstance}; use crate::{ Error, crypto::*, tests::{key_gen, deploy_contract, abi::schnorr as abi}, }; async fn setup_test() -> (AnvilInstance, Arc>, Address) { let anvil = Anvil::new().spawn(); let provider = RootProvider::new( ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true), ); let wallet = anvil.keys()[0].clone().into(); let client = Arc::new(provider); let address = deploy_contract(client.clone(), &wallet, "TestSchnorr").await.unwrap(); (anvil, client, address) } #[tokio::test] async fn test_deploy_contract() { setup_test().await; } pub async fn call_verify( provider: &RootProvider, contract: Address, public_key: &PublicKey, message: &[u8], signature: &Signature, ) -> Result<(), Error> { let px: [u8; 32] = public_key.px.to_repr().into(); let 
c_bytes: [u8; 32] = signature.c.to_repr().into(); let s_bytes: [u8; 32] = signature.s.to_repr().into(); let call = TransactionRequest::default().to(contract).input(TransactionInput::new( abi::verifyCall::new((px.into(), message.to_vec().into(), c_bytes.into(), s_bytes.into())) .abi_encode() .into(), )); let bytes = provider.call(&call).await.map_err(|_| Error::ConnectionError)?; let res = abi::verifyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?; if res._0 { Ok(()) } else { Err(Error::InvalidSignature) } } #[tokio::test] async fn test_ecrecover_hack() { let (_anvil, client, contract) = setup_test().await; let (keys, public_key) = key_gen(); const MESSAGE: &[u8] = b"Hello, World!"; let algo = IetfSchnorr::::ietf(); let sig = sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE); let sig = Signature::new(&public_key, MESSAGE, sig).unwrap(); call_verify(&client, contract, &public_key, MESSAGE, &sig).await.unwrap(); // Test an invalid signature fails let mut sig = sig; sig.s += Scalar::ONE; assert!(call_verify(&client, contract, &public_key, MESSAGE, &sig).await.is_err()); } ================================================ FILE: orchestration/Cargo.toml ================================================ [package] name = "serai-orchestrator" version = "0.0.1" description = "Generates Dockerfiles for Serai" license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/orchestration/" authors = ["Luke Parker "] keywords = [] edition = "2021" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] hex = { version = "0.4", default-features = false, features = ["std"] } zeroize = { version = "1", default-features = false, features = ["std"] } rand_core = { version = "0.6", default-features = false, features = ["std", "getrandom"] } rand_chacha = { version = "0.3", default-features = false, features = ["std"] } 
transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, features = ["std", "recommended"] } dalek-ff-group = { path = "../crypto/dalek-ff-group", default-features = false, features = ["std"] } ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std"] } zalloc = { path = "../common/zalloc" } home = "0.5" ================================================ FILE: orchestration/README.md ================================================ # Orchestration This folder contains the tool which generates various dockerfiles and manages deployments of Serai. To start, run: ```sh cargo run -p serai-orchestrator ``` to generate all of the dockerfiles needed for development. ================================================ FILE: orchestration/dev/coordinator/.folder ================================================ ================================================ FILE: orchestration/dev/message-queue/.folder ================================================ ================================================ FILE: orchestration/dev/networks/bitcoin/run.sh ================================================ #!/bin/sh RPC_USER="${RPC_USER:=serai}" RPC_PASS="${RPC_PASS:=seraidex}" bitcoind -txindex -regtest --port=8333 \ -rpcuser=$RPC_USER -rpcpassword=$RPC_PASS \ -rpcbind=0.0.0.0 -rpcallowip=0.0.0.0/0 -rpcport=8332 \ $1 ================================================ FILE: orchestration/dev/networks/ethereum/run.sh ================================================ #!/bin/sh ~/.foundry/bin/anvil --host 0.0.0.0 --no-cors --no-mining --slots-in-an-epoch 32 --silent ================================================ FILE: orchestration/dev/networks/ethereum-relayer/.folder ================================================ #!/bin/sh RPC_USER="${RPC_USER:=serai}" RPC_PASS="${RPC_PASS:=seraidex}" # Run Monero monerod --non-interactive --regtest --offline --fixed-difficulty=1 \ --no-zmq --rpc-bind-ip=0.0.0.0
--rpc-bind-port=18081 --confirm-external-bind \ --rpc-access-control-origins "*" --disable-rpc-ban \ --rpc-login=$RPC_USER:$RPC_PASS \ $1 ================================================ FILE: orchestration/dev/networks/monero/hashes-v0.18.3.4.txt ================================================ -----BEGIN PGP SIGNED MESSAGE----- Hash: SHA256 # This GPG-signed message exists to confirm the SHA256 sums of Monero binaries. # # Please verify the signature against the key for binaryFate in the # source code repository (/utils/gpg_keys). # # ## CLI 15e4d7dfc2f9261a0a452b0f8fd157c33cdbc8a896e23d883ddd13e2480a3800 monero-android-armv7-v0.18.3.4.tar.bz2 d9c9249d1408822ce36b346c6b9fb6b896cda16714d62117fb1c588a5201763c monero-android-armv8-v0.18.3.4.tar.bz2 360a551388922c8991a9ba4abaa88676b0fc7ec1fa4d0f4b5c0500847e0b946c monero-freebsd-x64-v0.18.3.4.tar.bz2 354603c56446fb0551cdd6933bce5a13590b7881e05979b7ec25d89e7e59a0e2 monero-linux-armv7-v0.18.3.4.tar.bz2 33ca2f0055529d225b61314c56370e35606b40edad61c91c859f873ed67a1ea7 monero-linux-armv8-v0.18.3.4.tar.bz2 88739a1521b9fda3154540268e416c7af016ed7857041c76ab8ed7d7674c71ca monero-linux-riscv64-v0.18.3.4.tar.bz2 51ba03928d189c1c11b5379cab17dd9ae8d2230056dc05c872d0f8dba4a87f1d monero-linux-x64-v0.18.3.4.tar.bz2 d7ca0878abff2919a0104d7ed29d9c35df9ca0ea1b6fb4ebf6c8f7607ffb9e41 monero-linux-x86-v0.18.3.4.tar.bz2 44520cb3a05c2518ca9aeae1b2e3080fe2bba1e3596d014ceff1090dfcba8ab4 monero-mac-armv8-v0.18.3.4.tar.bz2 32c449f562216d3d83154e708471236d07db7477d6b67f1936a0a85a5005f2b8 monero-mac-x64-v0.18.3.4.tar.bz2 54a66db6c892b2a0999754841f4ca68511741b88ea3ab20c7cd504a027f465f5 monero-win-x64-v0.18.3.4.zip 1a9824742aa1587023c3bddea788c115940cfd49371c78a8dd62c40113132d01 monero-win-x86-v0.18.3.4.zip 7d4845ec0a3b52404d41785da348ec33509f0a5981e8a27c5fa55b18d696e139 monero-source-v0.18.3.4.tar.bz2 # ## GUI 63349d5a7637cd0c5d1693a1a2e910a92cbb123903d57667077a36454845d7bf monero-gui-install-win-x64-v0.18.3.4.exe 
2866f3a2be30e4c4113e6274cad1d6698f81c37ceebc6e8f084c57230a0f70a6 monero-gui-linux-x64-v0.18.3.4.tar.bz2 eedbf827513607a3ef579077dacd573e65892b199102effef97dff9d73138ca6 monero-gui-mac-armv8-v0.18.3.4.dmg 54eb151d7511a9f26130864e2c02f258344803b2b68311c8be29850d7faef359 monero-gui-mac-x64-v0.18.3.4.dmg b5d42dddd722e728e480337f89038c8ea606c6507bf0c88ddf2af25050c9b751 monero-gui-win-x64-v0.18.3.4.zip 2f1d643bb2cc08e5eb334a6bfd649b0aa95ceb6178ff2f90448d5ef8d2a752a6 monero-gui-source-v0.18.3.4.tar.bz2 # # # ~binaryFate -----BEGIN PGP SIGNATURE----- iQIzBAEBCAAdFiEEgaxZH+nEtlxYBq/D8K9NRioL35IFAmbF8bAACgkQ8K9NRioL 35KQAQ/7BP9j0Tx+zlFs3zbVIFXzfoPbGo2/uerM4xUWX/NUoI7XDTGWV2lpcR1x o6eqstbuHciY0Aj2MsICsdqD+1PYW0EBZlfNLMrk161c3nQMJcjCE65uIhbLkOSs 6SUakmpxkueQOE/Ug5Afaa/JBATVTxLTmqSCI7Ai9NplF+6KNauXQXNrlwO/gHcd whYDmsqp2JyOtMpMlpOckzLgg7Oroj7B0LBf78Z13p1naUyPooBaIEXSdKm5g2HI vPd+z1bOVIluqPBnYWUwL7EmXy08/broejHGliQ+2iY9IsmDDx6rnSe/oprNEDic l+/w3KvPcTkBh8hJLVDyYieYdVYHqOktIPlR1dKV512CnuP1ljr/CXjJmkAkXHlg bObMUCIM9UYqp1I+KDaArjYNbzkHK02Lu6sak49GXgEuq66m9t4isF2GdcHrbERs cLGsnhkTO2LtnGcziOC2l9XSzL41swxe0GrkK0rdeiyDCGAlb7hllevFy7zlT90l Jw670TyFVBs8fUFHk/tOtT0ivSDJJg8m9waBzi/46ksOvuid6p3P3a0agqu3uclj rscSpk0JS3E/3+A/N0IaiTmUO5zSjbsCrSnxQjcfrRRtERL+6JVHFVlW+nJzYWWH u0O7bNZSqEruR4aTEtsddLgs57I10thDR5SUONuAqbEq8EYN8OE= =aLFR -----END PGP SIGNATURE----- ================================================ FILE: orchestration/dev/networks/monero/run.sh ================================================ #!/bin/sh RPC_USER="${RPC_USER:=serai}" RPC_PASS="${RPC_PASS:=seraidex}" # Run Monero monerod --non-interactive --regtest --offline --fixed-difficulty=1 \ --no-zmq --rpc-bind-ip=0.0.0.0 --rpc-bind-port=18081 --confirm-external-bind \ --rpc-access-control-origins "*" --disable-rpc-ban \ --rpc-login=$RPC_USER:$RPC_PASS --log-level 2 \ $1 ================================================ FILE: orchestration/dev/networks/monero-wallet-rpc/run.sh ================================================ #!/bin/sh 
monero-wallet-rpc \ --allow-mismatched-daemon-version \ --daemon-address serai-dev-monero:18081 --daemon-login serai:seraidex \ --disable-rpc-login --rpc-bind-ip=0.0.0.0 --rpc-bind-port 18082 --confirm-external-bind \ --wallet-dir /home/monero ================================================ FILE: orchestration/dev/processor/bitcoin/.folder ================================================ ================================================ FILE: orchestration/dev/processor/ethereum/.folder ================================================ ================================================ FILE: orchestration/dev/processor/monero/.folder ================================================ ================================================ FILE: orchestration/dev/serai/run.sh ================================================ #!/bin/sh serai-node --unsafe-rpc-external --rpc-cors all --chain local --$SERAI_NAME ================================================ FILE: orchestration/runtime/Dockerfile ================================================ # rust:1.89.0-slim-bookworm as of August 1st, 2025 (GMT) FROM --platform=linux/amd64 rust@sha256:703cfb0f80db8eb8a3452bf5151162472039c1b37fe4fb2957b495a6f0104ae7 AS deterministic # Move to a Debian package snapshot RUN rm -rf /etc/apt/sources.list.d/debian.sources && \ rm -rf /var/lib/apt/lists/* && \ echo "deb [arch=amd64] http://snapshot.debian.org/archive/debian/20250801T000000Z bookworm main" > /etc/apt/sources.list && \ apt update # Install dependencies RUN apt update -y && apt upgrade -y && apt install -y clang # Add the wasm toolchain RUN rustup target add wasm32v1-none FROM deterministic # Add files for build ADD patches /serai/patches ADD common /serai/common ADD crypto /serai/crypto ADD networks /serai/networks ADD message-queue /serai/message-queue ADD processor /serai/processor ADD coordinator /serai/coordinator ADD substrate /serai/substrate ADD orchestration/Cargo.toml /serai/orchestration/Cargo.toml ADD orchestration/src 
/serai/orchestration/src ADD mini /serai/mini ADD tests /serai/tests ADD Cargo.toml /serai ADD Cargo.lock /serai ADD AGPL-3.0 /serai WORKDIR /serai # Build the runtime, copying it to the volume if it exists CMD cargo build --release -p serai-runtime && \ mkdir -p /volume && \ cp /serai/target/release/wbuild/serai-runtime/serai_runtime.wasm /volume/serai.wasm ================================================ FILE: orchestration/src/coordinator.rs ================================================ use std::path::Path; use zeroize::Zeroizing; use dalek_ff_group::Ristretto; use ciphersuite::{group::ff::PrimeField, Ciphersuite}; use crate::{Network, Os, mimalloc, os, build_serai_service, write_dockerfile}; #[allow(clippy::needless_pass_by_value)] pub fn coordinator( orchestration_path: &Path, network: Network, coordinator_key: Zeroizing<::F>, serai_key: &Zeroizing<::F>, ) { let db = network.db(); let longer_reattempts = if network == Network::Dev { "longer-reattempts" } else { "" }; let setup = mimalloc(Os::Debian).to_string() + &build_serai_service( "", network.release(), &format!("{db} {longer_reattempts}"), "serai-coordinator", ); const ADDITIONAL_ROOT: &str = r#" # Install ca-certificates RUN apt install -y ca-certificates "#; #[rustfmt::skip] const DEFAULT_RUST_LOG: &str = "info,serai_coordinator=debug,tributary_chain=debug,tendermint=debug,libp2p_gossipsub::behaviour=error"; let env_vars = [ ("MESSAGE_QUEUE_RPC", format!("serai-{}-message-queue", network.label())), ("MESSAGE_QUEUE_KEY", hex::encode(coordinator_key.to_repr())), ("DB_PATH", "/volume/coordinator-db".to_string()), ("SERAI_KEY", hex::encode(serai_key.to_repr())), ("SERAI_HOSTNAME", format!("serai-{}-serai", network.label())), ("RUST_LOG", DEFAULT_RUST_LOG.to_string()), ]; let mut env_vars_str = String::new(); for (env_var, value) in env_vars { env_vars_str += &format!(r#"{env_var}=${{{env_var}:="{value}"}} "#); } let run_coordinator = format!( r#" # Copy the Coordinator binary and relevant license COPY 
--from=builder --chown=coordinator /serai/bin/serai-coordinator /bin/ COPY --from=builder --chown=coordinator /serai/AGPL-3.0 . # Run coordinator CMD {env_vars_str} serai-coordinator "# ); let run = os(Os::Debian, ADDITIONAL_ROOT, "coordinator") + &run_coordinator; let res = setup + &run; let mut coordinator_path = orchestration_path.to_path_buf(); coordinator_path.push("coordinator"); coordinator_path.push("Dockerfile"); write_dockerfile(coordinator_path, &res); } ================================================ FILE: orchestration/src/docker.rs ================================================ use std::{collections::HashSet, path::Path, env, process::Command}; use crate::Network; pub fn build(orchestration_path: &Path, network: Network, name: &str) { let mut repo_path = env::current_exe().unwrap(); repo_path.pop(); if repo_path.as_path().ends_with("deps") { repo_path.pop(); } assert!(repo_path.as_path().ends_with("debug") || repo_path.as_path().ends_with("release")); repo_path.pop(); assert!(repo_path.as_path().ends_with("target")); repo_path.pop(); let mut dockerfile_path = orchestration_path.to_path_buf(); if HashSet::from(["bitcoin", "ethereum", "monero", "monero-wallet-rpc"]).contains(name) { dockerfile_path = dockerfile_path.join("networks"); } if name.contains("-processor") { dockerfile_path = dockerfile_path.join("processor").join(name.split('-').next().unwrap()).join("Dockerfile"); } else { dockerfile_path = dockerfile_path.join(name).join("Dockerfile"); } println!("Building {}...", &name); if !Command::new("docker") .current_dir(&repo_path) .arg("build") .arg("-f") .arg(dockerfile_path) .arg(".") .arg("-t") .arg(format!("serai-{}-{name}-img", network.label())) .spawn() .unwrap() .wait() .unwrap() .success() { panic!("failed to build {name}"); } println!("Built!"); } ================================================ FILE: orchestration/src/ethereum_relayer.rs ================================================ use std::path::Path; use crate::{Network, Os, 
mimalloc, os, build_serai_service, write_dockerfile}; pub fn ethereum_relayer(orchestration_path: &Path, network: Network) { let setup = mimalloc(Os::Debian).to_string() + &build_serai_service("", network.release(), network.db(), "serai-ethereum-relayer"); let env_vars = [ ("DB_PATH", "/volume/ethereum-relayer-db".to_string()), ("RUST_LOG", "info,serai_ethereum_relayer=trace".to_string()), ]; let mut env_vars_str = String::new(); for (env_var, value) in env_vars { env_vars_str += &format!(r#"{env_var}=${{{env_var}:="{value}"}} "#); } let run_ethereum_relayer = format!( r#" # Copy the relayer server binary and relevant license COPY --from=builder --chown=ethereumrelayer /serai/bin/serai-ethereum-relayer /bin # Run ethereum-relayer EXPOSE 20830 EXPOSE 20831 CMD {env_vars_str} serai-ethereum-relayer "# ); let run = os(Os::Debian, "", "ethereumrelayer") + &run_ethereum_relayer; let res = setup + &run; let mut ethereum_relayer_path = orchestration_path.to_path_buf(); ethereum_relayer_path.push("networks"); ethereum_relayer_path.push("ethereum-relayer"); ethereum_relayer_path.push("Dockerfile"); write_dockerfile(ethereum_relayer_path, &res); } ================================================ FILE: orchestration/src/main.rs ================================================ // TODO: Generate randomized RPC credentials for all services // TODO: Generate keys for a validator and the infra use core::ops::Deref; use std::{ collections::{HashSet, HashMap}, env, path::PathBuf, io::Write, fs, process::{Stdio, Command}, }; use zeroize::Zeroizing; use rand_core::{RngCore, SeedableRng, OsRng}; use rand_chacha::ChaCha20Rng; use transcript::{Transcript, RecommendedTranscript}; use dalek_ff_group::Ristretto; use ciphersuite::{ group::{ ff::{Field, PrimeField}, GroupEncoding, }, Ciphersuite, }; mod mimalloc; use mimalloc::mimalloc; mod networks; use networks::*; mod ethereum_relayer; use ethereum_relayer::ethereum_relayer; mod message_queue; use message_queue::message_queue; mod 
processor; use processor::processor; mod coordinator; use coordinator::coordinator; mod serai; use serai::serai; mod docker; #[global_allocator] static ALLOCATOR: zalloc::ZeroizingAlloc = zalloc::ZeroizingAlloc(std::alloc::System); #[derive(Clone, Copy, PartialEq, Eq, Debug, PartialOrd, Ord, Hash)] pub enum Network { Dev, Testnet, } impl Network { pub fn db(&self) -> &'static str { match self { Network::Dev => "parity-db", Network::Testnet => "rocksdb", } } pub fn release(&self) -> bool { match self { Network::Dev => false, Network::Testnet => true, } } pub fn label(&self) -> &'static str { match self { Network::Dev => "dev", Network::Testnet => "testnet", } } } #[derive(Clone, Copy, PartialEq, Eq, Debug, PartialOrd, Ord, Hash)] enum Os { Alpine, Debian, } fn os(os: Os, additional_root: &str, user: &str) -> String { match os { Os::Alpine => format!( r#" FROM alpine:latest AS image COPY --from=mimalloc-alpine libmimalloc.so /usr/lib ENV LD_PRELOAD=libmimalloc.so RUN apk update && apk upgrade RUN adduser --system --shell /sbin/nologin --disabled-password {user} RUN addgroup {user} RUN addgroup {user} {user} # Make the /volume directory and transfer it to the user RUN mkdir /volume && chown {user}:{user} /volume {additional_root} # Switch to a non-root user USER {user} WORKDIR /home/{user} "# ), Os::Debian => format!( r#" FROM debian:trixie-slim AS image COPY --from=mimalloc-debian libmimalloc.so /usr/lib RUN echo "/usr/lib/libmimalloc.so" >> /etc/ld.so.preload RUN apt update && apt upgrade -y && apt autoremove -y && apt clean RUN useradd --system --user-group --create-home --shell /sbin/nologin {user} # Make the /volume directory and transfer it to the user RUN mkdir /volume && chown {user}:{user} /volume {additional_root} # Switch to a non-root user USER {user} WORKDIR /home/{user} "# ), } } fn build_serai_service(prelude: &str, release: bool, features: &str, package: &str) -> String { let profile = if release { "release" } else { "debug" }; let profile_flag = if 
release { "--release" } else { "" }; format!( r#" FROM rust:1.90-slim-trixie AS builder COPY --from=mimalloc-debian libmimalloc.so /usr/lib RUN echo "/usr/lib/libmimalloc.so" >> /etc/ld.so.preload RUN apt update && apt upgrade -y && apt autoremove -y && apt clean # Add dev dependencies RUN apt install -y pkg-config libclang-dev clang # Dependencies for the Serai node RUN apt install -y make protobuf-compiler # Add the wasm toolchain RUN rustup target add wasm32v1-none {prelude} # Add files for build ADD patches /serai/patches ADD common /serai/common ADD crypto /serai/crypto ADD networks /serai/networks ADD message-queue /serai/message-queue ADD processor /serai/processor ADD coordinator /serai/coordinator ADD substrate /serai/substrate ADD orchestration/Cargo.toml /serai/orchestration/Cargo.toml ADD orchestration/src /serai/orchestration/src ADD mini /serai/mini ADD tests /serai/tests ADD Cargo.toml /serai ADD Cargo.lock /serai ADD AGPL-3.0 /serai WORKDIR /serai # Mount the caches and build RUN --mount=type=cache,target=/root/.cargo \ --mount=type=cache,target=/usr/local/cargo/registry \ --mount=type=cache,target=/usr/local/cargo/git \ --mount=type=cache,target=/serai/target \ mkdir /serai/bin && \ cargo build {profile_flag} --features "{features}" -p {package} && \ mv /serai/target/{profile}/{package} /serai/bin "# ) } pub fn write_dockerfile(path: PathBuf, dockerfile: &str) { if let Ok(existing) = fs::read_to_string(&path).as_ref() { if existing == dockerfile { return; } } fs::File::create(path).unwrap().write_all(dockerfile.as_bytes()).unwrap(); } fn orchestration_path(network: Network) -> PathBuf { let mut repo_path = env::current_exe().unwrap(); repo_path.pop(); assert!(repo_path.as_path().ends_with("debug")); repo_path.pop(); assert!(repo_path.as_path().ends_with("target")); repo_path.pop(); let mut orchestration_path = repo_path.clone(); orchestration_path.push("orchestration"); orchestration_path.push(network.label()); orchestration_path } type 
InfrastructureKeys = HashMap<&'static str, (Zeroizing<::F>, ::G)>; fn infrastructure_keys(network: Network) -> InfrastructureKeys { // Generate entropy for the infrastructure keys let entropy = if network == Network::Dev { // Don't use actual entropy if this is a dev environment Zeroizing::new([0; 32]) } else { let path = home::home_dir() .unwrap() .join(".serai") .join(network.label()) .join("infrastructure_keys_entropy"); // Check if there's existing entropy if let Ok(entropy) = fs::read(&path).map(Zeroizing::new) { assert_eq!(entropy.len(), 32, "entropy saved to disk wasn't 32 bytes"); let mut res = Zeroizing::new([0; 32]); res.copy_from_slice(entropy.as_ref()); res } else { // If there isn't, generate fresh entropy let mut res = Zeroizing::new([0; 32]); OsRng.fill_bytes(res.as_mut()); fs::write(&path, &res).unwrap(); res } }; let mut transcript = RecommendedTranscript::new(b"Serai Orchestrator Infrastructure Keys Transcript"); transcript.append_message(b"network", network.label().as_bytes()); transcript.append_message(b"entropy", entropy); let mut rng = ChaCha20Rng::from_seed(transcript.rng_seed(b"infrastructure_keys")); let mut key_pair = || { let key = Zeroizing::new(::F::random(&mut rng)); let public = Ristretto::generator() * key.deref(); (key, public) }; HashMap::from([ ("coordinator", key_pair()), ("bitcoin", key_pair()), ("ethereum", key_pair()), ("monero", key_pair()), ]) } fn dockerfiles(network: Network) { let orchestration_path = orchestration_path(network); bitcoin(&orchestration_path, network); ethereum(&orchestration_path, network); monero(&orchestration_path, network); if network == Network::Dev { monero_wallet_rpc(&orchestration_path); } let mut infrastructure_keys = infrastructure_keys(network); let coordinator_key = infrastructure_keys.remove("coordinator").unwrap(); let bitcoin_key = infrastructure_keys.remove("bitcoin").unwrap(); let ethereum_key = infrastructure_keys.remove("ethereum").unwrap(); let monero_key = 
infrastructure_keys.remove("monero").unwrap(); ethereum_relayer(&orchestration_path, network); message_queue( &orchestration_path, network, coordinator_key.1, bitcoin_key.1, ethereum_key.1, monero_key.1, ); let new_entropy = || { let mut res = Zeroizing::new([0; 32]); OsRng.fill_bytes(res.as_mut()); res }; processor( &orchestration_path, network, "bitcoin", coordinator_key.1, bitcoin_key.0, new_entropy(), ); processor( &orchestration_path, network, "ethereum", coordinator_key.1, ethereum_key.0, new_entropy(), ); processor(&orchestration_path, network, "monero", coordinator_key.1, monero_key.0, new_entropy()); let serai_key = { let serai_key = Zeroizing::new( fs::read(home::home_dir().unwrap().join(".serai").join(network.label()).join("key")) .expect("couldn't read key for this network"), ); let mut serai_key_repr = Zeroizing::new(<::F as PrimeField>::Repr::default()); serai_key_repr.as_mut().copy_from_slice(serai_key.as_ref()); Zeroizing::new(::F::from_repr(*serai_key_repr).unwrap()) }; coordinator(&orchestration_path, network, coordinator_key.0, &serai_key); serai(&orchestration_path, network, &serai_key); } fn key_gen(network: Network) { let serai_dir = home::home_dir().unwrap().join(".serai").join(network.label()); let key_file = serai_dir.join("key"); if fs::File::open(&key_file).is_ok() { println!("already created key"); return; } let key = ::F::random(&mut OsRng); let _ = fs::create_dir_all(&serai_dir); fs::write(key_file, key.to_repr()).expect("couldn't write key"); println!( "Public Key: {}", hex::encode((::generator() * key).to_bytes()) ); } fn start(network: Network, services: HashSet) { // Create the serai network Command::new("docker") .arg("network") .arg("create") .arg("--driver") .arg("bridge") .arg("serai") .output() .unwrap(); for service in services { println!("Starting {service}"); let name = match service.as_ref() { "serai" => "serai", "coordinator" => "coordinator", "ethereum-relayer" => "ethereum-relayer", "message-queue" => "message-queue", 
"bitcoin-daemon" => "bitcoin", "bitcoin-processor" => "bitcoin-processor", "monero-daemon" => "monero", "monero-processor" => "monero-processor", "monero-wallet-rpc" => "monero-wallet-rpc", _ => panic!("starting unrecognized service"), }; // If we're building the Serai service, first build the runtime let serai_runtime_volume = format!("serai-{}-runtime-volume", network.label()); if name == "serai" { // Check if it's built by checking if the volume has the expected runtime file let wasm_build_container_name = format!("serai-{}-runtime", network.label()); let built = || { if let Ok(state_and_status) = Command::new("docker") .arg("inspect") .arg("-f") .arg("{{.State.Status}}:{{.State.ExitCode}}") .arg(&wasm_build_container_name) .output() { if let Ok(state_and_status) = String::from_utf8(state_and_status.stdout) { return state_and_status.trim() == "exited:0"; } } false }; if !built() { let mut repo_path = env::current_exe().unwrap(); repo_path.pop(); if repo_path.as_path().ends_with("deps") { repo_path.pop(); } assert!(repo_path.as_path().ends_with("debug") || repo_path.as_path().ends_with("release")); repo_path.pop(); assert!(repo_path.as_path().ends_with("target")); repo_path.pop(); // Build the image to build the runtime if !Command::new("docker") .current_dir(&repo_path) .arg("build") .arg("-f") .arg("orchestration/runtime/Dockerfile") .arg(".") .arg("-t") .arg(format!("serai-{}-runtime-img", network.label())) .spawn() .unwrap() .wait() .unwrap() .success() { panic!("failed to build runtime image"); } // Run the image, building the runtime println!("Building the Serai runtime"); let container_name = format!("serai-{}-runtime", network.label()); let _ = Command::new("docker").arg("rm").arg("-f").arg(&container_name).spawn().unwrap().wait(); let _ = Command::new("docker") .arg("run") .arg("--name") .arg(container_name) .arg("--volume") .arg(format!("{serai_runtime_volume}:/volume")) .arg(format!("serai-{}-runtime-img", network.label())) .spawn(); // Wait until its 
built let mut ticks = 0; while !built() { std::thread::sleep(core::time::Duration::from_secs(60)); ticks += 1; if ticks > 6 * 60 { panic!("couldn't build the runtime after 6 hours") } } } } // Build it println!("Building {service}"); docker::build(&orchestration_path(network), network, name); let docker_name = format!("serai-{}-{name}", network.label()); let docker_image = format!("{docker_name}-img"); if !Command::new("docker") .arg("container") .arg("inspect") .arg(&docker_name) // Use null for all IO to silence 'container does not exist' .stdin(Stdio::null()) .stdout(Stdio::null()) .stderr(Stdio::null()) .status() .unwrap() .success() { // Create the docker container println!("Creating new container for {service}"); let volume = format!("serai-{}-{name}-volume:/volume", network.label()); let mut command = Command::new("docker"); let command = command.arg("create").arg("--name").arg(&docker_name); let command = command.arg("--network").arg("serai"); let command = command.arg("--restart").arg("always"); let command = command.arg("--log-opt").arg("max-size=100m"); let command = command.arg("--log-opt").arg("max-file=3"); let command = if network == Network::Dev { command } else { // Assign a persistent volume if this isn't for Dev command.arg("--volume").arg(volume) }; let command = match name { "bitcoin" => { // Expose the RPC for tests if network == Network::Dev { command.arg("-p").arg("8332:8332") } else { command } } "ethereum-relayer" => { // Expose the router command fetch server command.arg("-p").arg("20831:20831") } "monero" => { // Expose the RPC for tests if network == Network::Dev { command.arg("-p").arg("18081:18081") } else { command } } "monero-wallet-rpc" => { assert_eq!(network, Network::Dev, "monero-wallet-rpc is only for dev"); // Expose the RPC for tests command.arg("-p").arg("18082:18082") } "coordinator" => { if network == Network::Dev { command } else { // Publish the port command.arg("-p").arg("30563:30563") } } "serai" => { let command = 
command.arg("--volume").arg(format!("{serai_runtime_volume}:/runtime")); if network == Network::Dev { command } else { // Publish the port command.arg("-p").arg("30333:30333") } } _ => command, }; assert!( command.arg(docker_image).status().unwrap().success(), "couldn't create the container" ); } // Start it // TODO: Check it successfully started println!("Starting existing container for {service}"); let _ = Command::new("docker").arg("start").arg(docker_name).output(); } } fn main() { let help = || -> ! { println!( r#" Serai Orchestrator v0.0.1 Commands: key_gen *network* Generate a key for the validator. setup *network* Generate the Dockerfiles for every Serai service. start *network* [service1, service2...] Start the specified services for the specified network ("dev" or "testnet"). - `serai` - `coordinator` - `message-queue` - `bitcoin-daemon` - `bitcoin-processor` - `ethereum-daemon` - `ethereum-processor` - `ethereum-relayer` - `monero-daemon` - `monero-processor` - `monero-wallet-rpc` (if "dev") are valid services. `*network*-processor` will automatically start `*network*-daemon`. "# ); std::process::exit(1); }; let mut args = env::args(); args.next(); let command = args.next(); let network = match args.next().as_ref().map(AsRef::as_ref) { Some("dev") => Network::Dev, Some("testnet") => Network::Testnet, Some(_) => panic!(r#"unrecognized network. 
only "dev" and "testnet" are recognized"#), None => help(), }; match command.as_ref().map(AsRef::as_ref) { Some("key_gen") => { key_gen(network); } Some("setup") => { dockerfiles(network); } Some("start") => { let mut services = HashSet::new(); for arg in args { if arg == "ethereum-processor" { services.insert("ethereum-relayer".to_string()); } if let Some(ext_network) = arg.strip_suffix("-processor") { services.insert(ext_network.to_string() + "-daemon"); } services.insert(arg); } start(network, services); } _ => help(), } } ================================================ FILE: orchestration/src/message_queue.rs ================================================ use std::path::Path; use dalek_ff_group::Ristretto; use ciphersuite::{group::GroupEncoding, Ciphersuite}; use crate::{Network, Os, mimalloc, os, build_serai_service, write_dockerfile}; pub fn message_queue( orchestration_path: &Path, network: Network, coordinator_key: ::G, bitcoin_key: ::G, ethereum_key: ::G, monero_key: ::G, ) { let setup = mimalloc(Os::Debian).to_string() + &build_serai_service("", network.release(), network.db(), "serai-message-queue"); let env_vars = [ ("COORDINATOR_KEY", hex::encode(coordinator_key.to_bytes())), ("BITCOIN_KEY", hex::encode(bitcoin_key.to_bytes())), ("ETHEREUM_KEY", hex::encode(ethereum_key.to_bytes())), ("MONERO_KEY", hex::encode(monero_key.to_bytes())), ("DB_PATH", "/volume/message-queue-db".to_string()), ("RUST_LOG", "info,serai_message_queue=trace".to_string()), ]; let mut env_vars_str = String::new(); for (env_var, value) in env_vars { env_vars_str += &format!(r#"{env_var}=${{{env_var}:="{value}"}} "#); } let run_message_queue = format!( r#" # Copy the Message Queue binary and relevant license COPY --from=builder --chown=messagequeue /serai/bin/serai-message-queue /bin COPY --from=builder --chown=messagequeue /serai/AGPL-3.0 . 
# Run message-queue EXPOSE 2287 CMD {env_vars_str} serai-message-queue "# ); let run = os(Os::Debian, "", "messagequeue") + &run_message_queue; let res = setup + &run; let mut message_queue_path = orchestration_path.to_path_buf(); message_queue_path.push("message-queue"); message_queue_path.push("Dockerfile"); write_dockerfile(message_queue_path, &res); } ================================================ FILE: orchestration/src/mimalloc.rs ================================================ use crate::Os; pub fn mimalloc(os: Os) -> &'static str { const ALPINE_MIMALLOC: &str = r#" FROM alpine:latest AS mimalloc-alpine RUN apk update && apk upgrade && apk --no-cache add gcc g++ libc-dev make cmake git RUN git clone https://github.com/microsoft/mimalloc && \ cd mimalloc && \ git checkout 43ce4bd7fd34bcc730c1c7471c99995597415488 && \ mkdir -p out/secure && \ cd out/secure && \ cmake -DMI_SECURE=ON ../.. && \ make && \ cp ./libmimalloc-secure.so ../../../libmimalloc.so "#; const DEBIAN_MIMALLOC: &str = r#" FROM debian:trixie-slim AS mimalloc-debian RUN apt update && apt upgrade -y && apt install -y gcc g++ make cmake git RUN git clone https://github.com/microsoft/mimalloc && \ cd mimalloc && \ git checkout 43ce4bd7fd34bcc730c1c7471c99995597415488 && \ mkdir -p out/secure && \ cd out/secure && \ cmake -DMI_SECURE=ON ../.. 
&& \ make && \ cp ./libmimalloc-secure.so ../../../libmimalloc.so "#; match os { Os::Alpine => ALPINE_MIMALLOC, Os::Debian => DEBIAN_MIMALLOC, } } ================================================ FILE: orchestration/src/networks/bitcoin.rs ================================================ use std::path::Path; use crate::{Network, Os, mimalloc, os, write_dockerfile}; pub fn bitcoin(orchestration_path: &Path, network: Network) { #[rustfmt::skip] const DOWNLOAD_BITCOIN: &str = r#" FROM alpine:latest AS bitcoin ENV BITCOIN_VERSION=27.1 RUN apk --no-cache add wget git gnupg # Download Bitcoin RUN wget -4 https://bitcoincore.org/bin/bitcoin-core-${BITCOIN_VERSION}/bitcoin-${BITCOIN_VERSION}-$(uname -m)-linux-gnu.tar.gz RUN wget -4 https://bitcoincore.org/bin/bitcoin-core-${BITCOIN_VERSION}/SHA256SUMS RUN wget -4 https://bitcoincore.org/bin/bitcoin-core-${BITCOIN_VERSION}/SHA256SUMS.asc # Verify all sigs and check for a valid signature from laanwj -- 71A3 RUN git clone https://github.com/bitcoin-core/guix.sigs && \ cd guix.sigs/builder-keys && \ find . -iname '*.gpg' -exec gpg --import {} \; && \ gpg --verify --status-fd 1 --verify ../../SHA256SUMS.asc ../../SHA256SUMS | grep "^\[GNUPG:\] VALIDSIG.*71A3B16735405025D447E8F274810B012346C9A6" RUN grep bitcoin-${BITCOIN_VERSION}-$(uname -m)-linux-gnu.tar.gz SHA256SUMS | sha256sum -c # Prepare Image RUN tar xzvf bitcoin-${BITCOIN_VERSION}-$(uname -m)-linux-gnu.tar.gz RUN mv bitcoin-${BITCOIN_VERSION}/bin/bitcoind . 
"#; let setup = mimalloc(Os::Debian).to_string() + DOWNLOAD_BITCOIN; let run_bitcoin = format!( r#" COPY --from=bitcoin --chown=bitcoin bitcoind /bin EXPOSE 8332 8333 ADD /orchestration/{}/networks/bitcoin/run.sh / CMD ["/run.sh"] "#, network.label() ); let run = os(Os::Debian, "", "bitcoin") + &run_bitcoin; let res = setup + &run; let mut bitcoin_path = orchestration_path.to_path_buf(); bitcoin_path.push("networks"); bitcoin_path.push("bitcoin"); bitcoin_path.push("Dockerfile"); write_dockerfile(bitcoin_path, &res); } ================================================ FILE: orchestration/src/networks/ethereum/consensus/lighthouse.rs ================================================ use crate::Network; pub fn lighthouse(network: Network) -> (String, String, String) { assert_ne!(network, Network::Dev); #[rustfmt::skip] const DOWNLOAD_LIGHTHOUSE: &str = r#" FROM alpine:latest AS lighthouse ENV LIGHTHOUSE_VERSION=5.1.3 RUN apk --no-cache add wget git gnupg # Download lighthouse RUN wget -4 https://github.com/sigp/lighthouse/releases/download/v${LIGHTHOUSE_VERSION}/lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz RUN wget -4 https://github.com/sigp/lighthouse/releases/download/v${LIGHTHOUSE_VERSION}/lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz.asc # Verify the signature gpg --keyserver keyserver.ubuntu.com --recv-keys 15E66D941F697E28F49381F426416DC3F30674B0 gpg --verify lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz.asc lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz # Extract lighthouse RUN tar xvf lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz "#; let run_lighthouse = format!( r#" COPY --from=lighthouse --chown=ethereum lighthouse /bin ADD /orchestration/{}/networks/ethereum/consensus/lighthouse/run.sh /consensus_layer.sh "#, network.label() ); (DOWNLOAD_LIGHTHOUSE.to_string(), String::new(), run_lighthouse) } 
================================================
FILE: orchestration/src/networks/ethereum/consensus/mod.rs
================================================
mod lighthouse;
#[allow(unused)]
pub use lighthouse::lighthouse;

mod nimbus;
pub use nimbus::nimbus;

================================================
FILE: orchestration/src/networks/ethereum/consensus/nimbus.rs
================================================
use crate::Network;

// Returns the (download stage, root-stage setup, runtime setup) Dockerfile fragments
// for the Nimbus consensus-layer client. Verified against a pinned sha512 checksum.
pub fn nimbus(network: Network) -> (String, String, String) {
  assert_ne!(network, Network::Dev);

  // Map the host architecture onto Nimbus's release-artifact platform naming.
  let platform = match std::env::consts::ARCH {
    "x86_64" => "amd64",
    "arm" => "arm32v7",
    "aarch64" => "arm64v8",
    _ => panic!("unsupported platform"),
  };

  // Pinned sha512 of the extracted beacon-node binary, per platform.
  #[rustfmt::skip]
  let checksum = match platform {
    "amd64" => "5da10222cfb555ce2e3820ece12e8e30318945e3ed4b2b88d295963c879daeee071623c47926f880f3db89ce537fd47c6b26fe37e47aafbae3222b58bcec2fba",
    "arm32v7" => "7055da77bfa1186ee2e7ce2a48b923d45ccb039592f529c58d93d55a62bca46566ada451bd7497c3ae691260544f0faf303602afd85ccc18388fdfdac0bb2b45",
    "arm64v8" => "1a68f44598462abfade0dbeb6adf10b52614ba03605a8bf487b99493deb41468317926ef2d657479fcc26fce640aeebdbd880956beec3fb110b5abc97bd83556",
    _ => panic!("unsupported platform"),
  };

  #[rustfmt::skip]
  let download_nimbus = format!(r#"
FROM alpine:latest AS nimbus

ENV NIMBUS_VERSION=24.3.0
ENV NIMBUS_COMMIT=dc19b082

RUN apk --no-cache add wget

# Download nimbus
RUN wget -4 https://github.com/status-im/nimbus-eth2/releases/download/v${{NIMBUS_VERSION}}/nimbus-eth2_Linux_{platform}_${{NIMBUS_VERSION}}_${{NIMBUS_COMMIT}}.tar.gz

# Extract nimbus
RUN tar xvf nimbus-eth2_Linux_{platform}_${{NIMBUS_VERSION}}_${{NIMBUS_COMMIT}}.tar.gz
RUN mv nimbus-eth2_Linux_{platform}_${{NIMBUS_VERSION}}_${{NIMBUS_COMMIT}}/build/nimbus_beacon_node ./nimbus

# Verify the checksum
RUN sha512sum nimbus | grep {checksum}
"#);

  let run_nimbus = format!(
    r#"
COPY --from=nimbus --chown=ethereum nimbus /bin
ADD /orchestration/{}/networks/ethereum/consensus/nimbus/run.sh /consensus_layer.sh
"#,
    network.label()
  );

  (download_nimbus, String::new(), run_nimbus)
}

================================================
FILE: orchestration/src/networks/ethereum/execution/anvil.rs
================================================
use crate::Network;

// Dev-only execution layer: installs Foundry's anvil for a local test chain.
pub fn anvil(network: Network) -> (String, String, String) {
  assert_eq!(network, Network::Dev);

  const ANVIL_SETUP: &str = r#"
RUN curl -L https://foundry.paradigm.xyz | bash || exit 0
RUN ~/.foundry/bin/foundryup

EXPOSE 8545
"#;

  (String::new(), "RUN apt install git curl -y".to_string(), ANVIL_SETUP.to_string())
}

================================================
FILE: orchestration/src/networks/ethereum/execution/mod.rs
================================================
mod reth;
pub use reth::reth;

mod anvil;
pub use anvil::anvil;

================================================
FILE: orchestration/src/networks/ethereum/execution/reth.rs
================================================
use crate::Network;

// Returns the (download stage, root-stage setup, runtime setup) Dockerfile fragments
// for the reth execution-layer client.
pub fn reth(network: Network) -> (String, String, String) {
  assert_ne!(network, Network::Dev);

  #[rustfmt::skip]
  const DOWNLOAD_RETH: &str = r#"
FROM alpine:latest AS reth

ENV RETH_VERSION=0.2.0-beta.6

RUN apk --no-cache add wget git gnupg

# Download reth
RUN wget -4 https://github.com/paradigmxyz/reth/releases/download/v${RETH_VERSION}/reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz
RUN wget -4 https://github.com/paradigmxyz/reth/releases/download/v${RETH_VERSION}/reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz.asc

# Verify the signature
# Fix: these gpg commands were bare lines (no RUN, so never executed by Docker), and the
# .asc path lacked the "-unknown-linux-gnu" infix, naming a file that was never downloaded.
RUN gpg --keyserver keyserver.ubuntu.com --recv-keys A3AE097C89093A124049DF1F5391A3C4100530B4 && \
  gpg --verify reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz.asc reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz

# Extract reth
RUN tar xvf reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz
"#;

  let run_reth = format!(
    r#"
COPY --from=reth --chown=ethereum reth /bin
EXPOSE 30303 9001 8545
ADD /orchestration/{}/networks/ethereum/execution/reth/run.sh /execution_layer.sh
"#, network.label() ); (DOWNLOAD_RETH.to_string(), String::new(), run_reth) } ================================================ FILE: orchestration/src/networks/ethereum/mod.rs ================================================ use std::path::Path; use crate::{Network, Os, mimalloc, os, write_dockerfile}; mod execution; use execution::*; mod consensus; use consensus::*; pub fn ethereum(orchestration_path: &Path, network: Network) { let ((el_download, el_run_as_root, el_run), (cl_download, cl_run_as_root, cl_run)) = if network == Network::Dev { (anvil(network), (String::new(), String::new(), String::new())) } else { // TODO: Select an EL/CL based off a RNG seeded from the public key (reth(network), nimbus(network)) }; let download = mimalloc(Os::Alpine).to_string() + &el_download + &cl_download; let run = format!( r#" ADD /orchestration/{}/networks/ethereum/run.sh /run.sh CMD ["/run.sh"] "#, network.label() ); let run = mimalloc(Os::Debian).to_string() + &os(Os::Debian, &(el_run_as_root + "\r\n" + &cl_run_as_root), "ethereum") + &el_run + &cl_run + &run; let res = download + &run; let mut ethereum_path = orchestration_path.to_path_buf(); ethereum_path.push("networks"); ethereum_path.push("ethereum"); ethereum_path.push("Dockerfile"); write_dockerfile(ethereum_path, &res); } ================================================ FILE: orchestration/src/networks/mod.rs ================================================ mod bitcoin; pub use bitcoin::*; mod ethereum; pub use ethereum::*; mod monero; pub use monero::*; ================================================ FILE: orchestration/src/networks/monero.rs ================================================ use std::path::Path; use crate::{Network, Os, mimalloc, write_dockerfile}; fn monero_internal( network: Network, os: Os, orchestration_path: &Path, folder: &str, monero_binary: &str, ports: &str, ) { const MONERO_VERSION: &str = "0.18.3.4"; let arch = match std::env::consts::ARCH { // We probably would run this without issues 
yet it's not worth needing to provide support for "x86" | "arm" => panic!("unsupported architecture, please download a 64-bit OS"), "x86_64" => "x64", "aarch64" => "armv8", _ => panic!("unsupported architecture"), }; #[rustfmt::skip] let download_monero = format!(r#" FROM alpine:latest AS monero RUN apk --no-cache add wget gnupg # Download Monero RUN wget -4 https://downloads.getmonero.org/cli/monero-linux-{arch}-v{MONERO_VERSION}.tar.bz2 # Verify Binary -- fingerprint from https://github.com/monero-project/monero-site/issues/1949 ADD orchestration/{}/networks/monero/hashes-v{MONERO_VERSION}.txt . RUN gpg --keyserver hkp://keyserver.ubuntu.com:80 --keyserver-options no-self-sigs-only --receive-keys 81AC591FE9C4B65C5806AFC3F0AF4D462A0BDF92 && \ gpg --verify hashes-v{MONERO_VERSION}.txt && \ grep "$(sha256sum monero-linux-{arch}-v{MONERO_VERSION}.tar.bz2 | cut -c 1-64)" hashes-v{MONERO_VERSION}.txt # Extract it RUN tar -xvjf monero-linux-{arch}-v{MONERO_VERSION}.tar.bz2 --strip-components=1 "#, network.label(), ); let setup = mimalloc(os).to_string() + &download_monero; let run_monero = format!( r#" COPY --from=monero --chown=monero:nogroup {monero_binary} /bin EXPOSE {ports} ADD /orchestration/{}/networks/{folder}/run.sh / CMD ["/run.sh"] "#, network.label(), ); let run = crate::os(os, if os == Os::Alpine { "RUN apk --no-cache add gcompat" } else { "" }, "monero") + &run_monero; let res = setup + &run; let mut monero_path = orchestration_path.to_path_buf(); monero_path.push("networks"); monero_path.push(folder); monero_path.push("Dockerfile"); write_dockerfile(monero_path, &res); } pub fn monero(orchestration_path: &Path, network: Network) { monero_internal(network, Os::Debian, orchestration_path, "monero", "monerod", "18080 18081") } pub fn monero_wallet_rpc(orchestration_path: &Path) { monero_internal( Network::Dev, Os::Debian, orchestration_path, "monero-wallet-rpc", "monero-wallet-rpc", "18082", ) } ================================================ FILE: 
orchestration/src/processor.rs ================================================ use std::path::Path; use zeroize::Zeroizing; use dalek_ff_group::Ristretto; use ciphersuite::{group::ff::PrimeField, Ciphersuite}; use crate::{Network, Os, mimalloc, os, build_serai_service, write_dockerfile}; #[allow(clippy::needless_pass_by_value)] pub fn processor( orchestration_path: &Path, network: Network, coin: &'static str, _coordinator_key: ::G, coin_key: Zeroizing<::F>, entropy: Zeroizing<[u8; 32]>, ) { let setup = mimalloc(Os::Debian).to_string() + &build_serai_service( if coin == "ethereum" { r#" RUN cargo install svm-rs RUN svm install 0.8.26 RUN svm use 0.8.26 "# } else { "" }, network.release(), &format!("binaries {} {coin}", network.db()), "serai-processor", ); const ADDITIONAL_ROOT: &str = r#" # Install ca-certificates RUN apt install -y ca-certificates "#; // TODO: Randomly generate these const RPC_USER: &str = "serai"; const RPC_PASS: &str = "seraidex"; // TODO: Isolate networks let hostname = format!("serai-{}-{coin}", network.label()); let port = format!( "{}", match coin { "bitcoin" => 8332, "ethereum" => 8545, "monero" => 18081, _ => panic!("unrecognized external network"), } ); let mut env_vars = vec![ ("MESSAGE_QUEUE_RPC", format!("serai-{}-message-queue", network.label())), ("MESSAGE_QUEUE_KEY", hex::encode(coin_key.to_repr())), ("ENTROPY", hex::encode(entropy.as_ref())), ("NETWORK", coin.to_string()), ("NETWORK_RPC_LOGIN", format!("{RPC_USER}:{RPC_PASS}")), ("NETWORK_RPC_HOSTNAME", hostname), ("NETWORK_RPC_PORT", port), ("DB_PATH", "/volume/processor-db".to_string()), ("RUST_LOG", "info,serai_processor=debug".to_string()), ]; if coin == "ethereum" { env_vars .push(("ETHEREUM_RELAYER_HOSTNAME", format!("serai-{}-ethereum-relayer", network.label()))); env_vars.push(("ETHEREUM_RELAYER_PORT", "20830".to_string())); } let mut env_vars_str = String::new(); for (env_var, value) in env_vars { env_vars_str += &format!(r#"{env_var}=${{{env_var}:="{value}"}} "#); } let 
run_processor = format!( r#" # Copy the Processor binary and relevant license COPY --from=builder --chown=processor /serai/bin/serai-processor /bin/ COPY --from=builder --chown=processor /serai/AGPL-3.0 . # Run processor CMD {env_vars_str} serai-processor "# ); let run = os(Os::Debian, ADDITIONAL_ROOT, "processor") + &run_processor; let res = setup + &run; let mut processor_path = orchestration_path.to_path_buf(); processor_path.push("processor"); processor_path.push(coin); processor_path.push("Dockerfile"); write_dockerfile(processor_path, &res); } ================================================ FILE: orchestration/src/serai.rs ================================================ use std::path::Path; use zeroize::Zeroizing; use dalek_ff_group::Ristretto; use ciphersuite::{group::ff::PrimeField, Ciphersuite}; use crate::{Network, Os, mimalloc, os, build_serai_service, write_dockerfile}; pub fn serai( orchestration_path: &Path, network: Network, serai_key: &Zeroizing<::F>, ) { // Always builds in release for performance reasons let setup = mimalloc(Os::Debian).to_string() + &build_serai_service("", true, "", "serai-node"); let setup_fast_epoch = mimalloc(Os::Debian).to_string() + &build_serai_service("", true, "fast-epoch", "serai-node"); let env_vars = [("KEY", hex::encode(serai_key.to_repr()))]; let mut env_vars_str = String::new(); for (env_var, value) in env_vars { env_vars_str += &format!(r#"{env_var}=${{{env_var}:="{value}"}} "#); } let run_serai = format!( r#" # Copy the Serai binary and relevant license COPY --from=builder --chown=serai /serai/bin/serai-node /bin/ COPY --from=builder --chown=serai /serai/AGPL-3.0 . 
# Run the Serai node EXPOSE 30333 9944 ADD /orchestration/{}/serai/run.sh / CMD {env_vars_str} "/run.sh" "#, network.label(), ); let run = os(Os::Debian, "", "serai") + &run_serai; let res = setup + &run; let res_fast_epoch = setup_fast_epoch + &run; let mut serai_path = orchestration_path.to_path_buf(); serai_path.push("serai"); let mut serai_fast_epoch_path = serai_path.clone(); serai_path.push("Dockerfile"); serai_fast_epoch_path.push("Dockerfile.fast-epoch"); write_dockerfile(serai_path, &res); write_dockerfile(serai_fast_epoch_path, &res_fast_epoch); } ================================================ FILE: orchestration/testnet/coordinator/.folder ================================================ ================================================ FILE: orchestration/testnet/message-queue/.folder ================================================ ================================================ FILE: orchestration/testnet/networks/bitcoin/run.sh ================================================ #!/bin/sh RPC_USER="${RPC_USER:=serai}" RPC_PASS="${RPC_PASS:=seraidex}" bitcoind -txindex -testnet -port=8333 \ -rpcuser=$RPC_USER -rpcpassword=$RPC_PASS \ -rpcbind=0.0.0.0 -rpcallowip=0.0.0.0/0 -rpcport=8332 \ --datadir=/volume ================================================ FILE: orchestration/testnet/networks/ethereum/consensus/lighthouse/run.sh ================================================ #!/bin/sh RUST_LOG=info lighthouse bn --execution-endpoint http://localhost:8551 --execution-jwt /home/ethereum/.jwt ================================================ FILE: orchestration/testnet/networks/ethereum/consensus/nimbus/run.sh ================================================ #!/bin/sh exit 1 ================================================ FILE: orchestration/testnet/networks/ethereum/execution/geth/run.sh ================================================ #!/bin/sh #geth --dev --networkid 5208 \ # --http --http.api "web3,net,eth,miner" \ # --http.addr 0.0.0.0 --http.port 
8545 \ # --http.vhosts="*" --http.corsdomain "*" exit 1 ================================================ FILE: orchestration/testnet/networks/ethereum/execution/reth/run.sh ================================================ #!/bin/sh RUST_LOG=info reth node --authrpc.jwtsecret /home/ethereum/.jwt ================================================ FILE: orchestration/testnet/networks/ethereum/run.sh ================================================ /execution_layer.sh & /consensus_layer.sh ================================================ FILE: orchestration/testnet/networks/ethereum-relayer/.folder ================================================ #!/bin/sh RPC_USER="${RPC_USER:=serai}" RPC_PASS="${RPC_PASS:=seraidex}" # Run Monero monerod --non-interactive --regtest --offline --fixed-difficulty=1 \ --no-zmq --rpc-bind-ip=0.0.0.0 --rpc-bind-port=18081 --confirm-external-bind \ --rpc-access-control-origins "*" --disable-rpc-ban \ --rpc-login=$RPC_USER:$RPC_PASS \ $1 ================================================ FILE: orchestration/testnet/networks/monero/hashes-v0.18.3.4.txt ================================================ -----BEGIN PGP SIGNED MESSAGE----- Hash: SHA256 # This GPG-signed message exists to confirm the SHA256 sums of Monero binaries. # # Please verify the signature against the key for binaryFate in the # source code repository (/utils/gpg_keys). 
# # ## CLI 15e4d7dfc2f9261a0a452b0f8fd157c33cdbc8a896e23d883ddd13e2480a3800 monero-android-armv7-v0.18.3.4.tar.bz2 d9c9249d1408822ce36b346c6b9fb6b896cda16714d62117fb1c588a5201763c monero-android-armv8-v0.18.3.4.tar.bz2 360a551388922c8991a9ba4abaa88676b0fc7ec1fa4d0f4b5c0500847e0b946c monero-freebsd-x64-v0.18.3.4.tar.bz2 354603c56446fb0551cdd6933bce5a13590b7881e05979b7ec25d89e7e59a0e2 monero-linux-armv7-v0.18.3.4.tar.bz2 33ca2f0055529d225b61314c56370e35606b40edad61c91c859f873ed67a1ea7 monero-linux-armv8-v0.18.3.4.tar.bz2 88739a1521b9fda3154540268e416c7af016ed7857041c76ab8ed7d7674c71ca monero-linux-riscv64-v0.18.3.4.tar.bz2 51ba03928d189c1c11b5379cab17dd9ae8d2230056dc05c872d0f8dba4a87f1d monero-linux-x64-v0.18.3.4.tar.bz2 d7ca0878abff2919a0104d7ed29d9c35df9ca0ea1b6fb4ebf6c8f7607ffb9e41 monero-linux-x86-v0.18.3.4.tar.bz2 44520cb3a05c2518ca9aeae1b2e3080fe2bba1e3596d014ceff1090dfcba8ab4 monero-mac-armv8-v0.18.3.4.tar.bz2 32c449f562216d3d83154e708471236d07db7477d6b67f1936a0a85a5005f2b8 monero-mac-x64-v0.18.3.4.tar.bz2 54a66db6c892b2a0999754841f4ca68511741b88ea3ab20c7cd504a027f465f5 monero-win-x64-v0.18.3.4.zip 1a9824742aa1587023c3bddea788c115940cfd49371c78a8dd62c40113132d01 monero-win-x86-v0.18.3.4.zip 7d4845ec0a3b52404d41785da348ec33509f0a5981e8a27c5fa55b18d696e139 monero-source-v0.18.3.4.tar.bz2 # ## GUI 63349d5a7637cd0c5d1693a1a2e910a92cbb123903d57667077a36454845d7bf monero-gui-install-win-x64-v0.18.3.4.exe 2866f3a2be30e4c4113e6274cad1d6698f81c37ceebc6e8f084c57230a0f70a6 monero-gui-linux-x64-v0.18.3.4.tar.bz2 eedbf827513607a3ef579077dacd573e65892b199102effef97dff9d73138ca6 monero-gui-mac-armv8-v0.18.3.4.dmg 54eb151d7511a9f26130864e2c02f258344803b2b68311c8be29850d7faef359 monero-gui-mac-x64-v0.18.3.4.dmg b5d42dddd722e728e480337f89038c8ea606c6507bf0c88ddf2af25050c9b751 monero-gui-win-x64-v0.18.3.4.zip 2f1d643bb2cc08e5eb334a6bfd649b0aa95ceb6178ff2f90448d5ef8d2a752a6 monero-gui-source-v0.18.3.4.tar.bz2 # # # ~binaryFate -----BEGIN PGP SIGNATURE----- 
iQIzBAEBCAAdFiEEgaxZH+nEtlxYBq/D8K9NRioL35IFAmbF8bAACgkQ8K9NRioL 35KQAQ/7BP9j0Tx+zlFs3zbVIFXzfoPbGo2/uerM4xUWX/NUoI7XDTGWV2lpcR1x o6eqstbuHciY0Aj2MsICsdqD+1PYW0EBZlfNLMrk161c3nQMJcjCE65uIhbLkOSs 6SUakmpxkueQOE/Ug5Afaa/JBATVTxLTmqSCI7Ai9NplF+6KNauXQXNrlwO/gHcd whYDmsqp2JyOtMpMlpOckzLgg7Oroj7B0LBf78Z13p1naUyPooBaIEXSdKm5g2HI vPd+z1bOVIluqPBnYWUwL7EmXy08/broejHGliQ+2iY9IsmDDx6rnSe/oprNEDic l+/w3KvPcTkBh8hJLVDyYieYdVYHqOktIPlR1dKV512CnuP1ljr/CXjJmkAkXHlg bObMUCIM9UYqp1I+KDaArjYNbzkHK02Lu6sak49GXgEuq66m9t4isF2GdcHrbERs cLGsnhkTO2LtnGcziOC2l9XSzL41swxe0GrkK0rdeiyDCGAlb7hllevFy7zlT90l Jw670TyFVBs8fUFHk/tOtT0ivSDJJg8m9waBzi/46ksOvuid6p3P3a0agqu3uclj rscSpk0JS3E/3+A/N0IaiTmUO5zSjbsCrSnxQjcfrRRtERL+6JVHFVlW+nJzYWWH u0O7bNZSqEruR4aTEtsddLgs57I10thDR5SUONuAqbEq8EYN8OE= =aLFR -----END PGP SIGNATURE----- ================================================ FILE: orchestration/testnet/networks/monero/run.sh ================================================ #!/bin/sh RPC_USER="${RPC_USER:=serai}" RPC_PASS="${RPC_PASS:=seraidex}" # Run Monero monerod --non-interactive --stagenet \ --no-zmq --rpc-bind-ip=0.0.0.0 --rpc-bind-port=18081 --confirm-external-bind \ --rpc-access-control-origins "*" --disable-rpc-ban \ --rpc-login=$RPC_USER:$RPC_PASS \ --data-dir=/volume ================================================ FILE: orchestration/testnet/processor/bitcoin/.folder ================================================ ================================================ FILE: orchestration/testnet/processor/ethereum/.folder ================================================ ================================================ FILE: orchestration/testnet/processor/monero/.folder ================================================ ================================================ FILE: orchestration/testnet/serai/run.sh ================================================ #!/bin/sh serai-node --base-path /volume --unsafe-rpc-external --rpc-cors all --chain testnet --validator 
================================================ FILE: patches/directories-next/Cargo.toml ================================================ [package] name = "directories-next" version = "2.0.0" description = "Patch from directories-next back to directories" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/patches/directories-next" authors = ["Luke Parker "] keywords = [] edition = "2021" rust-version = "1.74" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [dependencies] directories = "5" ================================================ FILE: patches/directories-next/src/lib.rs ================================================ pub use directories::*; ================================================ FILE: patches/home/Cargo.toml ================================================ [package] name = "home" version = "0.5.99" description = "Replacement for `home` which uses the `std` impl" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/patches/home" authors = ["Luke Parker "] keywords = [] edition = "2024" rust-version = "1.85" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [workspace] ================================================ FILE: patches/home/src/lib.rs ================================================ pub use std::env::home_dir; ================================================ FILE: patches/matches/Cargo.toml ================================================ [package] name = "matches" version = "0.1.10" description = "Replacement for the matches polyfill which uses the std impl" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/patches/matches" authors = ["Luke Parker "] keywords = [] edition = "2021" rust-version = "1.56" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] ================================================ FILE: patches/matches/src/lib.rs 
================================================ pub use std::matches; ================================================ FILE: patches/option-ext/Cargo.toml ================================================ [package] name = "option-ext" version = "0.2.0" description = "Non-MPL option-ext with the exactly needed API for directories" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/patches/option-ext" authors = ["Luke Parker "] keywords = [] edition = "2021" rust-version = "1.74" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] ================================================ FILE: patches/option-ext/src/lib.rs ================================================ pub trait OptionExt { fn contains(&self, x: &T) -> bool; } impl OptionExt for Option { fn contains(&self, x: &T) -> bool { self.as_ref() == Some(x) } } ================================================ FILE: processor/Cargo.toml ================================================ [package] name = "serai-processor" version = "0.1.0" description = "Multichain processor premised on canonicity to reach distributed consensus automatically" license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/processor" authors = ["Luke Parker "] keywords = [] edition = "2021" publish = false [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] # Macros async-trait = { version = "0.1", default-features = false } zeroize = { version = "1", default-features = false, features = ["std"] } thiserror = { version = "1", default-features = false } # Libs rand_core = { version = "0.6", default-features = false, features = ["std", "getrandom"] } rand_chacha = { version = "0.3", default-features = false, features = ["std"] } # Encoders const-hex = { version = "1", default-features = false } hex = { version = "0.4", default-features = false, features = ["std"] } scale = { package = 
"parity-scale-codec", version = "3", default-features = false, features = ["std"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } serde_json = { version = "1", default-features = false, features = ["std"] } # Cryptography blake2 = { version = "0.10", default-features = false, features = ["std"] } dalek-ff-group = { path = "../crypto/dalek-ff-group", default-features = false, features = ["std"] } ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std"] } transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, features = ["std"] } dkg-pedpop = { path = "../crypto/dkg/pedpop", default-features = false } frost = { package = "modular-frost", path = "../crypto/frost", default-features = false, features = ["ristretto"] } frost-schnorrkel = { path = "../crypto/schnorrkel", default-features = false } # Bitcoin/Ethereum k256 = { version = "^0.13.1", default-features = false, features = ["std"], optional = true } # Bitcoin secp256k1 = { version = "0.29", default-features = false, features = ["std", "global-context", "rand-std"], optional = true } bitcoin-serai = { path = "../networks/bitcoin", default-features = false, features = ["std"], optional = true } # Ethereum ciphersuite-kp256 = { path = "../crypto/ciphersuite/kp256", default-features = false, features = ["std"], optional = true } ethereum-serai = { path = "../networks/ethereum", default-features = false, optional = true } # Monero monero-simple-request-rpc = { git = "https://github.com/monero-oxide/monero-oxide", rev = "32e6b5fe5ba9e1ea3e68da882550005122a11d22", default-features = false, optional = true } monero-wallet = { git = "https://github.com/monero-oxide/monero-oxide", rev = "32e6b5fe5ba9e1ea3e68da882550005122a11d22", default-features = false, features = ["std", "multisig", "compile-time-generators"], optional = true } # Application log = { version = "0.4", 
default-features = false, features = ["std"] } env_logger = { version = "0.10", default-features = false, features = ["humantime"], optional = true } tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } zalloc = { path = "../common/zalloc" } serai-db = { path = "../common/db" } serai-env = { path = "../common/env", optional = true } # TODO: Replace with direct usage of primitives serai-client = { path = "../substrate/client", default-features = false, features = ["serai"] } messages = { package = "serai-processor-messages", path = "./messages" } message-queue = { package = "serai-message-queue", path = "../message-queue", optional = true } [dev-dependencies] frost = { package = "modular-frost", path = "../crypto/frost", features = ["tests"] } sp-application-crypto = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false, features = ["std"] } ethereum-serai = { path = "../networks/ethereum", default-features = false, features = ["tests"] } dockertest = "0.5" serai-docker-tests = { path = "../tests/docker" } [features] secp256k1 = ["k256", "frost/secp256k1"] bitcoin = ["dep:secp256k1", "secp256k1", "bitcoin-serai", "serai-client/bitcoin"] ethereum = ["secp256k1", "ciphersuite-kp256", "ethereum-serai/tests"] ed25519 = ["frost/ed25519"] monero = ["ed25519", "monero-simple-request-rpc", "monero-wallet", "serai-client/monero"] binaries = ["env_logger", "serai-env", "message-queue"] parity-db = ["serai-db/parity-db"] rocksdb = ["serai-db/rocksdb"] ================================================ FILE: processor/LICENSE ================================================ AGPL-3.0-only license Copyright (c) 2022-2023 Luke Parker This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License Version 3 as published by the Free Software Foundation. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see . ================================================ FILE: processor/README.md ================================================ # Processor The Serai processor scans a specified external network, communicating with the coordinator. For details on its exact messaging flow, and overall policies, please view `docs/processor`. ================================================ FILE: processor/messages/Cargo.toml ================================================ [package] name = "serai-processor-messages" version = "0.1.0" description = "Messages sent and received by the processor" license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/processor/messages" authors = ["Luke Parker "] keywords = [] edition = "2021" publish = false [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } dkg = { path = "../../crypto/dkg", default-features = false, features = ["std", "borsh"] } serai-primitives = { path = "../../substrate/primitives", default-features = false, features = ["std", "borsh"] } in-instructions-primitives = { package = "serai-in-instructions-primitives", path = "../../substrate/in-instructions/primitives", default-features = false, features = ["std", "borsh"] } coins-primitives = { package = "serai-coins-primitives", path = "../../substrate/coins/primitives", default-features = false, features = ["std", "borsh"] } 
validator-sets-primitives = { package = "serai-validator-sets-primitives", path = "../../substrate/validator-sets/primitives", default-features = false, features = ["std", "borsh"] } ================================================ FILE: processor/messages/LICENSE ================================================ AGPL-3.0-only license Copyright (c) 2023 Luke Parker This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License Version 3 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see . ================================================ FILE: processor/messages/src/lib.rs ================================================ #![expect(clippy::cast_possible_truncation)] use std::collections::HashMap; use scale::{Encode, Decode}; use borsh::{BorshSerialize, BorshDeserialize}; use dkg::{Participant, ThresholdParams}; use serai_primitives::BlockHash; use in_instructions_primitives::{Batch, SignedBatch}; use coins_primitives::OutInstructionWithBalance; use validator_sets_primitives::{Session, KeyPair}; #[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub struct SubstrateContext { pub serai_time: u64, pub network_latest_finalized_block: BlockHash, } pub mod key_gen { use super::*; #[derive( Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize, )] pub struct KeyGenId { pub session: Session, pub attempt: u32, } #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum CoordinatorMessage { // Instructs the Processor to begin the key generation process. 
// TODO: Should this be moved under Substrate? GenerateKey { id: KeyGenId, params: ThresholdParams, shares: u16, }, // Received commitments for the specified key generation protocol. Commitments { id: KeyGenId, commitments: HashMap>, }, // Received shares for the specified key generation protocol. Shares { id: KeyGenId, shares: Vec>>, }, /// Instruction to verify a blame accusation. VerifyBlame { id: KeyGenId, accuser: Participant, accused: Participant, share: Vec, blame: Option>, }, } impl CoordinatorMessage { pub fn required_block(&self) -> Option { None } } #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum ProcessorMessage { // Created commitments for the specified key generation protocol. Commitments { id: KeyGenId, commitments: Vec>, }, // Participant published invalid commitments. InvalidCommitments { id: KeyGenId, faulty: Participant, }, // Created shares for the specified key generation protocol. Shares { id: KeyGenId, shares: Vec>>, }, // Participant published an invalid share. #[rustfmt::skip] InvalidShare { id: KeyGenId, accuser: Participant, faulty: Participant, blame: Option>, }, // Resulting keys from the specified key generation protocol. GeneratedKeyPair { id: KeyGenId, substrate_key: [u8; 32], network_key: Vec, }, // Blame this participant. Blame { id: KeyGenId, participant: Participant, }, } } pub mod sign { use super::*; #[derive(Clone, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)] pub struct SignId { pub session: Session, pub id: [u8; 32], pub attempt: u32, } #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum CoordinatorMessage { // Received preprocesses for the specified signing protocol. Preprocesses { id: SignId, preprocesses: HashMap> }, // Received shares for the specified signing protocol. Shares { id: SignId, shares: HashMap> }, // Re-attempt a signing protocol. Reattempt { id: SignId }, // Completed a signing protocol already. 
Completed { session: Session, id: [u8; 32], tx: Vec }, } impl CoordinatorMessage { pub fn required_block(&self) -> Option { None } pub fn session(&self) -> Session { match self { CoordinatorMessage::Preprocesses { id, .. } | CoordinatorMessage::Shares { id, .. } | CoordinatorMessage::Reattempt { id } => id.session, CoordinatorMessage::Completed { session, .. } => *session, } } } #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum ProcessorMessage { // Participant sent an invalid message during the sign protocol. InvalidParticipant { id: SignId, participant: Participant }, // Created preprocess for the specified signing protocol. Preprocess { id: SignId, preprocesses: Vec> }, // Signed share for the specified signing protocol. Share { id: SignId, shares: Vec> }, // Completed a signing protocol already. Completed { session: Session, id: [u8; 32], tx: Vec }, } } pub mod coordinator { use super::*; pub fn cosign_block_msg(block_number: u64, block: [u8; 32]) -> Vec { const DST: &[u8] = b"Cosign"; let mut res = vec![u8::try_from(DST.len()).unwrap()]; res.extend(DST); res.extend(block_number.to_le_bytes()); res.extend(block); res } #[derive( Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize, )] pub enum SubstrateSignableId { CosigningSubstrateBlock([u8; 32]), Batch(u32), SlashReport, } #[derive(Clone, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)] pub struct SubstrateSignId { pub session: Session, pub id: SubstrateSignableId, pub attempt: u32, } #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum CoordinatorMessage { CosignSubstrateBlock { id: SubstrateSignId, block_number: u64 }, SignSlashReport { id: SubstrateSignId, report: Vec<([u8; 32], u32)> }, SubstratePreprocesses { id: SubstrateSignId, preprocesses: HashMap }, SubstrateShares { id: SubstrateSignId, shares: HashMap }, // Re-attempt a batch signing protocol. 
BatchReattempt { id: SubstrateSignId }, } impl CoordinatorMessage { // The Coordinator will only send Batch messages once the Batch ID has been recognized // The ID will only be recognized when the block is acknowledged by a super-majority of the // network *and the local node* // This synchrony obtained lets us ignore the synchrony requirement offered here pub fn required_block(&self) -> Option { None } } #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub struct PlanMeta { pub session: Session, pub id: [u8; 32], } #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum ProcessorMessage { SubstrateBlockAck { block: u64, plans: Vec }, InvalidParticipant { id: SubstrateSignId, participant: Participant }, CosignPreprocess { id: SubstrateSignId, preprocesses: Vec<[u8; 64]> }, BatchPreprocess { id: SubstrateSignId, block: BlockHash, preprocesses: Vec<[u8; 64]> }, SlashReportPreprocess { id: SubstrateSignId, preprocesses: Vec<[u8; 64]> }, SubstrateShare { id: SubstrateSignId, shares: Vec<[u8; 32]> }, // TODO: Make these signatures [u8; 64]? CosignedBlock { block_number: u64, block: [u8; 32], signature: Vec }, SignedSlashReport { session: Session, signature: Vec }, } } pub mod substrate { use super::*; #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum CoordinatorMessage { ConfirmKeyPair { context: SubstrateContext, session: Session, key_pair: KeyPair, }, SubstrateBlock { context: SubstrateContext, block: u64, burns: Vec, batches: Vec, }, } impl CoordinatorMessage { pub fn required_block(&self) -> Option { let context = match self { CoordinatorMessage::ConfirmKeyPair { context, .. } | CoordinatorMessage::SubstrateBlock { context, .. } => context, }; Some(context.network_latest_finalized_block) } } #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum ProcessorMessage { Batch { batch: Batch }, SignedBatch { batch: SignedBatch }, } } macro_rules! 
impl_from { ($from: ident, $to: ident, $via: ident) => { impl From<$from::$to> for $to { fn from(msg: $from::$to) -> $to { $to::$via(msg) } } }; } #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum CoordinatorMessage { KeyGen(key_gen::CoordinatorMessage), Sign(sign::CoordinatorMessage), Coordinator(coordinator::CoordinatorMessage), Substrate(substrate::CoordinatorMessage), } impl_from!(key_gen, CoordinatorMessage, KeyGen); impl_from!(sign, CoordinatorMessage, Sign); impl_from!(coordinator, CoordinatorMessage, Coordinator); impl_from!(substrate, CoordinatorMessage, Substrate); impl CoordinatorMessage { pub fn required_block(&self) -> Option { let required = match self { CoordinatorMessage::KeyGen(msg) => msg.required_block(), CoordinatorMessage::Sign(msg) => msg.required_block(), CoordinatorMessage::Coordinator(msg) => msg.required_block(), CoordinatorMessage::Substrate(msg) => msg.required_block(), }; // 0 is used when Serai hasn't acknowledged *any* block for this network, which also means // there's no need to wait for the block in question if required == Some(BlockHash([0; 32])) { return None; } required } } #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum ProcessorMessage { KeyGen(key_gen::ProcessorMessage), Sign(sign::ProcessorMessage), Coordinator(coordinator::ProcessorMessage), Substrate(substrate::ProcessorMessage), } impl_from!(key_gen, ProcessorMessage, KeyGen); impl_from!(sign, ProcessorMessage, Sign); impl_from!(coordinator, ProcessorMessage, Coordinator); impl_from!(substrate, ProcessorMessage, Substrate); // Intent generation code const COORDINATOR_UID: u8 = 0; const PROCESSOR_UID: u8 = 1; const TYPE_KEY_GEN_UID: u8 = 2; const TYPE_SIGN_UID: u8 = 3; const TYPE_COORDINATOR_UID: u8 = 4; const TYPE_SUBSTRATE_UID: u8 = 5; impl CoordinatorMessage { /// The intent for this message, which should be unique across the validator's entire system, /// including all of its processors. 
/// /// This doesn't use H(msg.serialize()) as it's meant to be unique to intent, not unique to /// values. While the values should be consistent per intent, that assumption isn't required /// here. pub fn intent(&self) -> Vec { match self { CoordinatorMessage::KeyGen(msg) => { // Unique since key gen ID embeds the session and attempt let (sub, id) = match msg { key_gen::CoordinatorMessage::GenerateKey { id, .. } => (0, id), key_gen::CoordinatorMessage::Commitments { id, .. } => (1, id), key_gen::CoordinatorMessage::Shares { id, .. } => (2, id), key_gen::CoordinatorMessage::VerifyBlame { id, .. } => (3, id), }; let mut res = vec![COORDINATOR_UID, TYPE_KEY_GEN_UID, sub]; res.extend(&id.encode()); res } CoordinatorMessage::Sign(msg) => { let (sub, id) = match msg { // Unique since SignId includes a hash of the network, and specific transaction info sign::CoordinatorMessage::Preprocesses { id, .. } => (0, id.encode()), sign::CoordinatorMessage::Shares { id, .. } => (1, id.encode()), sign::CoordinatorMessage::Reattempt { id } => (2, id.encode()), // The coordinator should report all reported completions to the processor // Accordingly, the intent is a combination of plan ID and actual TX // While transaction alone may suffice, that doesn't cover cross-chain TX ID conflicts, // which are possible sign::CoordinatorMessage::Completed { id, tx, .. } => (3, (id, tx).encode()), }; let mut res = vec![COORDINATOR_UID, TYPE_SIGN_UID, sub]; res.extend(&id); res } CoordinatorMessage::Coordinator(msg) => { let (sub, id) = match msg { // Unique since this ID contains the hash of the block being cosigned coordinator::CoordinatorMessage::CosignSubstrateBlock { id, .. } => (0, id.encode()), // Unique since there's only one of these per session/attempt, and ID is inclusive to // both coordinator::CoordinatorMessage::SignSlashReport { id, .. 
} => (1, id.encode()), // Unique since this embeds the batch ID (including its network) and attempt coordinator::CoordinatorMessage::SubstratePreprocesses { id, .. } => (2, id.encode()), coordinator::CoordinatorMessage::SubstrateShares { id, .. } => (3, id.encode()), coordinator::CoordinatorMessage::BatchReattempt { id, .. } => (4, id.encode()), }; let mut res = vec![COORDINATOR_UID, TYPE_COORDINATOR_UID, sub]; res.extend(&id); res } CoordinatorMessage::Substrate(msg) => { let (sub, id) = match msg { // Unique since there's only one key pair for a session substrate::CoordinatorMessage::ConfirmKeyPair { session, .. } => (0, session.encode()), substrate::CoordinatorMessage::SubstrateBlock { block, .. } => (1, block.encode()), }; let mut res = vec![COORDINATOR_UID, TYPE_SUBSTRATE_UID, sub]; res.extend(&id); res } } } } impl ProcessorMessage { /// The intent for this message, which should be unique across the validator's entire system, /// including all of its processors. /// /// This doesn't use H(msg.serialize()) as it's meant to be unique to intent, not unique to /// values. While the values should be consistent per intent, that assumption isn't required /// here. pub fn intent(&self) -> Vec { match self { ProcessorMessage::KeyGen(msg) => { let (sub, id) = match msg { // Unique since KeyGenId key_gen::ProcessorMessage::Commitments { id, .. } => (0, id), key_gen::ProcessorMessage::InvalidCommitments { id, .. } => (1, id), key_gen::ProcessorMessage::Shares { id, .. } => (2, id), key_gen::ProcessorMessage::InvalidShare { id, .. } => (3, id), key_gen::ProcessorMessage::GeneratedKeyPair { id, .. } => (4, id), key_gen::ProcessorMessage::Blame { id, .. } => (5, id), }; let mut res = vec![PROCESSOR_UID, TYPE_KEY_GEN_UID, sub]; res.extend(&id.encode()); res } ProcessorMessage::Sign(msg) => { let (sub, id) = match msg { // Unique since SignId sign::ProcessorMessage::InvalidParticipant { id, .. } => (0, id.encode()), sign::ProcessorMessage::Preprocess { id, .. 
} => (1, id.encode()), sign::ProcessorMessage::Share { id, .. } => (2, id.encode()), // Unique since a processor will only sign a TX once sign::ProcessorMessage::Completed { id, .. } => (3, id.to_vec()), }; let mut res = vec![PROCESSOR_UID, TYPE_SIGN_UID, sub]; res.extend(&id); res } ProcessorMessage::Coordinator(msg) => { let (sub, id) = match msg { coordinator::ProcessorMessage::SubstrateBlockAck { block, .. } => (0, block.encode()), // Unique since SubstrateSignId coordinator::ProcessorMessage::InvalidParticipant { id, .. } => (1, id.encode()), coordinator::ProcessorMessage::CosignPreprocess { id, .. } => (2, id.encode()), coordinator::ProcessorMessage::BatchPreprocess { id, .. } => (3, id.encode()), coordinator::ProcessorMessage::SlashReportPreprocess { id, .. } => (4, id.encode()), coordinator::ProcessorMessage::SubstrateShare { id, .. } => (5, id.encode()), // Unique since only one instance of a signature matters coordinator::ProcessorMessage::CosignedBlock { block, .. } => (6, block.encode()), coordinator::ProcessorMessage::SignedSlashReport { .. } => (7, vec![]), }; let mut res = vec![PROCESSOR_UID, TYPE_COORDINATOR_UID, sub]; res.extend(&id); res } ProcessorMessage::Substrate(msg) => { let (sub, id) = match msg { // Unique since network and ID binding substrate::ProcessorMessage::Batch { batch } => (0, (batch.network, batch.id).encode()), substrate::ProcessorMessage::SignedBatch { batch, .. 
} => { (1, (batch.batch.network, batch.batch.id).encode()) } }; let mut res = vec![PROCESSOR_UID, TYPE_SUBSTRATE_UID, sub]; res.extend(&id); res } } } } ================================================ FILE: processor/src/additional_key.rs ================================================ use ciphersuite::Ciphersuite; use crate::networks::Network; // Generate a static additional key for a given chain in a globally consistent manner // Doesn't consider the current group key to increase the simplicity of verifying Serai's status // Takes an index, k, to support protocols which use multiple secondary keys // Presumably a view key pub fn additional_key(k: u64) -> ::F { ::hash_to_F( b"Serai DEX Additional Key", &[N::ID.as_bytes(), &k.to_le_bytes()].concat(), ) } ================================================ FILE: processor/src/batch_signer.rs ================================================ use core::{marker::PhantomData, fmt}; use std::collections::HashMap; use rand_core::OsRng; use frost::{ curve::Ristretto, ThresholdKeys, FrostError, algorithm::Algorithm, sign::{ Writable, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine, AlgorithmSignMachine, AlgorithmSignatureMachine, }, }; use frost_schnorrkel::Schnorrkel; use log::{info, debug, warn}; use serai_client::{ primitives::{ExternalNetworkId, BlockHash}, in_instructions::primitives::{Batch, SignedBatch, batch_message}, validator_sets::primitives::Session, }; use messages::coordinator::*; use crate::{Get, DbTxn, Db, create_db}; create_db!( BatchSignerDb { CompletedDb: (id: u32) -> (), AttemptDb: (id: u32, attempt: u32) -> (), BatchDb: (block: BlockHash) -> SignedBatch } ); type Preprocess = as PreprocessMachine>::Preprocess; type SignatureShare = as SignMachine< >::Signature, >>::SignatureShare; pub struct BatchSigner { db: PhantomData, network: ExternalNetworkId, session: Session, keys: Vec>, signable: HashMap, attempt: HashMap, #[allow(clippy::type_complexity)] preprocessing: HashMap>, Vec)>, 
#[allow(clippy::type_complexity)] signing: HashMap, Vec)>, } impl fmt::Debug for BatchSigner { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt .debug_struct("BatchSigner") .field("signable", &self.signable) .field("attempt", &self.attempt) .finish_non_exhaustive() } } impl BatchSigner { pub fn new( network: ExternalNetworkId, session: Session, keys: Vec>, ) -> BatchSigner { assert!(!keys.is_empty()); BatchSigner { db: PhantomData, network, session, keys, signable: HashMap::new(), attempt: HashMap::new(), preprocessing: HashMap::new(), signing: HashMap::new(), } } fn verify_id(&self, id: &SubstrateSignId) -> Result<(Session, u32, u32), ()> { let SubstrateSignId { session, id, attempt } = id; let SubstrateSignableId::Batch(id) = id else { panic!("BatchSigner handed non-Batch") }; assert_eq!(session, &self.session); // Check the attempt lines up match self.attempt.get(id) { // If we don't have an attempt logged, it's because the coordinator is faulty OR because we // rebooted OR we detected the signed batch on chain // The latter is the expected flow for batches not actively being participated in None => { warn!("not attempting batch {id} #{attempt}"); Err(())?; } Some(our_attempt) => { if attempt != our_attempt { warn!("sent signing data for batch {id} #{attempt} yet we have attempt #{our_attempt}"); Err(())?; } } } Ok((*session, *id, *attempt)) } #[must_use] fn attempt( &mut self, txn: &mut D::Transaction<'_>, id: u32, attempt: u32, ) -> Option { // See above commentary for why this doesn't emit SignedBatch if CompletedDb::get(txn, id).is_some() { return None; } // Check if we're already working on this attempt if let Some(curr_attempt) = self.attempt.get(&id) { if curr_attempt >= &attempt { warn!("told to attempt {id} #{attempt} yet we're already working on {curr_attempt}"); return None; } } // Start this attempt let block = if let Some(batch) = self.signable.get(&id) { batch.block } else { warn!("told to attempt signing a batch we aren't currently 
signing for"); return None; }; // Delete any existing machines self.preprocessing.remove(&id); self.signing.remove(&id); // Update the attempt number self.attempt.insert(id, attempt); info!("signing batch {id} #{attempt}"); // If we reboot mid-sign, the current design has us abort all signs and wait for latter // attempts/new signing protocols // This is distinct from the DKG which will continue DKG sessions, even on reboot // This is because signing is tolerant of failures of up to 1/3rd of the group // The DKG requires 100% participation // While we could apply similar tricks as the DKG (a seeded RNG) to achieve support for // reboots, it's not worth the complexity when messing up here leaks our secret share // // Despite this, on reboot, we'll get told of active signing items, and may be in this // branch again for something we've already attempted // // Only run if this hasn't already been attempted // TODO: This isn't complete as this txn may not be committed with the expected timing if AttemptDb::get(txn, id, attempt).is_some() { warn!( "already attempted batch {id}, attempt #{attempt}. 
this is an error if we didn't reboot" ); return None; } AttemptDb::set(txn, id, attempt, &()); let mut machines = vec![]; let mut preprocesses = vec![]; let mut serialized_preprocesses = vec![]; for keys in &self.keys { // b"substrate" is a literal from sp-core let machine = AlgorithmMachine::new(Schnorrkel::new(b"substrate"), keys.clone()); let (machine, preprocess) = machine.preprocess(&mut OsRng); machines.push(machine); serialized_preprocesses.push(preprocess.serialize().try_into().unwrap()); preprocesses.push(preprocess); } self.preprocessing.insert(id, (machines, preprocesses)); let id = SubstrateSignId { session: self.session, id: SubstrateSignableId::Batch(id), attempt }; // Broadcast our preprocesses Some(ProcessorMessage::BatchPreprocess { id, block, preprocesses: serialized_preprocesses }) } #[must_use] pub fn sign(&mut self, txn: &mut D::Transaction<'_>, batch: Batch) -> Option { debug_assert_eq!(self.network, batch.network); let id = batch.id; if CompletedDb::get(txn, id).is_some() { debug!("Sign batch order for ID we've already completed signing"); // See batch_signed for commentary on why this simply returns return None; } self.signable.insert(id, batch); self.attempt(txn, id, 0) } #[must_use] pub fn handle( &mut self, txn: &mut D::Transaction<'_>, msg: CoordinatorMessage, ) -> Option { match msg { CoordinatorMessage::CosignSubstrateBlock { .. } => { panic!("BatchSigner passed CosignSubstrateBlock") } CoordinatorMessage::SignSlashReport { .. } => { panic!("Cosigner passed SignSlashReport") } CoordinatorMessage::SubstratePreprocesses { id, preprocesses } => { let (session, id, attempt) = self.verify_id(&id).ok()?; let substrate_sign_id = SubstrateSignId { session, id: SubstrateSignableId::Batch(id), attempt }; let (machines, our_preprocesses) = match self.preprocessing.remove(&id) { // Either rebooted or RPC error, or some invariant None => { warn!("not preprocessing for {id}. 
this is an error if we didn't reboot"); return None; } Some(preprocess) => preprocess, }; let mut parsed = HashMap::new(); for l in { let mut keys = preprocesses.keys().copied().collect::>(); keys.sort(); keys } { let mut preprocess_ref = preprocesses.get(&l).unwrap().as_slice(); let Ok(res) = machines[0].read_preprocess(&mut preprocess_ref) else { return Some( (ProcessorMessage::InvalidParticipant { id: substrate_sign_id, participant: l }) .into(), ); }; if !preprocess_ref.is_empty() { return Some( (ProcessorMessage::InvalidParticipant { id: substrate_sign_id, participant: l }) .into(), ); } parsed.insert(l, res); } let preprocesses = parsed; // Only keep a single machine as we only need one to get the signature let mut signature_machine = None; let mut shares = vec![]; let mut serialized_shares = vec![]; for (m, machine) in machines.into_iter().enumerate() { let mut preprocesses = preprocesses.clone(); for (i, our_preprocess) in our_preprocesses.clone().into_iter().enumerate() { if i != m { assert!(preprocesses.insert(self.keys[i].params().i(), our_preprocess).is_none()); } } let (machine, share) = match machine .sign(preprocesses, &batch_message(&self.signable[&id])) { Ok(res) => res, Err(e) => match e { FrostError::InternalError(_) | FrostError::InvalidParticipant(_, _) | FrostError::InvalidSigningSet(_) | FrostError::InvalidParticipantQuantity(_, _) | FrostError::DuplicatedParticipant(_) | FrostError::MissingParticipant(_) => unreachable!(), FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => { return Some( (ProcessorMessage::InvalidParticipant { id: substrate_sign_id, participant: l }) .into(), ) } }, }; if m == 0 { signature_machine = Some(machine); } let mut share_bytes = [0; 32]; share_bytes.copy_from_slice(&share.serialize()); serialized_shares.push(share_bytes); shares.push(share); } self.signing.insert(id, (signature_machine.unwrap(), shares)); // Broadcast our shares Some( (ProcessorMessage::SubstrateShare { id: substrate_sign_id, shares: 
serialized_shares }) .into(), ) } CoordinatorMessage::SubstrateShares { id, shares } => { let (session, id, attempt) = self.verify_id(&id).ok()?; let substrate_sign_id = SubstrateSignId { session, id: SubstrateSignableId::Batch(id), attempt }; let (machine, our_shares) = match self.signing.remove(&id) { // Rebooted, RPC error, or some invariant None => { // If preprocessing has this ID, it means we were never sent the preprocess by the // coordinator if self.preprocessing.contains_key(&id) { panic!("never preprocessed yet signing?"); } warn!("not preprocessing for {id}. this is an error if we didn't reboot"); return None; } Some(signing) => signing, }; let mut parsed = HashMap::new(); for l in { let mut keys = shares.keys().copied().collect::>(); keys.sort(); keys } { let mut share_ref = shares.get(&l).unwrap().as_slice(); let Ok(res) = machine.read_share(&mut share_ref) else { return Some( (ProcessorMessage::InvalidParticipant { id: substrate_sign_id, participant: l }) .into(), ); }; if !share_ref.is_empty() { return Some( (ProcessorMessage::InvalidParticipant { id: substrate_sign_id, participant: l }) .into(), ); } parsed.insert(l, res); } let mut shares = parsed; for (i, our_share) in our_shares.into_iter().enumerate().skip(1) { assert!(shares.insert(self.keys[i].params().i(), our_share).is_none()); } let sig = match machine.complete(shares) { Ok(res) => res, Err(e) => match e { FrostError::InternalError(_) | FrostError::InvalidParticipant(_, _) | FrostError::InvalidSigningSet(_) | FrostError::InvalidParticipantQuantity(_, _) | FrostError::DuplicatedParticipant(_) | FrostError::MissingParticipant(_) => unreachable!(), FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => { return Some( (ProcessorMessage::InvalidParticipant { id: substrate_sign_id, participant: l }) .into(), ) } }, }; info!("signed batch {id} with attempt #{attempt}"); let batch = SignedBatch { batch: self.signable.remove(&id).unwrap(), signature: sig.into() }; // Save the batch in 
case it's needed for recovery BatchDb::set(txn, batch.batch.block, &batch); CompletedDb::set(txn, id, &()); // Stop trying to sign for this batch assert!(self.attempt.remove(&id).is_some()); assert!(self.preprocessing.remove(&id).is_none()); assert!(self.signing.remove(&id).is_none()); Some((messages::substrate::ProcessorMessage::SignedBatch { batch }).into()) } CoordinatorMessage::BatchReattempt { id } => { let SubstrateSignableId::Batch(batch_id) = id.id else { panic!("BatchReattempt passed non-Batch ID") }; self.attempt(txn, batch_id, id.attempt).map(Into::into) } } } pub fn batch_signed(&mut self, txn: &mut D::Transaction<'_>, id: u32) { // Stop trying to sign for this batch CompletedDb::set(txn, id, &()); self.signable.remove(&id); self.attempt.remove(&id); self.preprocessing.remove(&id); self.signing.remove(&id); // This doesn't emit SignedBatch because it doesn't have access to the SignedBatch // This function is expected to only be called once Substrate acknowledges this block, // which means its batch must have been signed // While a successive batch's signing would also cause this block to be acknowledged, Substrate // guarantees a batch's ordered inclusion // This also doesn't return any messages since all mutation from the Batch being signed happens // on the substrate::CoordinatorMessage::SubstrateBlock message (which SignedBatch is meant to // end up triggering) } } ================================================ FILE: processor/src/coordinator.rs ================================================ use messages::{ProcessorMessage, CoordinatorMessage}; use message_queue::{Service, Metadata, client::MessageQueue}; #[derive(Clone, PartialEq, Eq, Debug)] pub struct Message { pub id: u64, pub msg: CoordinatorMessage, } #[async_trait::async_trait] pub trait Coordinator { async fn send(&mut self, msg: impl Send + Into); async fn recv(&mut self) -> Message; async fn ack(&mut self, msg: Message); } #[async_trait::async_trait] impl Coordinator for MessageQueue { 
async fn send(&mut self, msg: impl Send + Into) { let msg: ProcessorMessage = msg.into(); let metadata = Metadata { from: self.service, to: Service::Coordinator, intent: msg.intent() }; let msg = borsh::to_vec(&msg).unwrap(); self.queue(metadata, msg).await; } async fn recv(&mut self) -> Message { let msg = self.next(Service::Coordinator).await; let id = msg.id; // Deserialize it into a CoordinatorMessage let msg: CoordinatorMessage = borsh::from_slice(&msg.msg).expect("message wasn't a borsh-encoded CoordinatorMessage"); return Message { id, msg }; } async fn ack(&mut self, msg: Message) { MessageQueue::ack(self, Service::Coordinator, msg.id).await } } ================================================ FILE: processor/src/cosigner.rs ================================================ use core::fmt; use std::collections::HashMap; use rand_core::OsRng; use frost::{ curve::Ristretto, ThresholdKeys, FrostError, algorithm::Algorithm, sign::{ Writable, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine, AlgorithmSignMachine, AlgorithmSignatureMachine, }, }; use frost_schnorrkel::Schnorrkel; use log::{info, warn}; use serai_client::validator_sets::primitives::Session; use messages::coordinator::*; use crate::{Get, DbTxn, create_db}; create_db! 
{ CosignerDb { Completed: (id: [u8; 32]) -> (), Attempt: (id: [u8; 32], attempt: u32) -> (), } } type Preprocess = as PreprocessMachine>::Preprocess; type SignatureShare = as SignMachine< >::Signature, >>::SignatureShare; pub struct Cosigner { session: Session, keys: Vec>, block_number: u64, id: [u8; 32], attempt: u32, #[allow(clippy::type_complexity)] preprocessing: Option<(Vec>, Vec)>, #[allow(clippy::type_complexity)] signing: Option<(AlgorithmSignatureMachine, Vec)>, } impl fmt::Debug for Cosigner { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt .debug_struct("Cosigner") .field("session", &self.session) .field("block_number", &self.block_number) .field("id", &self.id) .field("attempt", &self.attempt) .field("preprocessing", &self.preprocessing.is_some()) .field("signing", &self.signing.is_some()) .finish_non_exhaustive() } } impl Cosigner { pub fn new( txn: &mut impl DbTxn, session: Session, keys: Vec>, block_number: u64, id: [u8; 32], attempt: u32, ) -> Option<(Cosigner, ProcessorMessage)> { assert!(!keys.is_empty()); if Completed::get(txn, id).is_some() { return None; } if Attempt::get(txn, id, attempt).is_some() { warn!( "already attempted cosigning {}, attempt #{}. 
this is an error if we didn't reboot", hex::encode(id), attempt, ); return None; } Attempt::set(txn, id, attempt, &()); info!("cosigning block {} with attempt #{}", hex::encode(id), attempt); let mut machines = vec![]; let mut preprocesses = vec![]; let mut serialized_preprocesses = vec![]; for keys in &keys { // b"substrate" is a literal from sp-core let machine = AlgorithmMachine::new(Schnorrkel::new(b"substrate"), keys.clone()); let (machine, preprocess) = machine.preprocess(&mut OsRng); machines.push(machine); serialized_preprocesses.push(preprocess.serialize().try_into().unwrap()); preprocesses.push(preprocess); } let preprocessing = Some((machines, preprocesses)); let substrate_sign_id = SubstrateSignId { session, id: SubstrateSignableId::CosigningSubstrateBlock(id), attempt }; Some(( Cosigner { session, keys, block_number, id, attempt, preprocessing, signing: None }, ProcessorMessage::CosignPreprocess { id: substrate_sign_id, preprocesses: serialized_preprocesses, }, )) } #[must_use] pub fn handle( &mut self, txn: &mut impl DbTxn, msg: CoordinatorMessage, ) -> Option { match msg { CoordinatorMessage::CosignSubstrateBlock { .. } => { panic!("Cosigner passed CosignSubstrateBlock") } CoordinatorMessage::SignSlashReport { .. } => { panic!("Cosigner passed SignSlashReport") } CoordinatorMessage::SubstratePreprocesses { id, preprocesses } => { assert_eq!(id.session, self.session); let SubstrateSignableId::CosigningSubstrateBlock(block) = id.id else { panic!("cosigner passed Batch") }; if block != self.id { panic!("given preprocesses for a distinct block than cosigner is signing") } if id.attempt != self.attempt { panic!("given preprocesses for a distinct attempt than cosigner is signing") } let (machines, our_preprocesses) = match self.preprocessing.take() { // Either rebooted or RPC error, or some invariant None => { warn!( "not preprocessing for {}. 
this is an error if we didn't reboot", hex::encode(block), ); return None; } Some(preprocess) => preprocess, }; let mut parsed = HashMap::new(); for l in { let mut keys = preprocesses.keys().copied().collect::>(); keys.sort(); keys } { let mut preprocess_ref = preprocesses.get(&l).unwrap().as_slice(); let Ok(res) = machines[0].read_preprocess(&mut preprocess_ref) else { return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); }; if !preprocess_ref.is_empty() { return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); } parsed.insert(l, res); } let preprocesses = parsed; // Only keep a single machine as we only need one to get the signature let mut signature_machine = None; let mut shares = vec![]; let mut serialized_shares = vec![]; for (m, machine) in machines.into_iter().enumerate() { let mut preprocesses = preprocesses.clone(); for (i, our_preprocess) in our_preprocesses.clone().into_iter().enumerate() { if i != m { assert!(preprocesses.insert(self.keys[i].params().i(), our_preprocess).is_none()); } } let (machine, share) = match machine.sign(preprocesses, &cosign_block_msg(self.block_number, self.id)) { Ok(res) => res, Err(e) => match e { FrostError::InternalError(_) | FrostError::InvalidParticipant(_, _) | FrostError::InvalidSigningSet(_) | FrostError::InvalidParticipantQuantity(_, _) | FrostError::DuplicatedParticipant(_) | FrostError::MissingParticipant(_) => unreachable!(), FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => { return Some(ProcessorMessage::InvalidParticipant { id, participant: l }) } }, }; if m == 0 { signature_machine = Some(machine); } let mut share_bytes = [0; 32]; share_bytes.copy_from_slice(&share.serialize()); serialized_shares.push(share_bytes); shares.push(share); } self.signing = Some((signature_machine.unwrap(), shares)); // Broadcast our shares Some(ProcessorMessage::SubstrateShare { id, shares: serialized_shares }) } CoordinatorMessage::SubstrateShares { id, shares } => { 
assert_eq!(id.session, self.session); let SubstrateSignableId::CosigningSubstrateBlock(block) = id.id else { panic!("cosigner passed Batch") }; if block != self.id { panic!("given preprocesses for a distinct block than cosigner is signing") } if id.attempt != self.attempt { panic!("given preprocesses for a distinct attempt than cosigner is signing") } let (machine, our_shares) = match self.signing.take() { // Rebooted, RPC error, or some invariant None => { // If preprocessing has this ID, it means we were never sent the preprocess by the // coordinator if self.preprocessing.is_some() { panic!("never preprocessed yet signing?"); } warn!( "not preprocessing for {}. this is an error if we didn't reboot", hex::encode(block) ); return None; } Some(signing) => signing, }; let mut parsed = HashMap::new(); for l in { let mut keys = shares.keys().copied().collect::>(); keys.sort(); keys } { let mut share_ref = shares.get(&l).unwrap().as_slice(); let Ok(res) = machine.read_share(&mut share_ref) else { return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); }; if !share_ref.is_empty() { return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); } parsed.insert(l, res); } let mut shares = parsed; for (i, our_share) in our_shares.into_iter().enumerate().skip(1) { assert!(shares.insert(self.keys[i].params().i(), our_share).is_none()); } let sig = match machine.complete(shares) { Ok(res) => res, Err(e) => match e { FrostError::InternalError(_) | FrostError::InvalidParticipant(_, _) | FrostError::InvalidSigningSet(_) | FrostError::InvalidParticipantQuantity(_, _) | FrostError::DuplicatedParticipant(_) | FrostError::MissingParticipant(_) => unreachable!(), FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => { return Some(ProcessorMessage::InvalidParticipant { id, participant: l }) } }, }; info!("cosigned {} with attempt #{}", hex::encode(block), id.attempt); Completed::set(txn, block, &()); Some(ProcessorMessage::CosignedBlock { 
block_number: self.block_number, block, signature: sig.to_bytes().to_vec(), }) } CoordinatorMessage::BatchReattempt { .. } => panic!("BatchReattempt passed to Cosigner"), } } } ================================================ FILE: processor/src/db.rs ================================================ use std::io::Read; use scale::{Encode, Decode}; use serai_client::validator_sets::primitives::{Session, KeyPair}; pub use serai_db::*; use crate::networks::{Block, Network}; create_db!( MainDb { HandledMessageDb: (id: u64) -> (), PendingActivationsDb: () -> Vec } ); impl PendingActivationsDb { pub fn pending_activation( getter: &impl Get, ) -> Option<(>::Id, Session, KeyPair)> { if let Some(bytes) = Self::get(getter) { if !bytes.is_empty() { let mut slice = bytes.as_slice(); let (session, key_pair) = <(Session, KeyPair)>::decode(&mut slice).unwrap(); let mut block_before_queue_block = >::Id::default(); slice.read_exact(block_before_queue_block.as_mut()).unwrap(); assert!(slice.is_empty()); return Some((block_before_queue_block, session, key_pair)); } } None } pub fn set_pending_activation( txn: &mut impl DbTxn, block_before_queue_block: &>::Id, session: Session, key_pair: KeyPair, ) { let mut buf = (session, key_pair).encode(); buf.extend(block_before_queue_block.as_ref()); Self::set(txn, &buf); } } ================================================ FILE: processor/src/key_gen.rs ================================================ use std::collections::HashMap; use zeroize::Zeroizing; use rand_core::SeedableRng; use rand_chacha::ChaCha20Rng; use transcript::{Transcript, RecommendedTranscript}; use ciphersuite::group::GroupEncoding; use dkg_pedpop::*; use frost::{ curve::{Ciphersuite, Ristretto}, dkg::{Participant, ThresholdParams, ThresholdKeys}, }; use log::info; use serai_client::validator_sets::primitives::{Session, KeyPair}; use messages::key_gen::*; use crate::{Get, DbTxn, Db, create_db, networks::Network}; #[derive(Debug)] pub struct KeyConfirmed { pub substrate_keys: 
Vec>, pub network_keys: Vec>, } create_db!( KeyGenDb { ParamsDb: (session: &Session, attempt: u32) -> (ThresholdParams, u16), // Not scoped to the set since that'd have latter attempts overwrite former // A former attempt may become the finalized attempt, even if it doesn't in a timely manner // Overwriting its commitments would be accordingly poor CommitmentsDb: (key: &KeyGenId) -> HashMap>, GeneratedKeysDb: (session: &Session, substrate_key: &[u8; 32], network_key: &[u8]) -> Vec, // These do assume a key is only used once across sets, which holds true so long as a single // participant is honest in their execution of the protocol KeysDb: (network_key: &[u8]) -> Vec, SessionDb: (network_key: &[u8]) -> Session, NetworkKeyDb: (session: Session) -> Vec, } ); impl GeneratedKeysDb { #[allow(clippy::type_complexity)] fn read_keys( getter: &impl Get, key: &[u8], ) -> Option<(Vec, (Vec>, Vec>))> { let keys_vec = getter.get(key)?; let mut keys_ref: &[u8] = keys_vec.as_ref(); let mut substrate_keys = vec![]; let mut network_keys = vec![]; while !keys_ref.is_empty() { substrate_keys.push(ThresholdKeys::read(&mut keys_ref).unwrap()); let mut these_network_keys = ThresholdKeys::read(&mut keys_ref).unwrap(); N::tweak_keys(&mut these_network_keys); network_keys.push(these_network_keys); } Some((keys_vec, (substrate_keys, network_keys))) } fn save_keys( txn: &mut impl DbTxn, id: &KeyGenId, substrate_keys: &[ThresholdKeys], network_keys: &[ThresholdKeys], ) { let mut keys = Zeroizing::new(vec![]); for (substrate_keys, network_keys) in substrate_keys.iter().zip(network_keys) { keys.extend(substrate_keys.serialize().as_slice()); keys.extend(network_keys.serialize().as_slice()); } txn.put( Self::key( &id.session, &substrate_keys[0].group_key().to_bytes(), network_keys[0].group_key().to_bytes().as_ref(), ), keys, ); } } impl KeysDb { fn confirm_keys( txn: &mut impl DbTxn, session: Session, key_pair: &KeyPair, ) -> (Vec>, Vec>) { let (keys_vec, keys) = GeneratedKeysDb::read_keys::( 
txn, &GeneratedKeysDb::key(&session, &key_pair.0 .0, key_pair.1.as_ref()), ) .unwrap(); assert_eq!(key_pair.0 .0, keys.0[0].group_key().to_bytes()); assert_eq!( { let network_key: &[u8] = key_pair.1.as_ref(); network_key }, keys.1[0].group_key().to_bytes().as_ref(), ); txn.put(Self::key(key_pair.1.as_ref()), keys_vec); NetworkKeyDb::set(txn, session, &key_pair.1.clone().into_inner()); SessionDb::set(txn, key_pair.1.as_ref(), &session); keys } #[allow(clippy::type_complexity)] fn keys( getter: &impl Get, network_key: &::G, ) -> Option<(Session, (Vec>, Vec>))> { let res = GeneratedKeysDb::read_keys::(getter, &Self::key(network_key.to_bytes().as_ref()))?.1; assert_eq!(&res.1[0].group_key(), network_key); Some((SessionDb::get(getter, network_key.to_bytes().as_ref()).unwrap(), res)) } pub fn substrate_keys_by_session( getter: &impl Get, session: Session, ) -> Option>> { let network_key = NetworkKeyDb::get(getter, session)?; Some(GeneratedKeysDb::read_keys::(getter, &Self::key(&network_key))?.1 .0) } } type SecretShareMachines = Vec<(SecretShareMachine, SecretShareMachine<::Curve>)>; type KeyMachines = Vec<(KeyMachine, KeyMachine<::Curve>)>; #[derive(Debug)] pub struct KeyGen { db: D, entropy: Zeroizing<[u8; 32]>, active_commit: HashMap, Vec>)>, #[allow(clippy::type_complexity)] active_share: HashMap, Vec>>)>, } impl KeyGen { #[allow(clippy::new_ret_no_self)] pub fn new(db: D, entropy: Zeroizing<[u8; 32]>) -> KeyGen { KeyGen { db, entropy, active_commit: HashMap::new(), active_share: HashMap::new() } } pub fn in_set(&self, session: &Session) -> bool { // We determine if we're in set using if we have the parameters for a session's key generation // The usage of 0 for the attempt is valid so long as we aren't malicious and accordingly // aren't fatally slashed // TODO: Revisit once we do DKG removals for being offline ParamsDb::get(&self.db, session, 0).is_some() } #[allow(clippy::type_complexity)] pub fn keys( &self, key: &::G, ) -> Option<(Session, (Vec>, Vec>))> { // 
This is safe, despite not having a txn, since it's a static value // It doesn't change over time/in relation to other operations KeysDb::keys::(&self.db, key) } pub fn substrate_keys_by_session( &self, session: Session, ) -> Option>> { KeysDb::substrate_keys_by_session::(&self.db, session) } pub fn handle( &mut self, txn: &mut D::Transaction<'_>, msg: CoordinatorMessage, ) -> ProcessorMessage { const SUBSTRATE_KEY_CONTEXT: &str = "substrate"; const NETWORK_KEY_CONTEXT: &str = "network"; let context = |id: &KeyGenId, key| -> [u8; 32] { // TODO2: Also embed the chain ID/genesis block ::digest( format!( "Serai Key Gen. Session: {:?}, Network: {:?}, Attempt: {}, Key: {}", id.session, N::NETWORK, id.attempt, key, ) .as_bytes(), ) .into() }; let rng = |label, id: KeyGenId| { let mut transcript = RecommendedTranscript::new(label); transcript.append_message(b"entropy", &self.entropy); transcript.append_message(b"context", context(&id, "rng")); ChaCha20Rng::from_seed(transcript.rng_seed(b"rng")) }; let coefficients_rng = |id| rng(b"Key Gen Coefficients", id); let secret_shares_rng = |id| rng(b"Key Gen Secret Shares", id); let share_rng = |id| rng(b"Key Gen Share", id); let key_gen_machines = |id, params: ThresholdParams, shares| { let mut rng = coefficients_rng(id); let mut machines = vec![]; let mut commitments = vec![]; for s in 0 .. 
shares { let params = ThresholdParams::new( params.t(), params.n(), Participant::new(u16::from(params.i()) + s).unwrap(), ) .unwrap(); let substrate = KeyGenMachine::new(params, context(&id, SUBSTRATE_KEY_CONTEXT)) .generate_coefficients(&mut rng); let network = KeyGenMachine::new(params, context(&id, NETWORK_KEY_CONTEXT)) .generate_coefficients(&mut rng); machines.push((substrate.0, network.0)); let mut serialized = vec![]; substrate.1.write(&mut serialized).unwrap(); network.1.write(&mut serialized).unwrap(); commitments.push(serialized); } (machines, commitments) }; let secret_share_machines = |id, params: ThresholdParams, machines: SecretShareMachines, commitments: HashMap>| -> Result<_, ProcessorMessage> { let mut rng = secret_shares_rng(id); #[allow(clippy::type_complexity)] fn handle_machine( rng: &mut ChaCha20Rng, id: KeyGenId, machine: SecretShareMachine, commitments: HashMap>>, ) -> Result< (KeyMachine, HashMap>>), ProcessorMessage, > { match machine.generate_secret_shares(rng, commitments) { Ok(res) => Ok(res), Err(e) => match e { PedPoPError::InvalidCommitments(i) => { Err(ProcessorMessage::InvalidCommitments { id, faulty: i })? 
} _ => panic!("unknown error: {e:?}"), }, } } let mut substrate_commitments = HashMap::new(); let mut network_commitments = HashMap::new(); for i in 1 ..= params.n() { let i = Participant::new(i).unwrap(); let mut commitments = commitments[&i].as_slice(); substrate_commitments.insert( i, EncryptionKeyMessage::>::read(&mut commitments, params) .map_err(|_| ProcessorMessage::InvalidCommitments { id, faulty: i })?, ); network_commitments.insert( i, EncryptionKeyMessage::>::read(&mut commitments, params) .map_err(|_| ProcessorMessage::InvalidCommitments { id, faulty: i })?, ); if !commitments.is_empty() { // Malicious Participant included extra bytes in their commitments // (a potential DoS attack) Err(ProcessorMessage::InvalidCommitments { id, faulty: i })?; } } let mut key_machines = vec![]; let mut shares = vec![]; for (m, (substrate_machine, network_machine)) in machines.into_iter().enumerate() { let actual_i = Participant::new(u16::from(params.i()) + u16::try_from(m).unwrap()).unwrap(); let mut substrate_commitments = substrate_commitments.clone(); substrate_commitments.remove(&actual_i); let (substrate_machine, mut substrate_shares) = handle_machine::(&mut rng, id, substrate_machine, substrate_commitments)?; let mut network_commitments = network_commitments.clone(); network_commitments.remove(&actual_i); let (network_machine, network_shares) = handle_machine(&mut rng, id, network_machine, network_commitments.clone())?; key_machines.push((substrate_machine, network_machine)); let mut these_shares: HashMap<_, _> = substrate_shares.drain().map(|(i, share)| (i, share.serialize())).collect(); for (i, share) in &mut these_shares { share.extend(network_shares[i].serialize()); } shares.push(these_shares); } Ok((key_machines, shares)) }; match msg { CoordinatorMessage::GenerateKey { id, params, shares } => { info!("Generating new key. 
ID: {id:?} Params: {params:?} Shares: {shares}"); // Remove old attempts if self.active_commit.remove(&id.session).is_none() && self.active_share.remove(&id.session).is_none() { // If we haven't handled this session before, save the params ParamsDb::set(txn, &id.session, id.attempt, &(params, shares)); } let (machines, commitments) = key_gen_machines(id, params, shares); self.active_commit.insert(id.session, (machines, commitments.clone())); ProcessorMessage::Commitments { id, commitments } } CoordinatorMessage::Commitments { id, mut commitments } => { info!("Received commitments for {:?}", id); if self.active_share.contains_key(&id.session) { // We should've been told of a new attempt before receiving commitments again // The coordinator is either missing messages or repeating itself // Either way, it's faulty panic!("commitments when already handled commitments"); } let (params, share_quantity) = ParamsDb::get(txn, &id.session, id.attempt).unwrap(); // Unwrap the machines, rebuilding them if we didn't have them in our cache // We won't if the processor rebooted // This *may* be inconsistent if we receive a KeyGen for attempt x, then commitments for // attempt y // The coordinator is trusted to be proper in this regard let (prior, our_commitments) = self .active_commit .remove(&id.session) .unwrap_or_else(|| key_gen_machines(id, params, share_quantity)); for (i, our_commitments) in our_commitments.into_iter().enumerate() { assert!(commitments .insert( Participant::new(u16::from(params.i()) + u16::try_from(i).unwrap()).unwrap(), our_commitments, ) .is_none()); } CommitmentsDb::set(txn, &id, &commitments); match secret_share_machines(id, params, prior, commitments) { Ok((machines, shares)) => { self.active_share.insert(id.session, (machines, shares.clone())); ProcessorMessage::Shares { id, shares } } Err(e) => e, } } CoordinatorMessage::Shares { id, shares } => { info!("Received shares for {:?}", id); let (params, share_quantity) = ParamsDb::get(txn, &id.session, 
id.attempt).unwrap(); // Same commentary on inconsistency as above exists let (machines, our_shares) = self.active_share.remove(&id.session).unwrap_or_else(|| { let prior = key_gen_machines(id, params, share_quantity).0; let (machines, shares) = secret_share_machines(id, params, prior, CommitmentsDb::get(txn, &id).unwrap()) .expect("got Shares for a key gen which faulted"); (machines, shares) }); let mut rng = share_rng(id); fn handle_machine( rng: &mut ChaCha20Rng, id: KeyGenId, // These are the params of our first share, not this machine's shares params: ThresholdParams, m: usize, machine: KeyMachine, shares_ref: &mut HashMap, ) -> Result, ProcessorMessage> { let params = ThresholdParams::new( params.t(), params.n(), Participant::new(u16::from(params.i()) + u16::try_from(m).unwrap()).unwrap(), ) .unwrap(); // Parse the shares let mut shares = HashMap::new(); for i in 1 ..= params.n() { let i = Participant::new(i).unwrap(); let Some(share) = shares_ref.get_mut(&i) else { continue }; shares.insert( i, EncryptedMessage::>::read(share, params).map_err(|_| { ProcessorMessage::InvalidShare { id, accuser: params.i(), faulty: i, blame: None } })?, ); } Ok( (match machine.calculate_share(rng, shares) { Ok(res) => res, Err(e) => match e { PedPoPError::InvalidShare { participant, blame } => { Err(ProcessorMessage::InvalidShare { id, accuser: params.i(), faulty: participant, blame: Some(blame.map(|blame| blame.serialize())).flatten(), })? 
} _ => panic!("unknown error: {e:?}"), }, }) .complete(), ) } let mut substrate_keys = vec![]; let mut network_keys = vec![]; for (m, machines) in machines.into_iter().enumerate() { let mut shares_ref: HashMap = shares[m].iter().map(|(i, shares)| (*i, shares.as_ref())).collect(); for (i, our_shares) in our_shares.iter().enumerate() { if m != i { assert!(shares_ref .insert( Participant::new(u16::from(params.i()) + u16::try_from(i).unwrap()).unwrap(), our_shares [&Participant::new(u16::from(params.i()) + u16::try_from(m).unwrap()).unwrap()] .as_ref(), ) .is_none()); } } let these_substrate_keys = match handle_machine(&mut rng, id, params, m, machines.0, &mut shares_ref) { Ok(keys) => keys, Err(msg) => return msg, }; let mut these_network_keys = match handle_machine(&mut rng, id, params, m, machines.1, &mut shares_ref) { Ok(keys) => keys, Err(msg) => return msg, }; for i in 1 ..= params.n() { let i = Participant::new(i).unwrap(); let Some(shares) = shares_ref.get(&i) else { continue }; if !shares.is_empty() { return ProcessorMessage::InvalidShare { id, accuser: these_substrate_keys.params().i(), faulty: i, blame: None, }; } } N::tweak_keys(&mut these_network_keys); substrate_keys.push(these_substrate_keys); network_keys.push(these_network_keys); } let mut generated_substrate_key = None; let mut generated_network_key = None; for keys in substrate_keys.iter().zip(&network_keys) { if generated_substrate_key.is_none() { generated_substrate_key = Some(keys.0.group_key()); generated_network_key = Some(keys.1.group_key()); } else { assert_eq!(generated_substrate_key, Some(keys.0.group_key())); assert_eq!(generated_network_key, Some(keys.1.group_key())); } } GeneratedKeysDb::save_keys::(txn, &id, &substrate_keys, &network_keys); ProcessorMessage::GeneratedKeyPair { id, substrate_key: generated_substrate_key.unwrap().to_bytes(), // TODO: This can be made more efficient since tweaked keys may be a subset of keys network_key: 
generated_network_key.unwrap().to_bytes().as_ref().to_vec(), } } CoordinatorMessage::VerifyBlame { id, accuser, accused, share, blame } => { let params = ParamsDb::get(txn, &id.session, id.attempt).unwrap().0; let mut share_ref = share.as_slice(); let Ok(substrate_share) = EncryptedMessage::< Ristretto, SecretShare<::F>, >::read(&mut share_ref, params) else { return ProcessorMessage::Blame { id, participant: accused }; }; let Ok(network_share) = EncryptedMessage::< N::Curve, SecretShare<::F>, >::read(&mut share_ref, params) else { return ProcessorMessage::Blame { id, participant: accused }; }; if !share_ref.is_empty() { return ProcessorMessage::Blame { id, participant: accused }; } let mut substrate_commitment_msgs = HashMap::new(); let mut network_commitment_msgs = HashMap::new(); let commitments = CommitmentsDb::get(txn, &id).unwrap(); for (i, commitments) in commitments { let mut commitments = commitments.as_slice(); substrate_commitment_msgs .insert(i, EncryptionKeyMessage::<_, _>::read(&mut commitments, params).unwrap()); network_commitment_msgs .insert(i, EncryptionKeyMessage::<_, _>::read(&mut commitments, params).unwrap()); } // There is a mild DoS here where someone with a valid blame bloats it to the maximum size // Given the ambiguity, and limited potential to DoS (this being called means *someone* is // getting fatally slashed) voids the need to ensure blame is minimal let substrate_blame = blame.clone().and_then(|blame| EncryptionKeyProof::read(&mut blame.as_slice()).ok()); let network_blame = blame.clone().and_then(|blame| EncryptionKeyProof::read(&mut blame.as_slice()).ok()); let substrate_blame = AdditionalBlameMachine::new( context(&id, SUBSTRATE_KEY_CONTEXT), params.n(), substrate_commitment_msgs, ) .unwrap() .blame(accuser, accused, substrate_share, substrate_blame); let network_blame = AdditionalBlameMachine::new( context(&id, NETWORK_KEY_CONTEXT), params.n(), network_commitment_msgs, ) .unwrap() .blame(accuser, accused, network_share, 
network_blame); // If the accused was blamed for either, mark them as at fault if (substrate_blame == accused) || (network_blame == accused) { return ProcessorMessage::Blame { id, participant: accused }; } ProcessorMessage::Blame { id, participant: accuser } } } } // This should only be called if we're participating, hence taking our instance #[allow(clippy::unused_self)] pub fn confirm( &mut self, txn: &mut D::Transaction<'_>, session: Session, key_pair: &KeyPair, ) -> KeyConfirmed { info!( "Confirmed key pair {} {} for {:?}", hex::encode(key_pair.0), hex::encode(&key_pair.1), session, ); let (substrate_keys, network_keys) = KeysDb::confirm_keys::(txn, session, key_pair); KeyConfirmed { substrate_keys, network_keys } } } ================================================ FILE: processor/src/lib.rs ================================================ #![allow(dead_code)] mod plan; pub use plan::*; mod db; pub(crate) use db::*; mod key_gen; pub mod networks; pub(crate) mod multisigs; mod additional_key; pub use additional_key::additional_key; ================================================ FILE: processor/src/main.rs ================================================ use std::{time::Duration, collections::HashMap}; use zeroize::{Zeroize, Zeroizing}; use transcript::{Transcript, RecommendedTranscript}; use ciphersuite::{group::GroupEncoding, Ciphersuite}; use log::{info, warn}; use tokio::time::sleep; use serai_client::{ primitives::{BlockHash, ExternalNetworkId}, validator_sets::primitives::{Session, KeyPair}, }; use messages::{ coordinator::{ SubstrateSignableId, PlanMeta, CoordinatorMessage as CoordinatorCoordinatorMessage, }, CoordinatorMessage, }; use serai_env as env; use message_queue::{Service, client::MessageQueue}; mod plan; pub use plan::*; mod networks; use networks::{Block, Network}; #[cfg(feature = "bitcoin")] use networks::Bitcoin; #[cfg(feature = "ethereum")] use networks::Ethereum; #[cfg(feature = "monero")] use networks::Monero; mod additional_key; pub use 
additional_key::additional_key; mod db; pub use db::*; mod coordinator; pub use coordinator::*; mod key_gen; use key_gen::{SessionDb, KeyConfirmed, KeyGen}; mod signer; use signer::Signer; mod cosigner; use cosigner::Cosigner; mod batch_signer; use batch_signer::BatchSigner; mod slash_report_signer; use slash_report_signer::SlashReportSigner; mod multisigs; use multisigs::{MultisigEvent, MultisigManager}; #[cfg(test)] mod tests; #[global_allocator] static ALLOCATOR: zalloc::ZeroizingAlloc = zalloc::ZeroizingAlloc(std::alloc::System); // Items which are mutably borrowed by Tributary. // Any exceptions to this have to be carefully monitored in order to ensure consistency isn't // violated. struct TributaryMutable { // The following are actually mutably borrowed by Substrate as well. // - Substrate triggers key gens, and determines which to use. // - SubstrateBlock events cause scheduling which causes signing. // // This is still considered Tributary-mutable as most mutation (preprocesses/shares) happens by // the Tributary. // // Creation of tasks is by Substrate, yet this is safe since the mutable borrow is transferred to // Tributary. // // Tributary stops mutating a key gen attempt before Substrate is made aware of it, ensuring // Tributary drops its mutable borrow before Substrate acquires it. Tributary will maintain a // mutable borrow on the *key gen task*, yet the finalization code can successfully run for any // attempt. // // The only other note is how the scanner may cause a signer task to be dropped, effectively // invalidating the Tributary's mutable borrow. The signer is coded to allow for attempted usage // of a dropped task. key_gen: KeyGen, signers: HashMap>, // This is also mutably borrowed by the Scanner. // The Scanner starts new sign tasks. // The Tributary mutates already-created signed tasks, potentially completing them. // Substrate may mark tasks as completed, invalidating any existing mutable borrows. 
// The safety of this follows as written above. // There should only be one BatchSigner at a time (see #277) batch_signer: Option>, // Solely mutated by the tributary. cosigner: Option, slash_report_signer: Option, } // Items which are mutably borrowed by Substrate. // Any exceptions to this have to be carefully monitored in order to ensure consistency isn't // violated. /* The MultisigManager contains the Scanner and Schedulers. The scanner is expected to autonomously operate, scanning blocks as they appear. When a block is sufficiently confirmed, the scanner causes the Substrate signer to sign a batch. It itself only mutates its list of finalized blocks, to protect against re-orgs, and its in-memory state though. Disk mutations to the scan-state only happens once the relevant `Batch` is included on Substrate. It can't be mutated as soon as the `Batch` is signed as we need to know the order of `Batch`s relevant to `Burn`s. Schedulers take in new outputs, confirmed in `Batch`s, and outbound payments, triggered by `Burn`s. Substrate also decides when to move to a new multisig, hence why this entire object is Substate-mutable. Since MultisigManager should always be verifiable, and the Tributary is temporal, MultisigManager being entirely SubstrateMutable shows proper data pipe-lining. 
*/ type SubstrateMutable = MultisigManager; async fn handle_coordinator_msg( txn: &mut D::Transaction<'_>, network: &N, coordinator: &mut Co, tributary_mutable: &mut TributaryMutable, substrate_mutable: &mut SubstrateMutable, msg: &Message, ) { // If this message expects a higher block number than we have, halt until synced async fn wait( txn: &D::Transaction<'_>, substrate_mutable: &SubstrateMutable, block_hash: &BlockHash, ) { let mut needed_hash = >::Id::default(); needed_hash.as_mut().copy_from_slice(&block_hash.0); loop { // Ensure our scanner has scanned this block, which means our daemon has this block at // a sufficient depth if substrate_mutable.block_number(txn, &needed_hash).await.is_none() { warn!( "node is desynced. we haven't scanned {} which should happen after {} confirms", hex::encode(&needed_hash), N::CONFIRMATIONS, ); sleep(Duration::from_secs(10)).await; continue; }; break; } // TODO2: Sanity check we got an AckBlock (or this is the AckBlock) for the block in question /* let synced = |context: &SubstrateContext, key| -> Result<(), ()> { // Check that we've synced this block and can actually operate on it ourselves let latest = scanner.latest_scanned(key); if usize::try_from(context.network_latest_finalized_block).unwrap() < latest { log::warn!( "external network node disconnected/desynced from rest of the network. 
\ our block: {latest:?}, network's acknowledged: {}", context.network_latest_finalized_block, ); Err(())?; } Ok(()) }; */ } if let Some(required) = msg.msg.required_block() { // wait only reads from, it doesn't mutate, substrate_mutable wait(txn, substrate_mutable, &required).await; } async fn activate_key( network: &N, substrate_mutable: &mut SubstrateMutable, tributary_mutable: &mut TributaryMutable, txn: &mut D::Transaction<'_>, session: Session, key_pair: KeyPair, activation_number: usize, ) { info!("activating {session:?}'s keys at {activation_number}"); let network_key = ::Curve::read_G::<&[u8]>(&mut key_pair.1.as_ref()) .expect("Substrate finalized invalid point as a network's key"); if tributary_mutable.key_gen.in_set(&session) { // See TributaryMutable's struct definition for why this block is safe let KeyConfirmed { substrate_keys, network_keys } = tributary_mutable.key_gen.confirm(txn, session, &key_pair); if session.0 == 0 { tributary_mutable.batch_signer = Some(BatchSigner::new(N::NETWORK, session, substrate_keys)); } tributary_mutable .signers .insert(session, Signer::new(network.clone(), session, network_keys)); } substrate_mutable.add_key(txn, activation_number, network_key).await; } match msg.msg.clone() { CoordinatorMessage::KeyGen(msg) => { coordinator.send(tributary_mutable.key_gen.handle(txn, msg)).await; } CoordinatorMessage::Sign(msg) => { if let Some(msg) = tributary_mutable .signers .get_mut(&msg.session()) .expect("coordinator told us to sign with a signer we don't have") .handle(txn, msg) .await { coordinator.send(msg).await; } } CoordinatorMessage::Coordinator(msg) => match msg { CoordinatorCoordinatorMessage::CosignSubstrateBlock { id, block_number } => { let SubstrateSignableId::CosigningSubstrateBlock(block) = id.id else { panic!("CosignSubstrateBlock id didn't have a CosigningSubstrateBlock") }; let Some(keys) = tributary_mutable.key_gen.substrate_keys_by_session(id.session) else { panic!("didn't have key shares for the key we were 
told to cosign with"); }; if let Some((cosigner, msg)) = Cosigner::new(txn, id.session, keys, block_number, block, id.attempt) { tributary_mutable.cosigner = Some(cosigner); coordinator.send(msg).await; } else { log::warn!("Cosigner::new returned None"); } } CoordinatorCoordinatorMessage::SignSlashReport { id, report } => { assert_eq!(id.id, SubstrateSignableId::SlashReport); let Some(keys) = tributary_mutable.key_gen.substrate_keys_by_session(id.session) else { panic!("didn't have key shares for the key we were told to perform a slash report with"); }; if let Some((slash_report_signer, msg)) = SlashReportSigner::new(txn, N::NETWORK, id.session, keys, report, id.attempt) { tributary_mutable.slash_report_signer = Some(slash_report_signer); coordinator.send(msg).await; } else { log::warn!("SlashReportSigner::new returned None"); } } _ => { let (is_cosign, is_batch, is_slash_report) = match msg { CoordinatorCoordinatorMessage::CosignSubstrateBlock { .. } | CoordinatorCoordinatorMessage::SignSlashReport { .. } => (false, false, false), CoordinatorCoordinatorMessage::SubstratePreprocesses { ref id, .. } | CoordinatorCoordinatorMessage::SubstrateShares { ref id, .. } => ( matches!(&id.id, SubstrateSignableId::CosigningSubstrateBlock(_)), matches!(&id.id, SubstrateSignableId::Batch(_)), matches!(&id.id, SubstrateSignableId::SlashReport), ), CoordinatorCoordinatorMessage::BatchReattempt { .. } => (false, true, false), }; if is_cosign { if let Some(cosigner) = tributary_mutable.cosigner.as_mut() { if let Some(msg) = cosigner.handle(txn, msg) { coordinator.send(msg).await; } } else { log::warn!( "received message for cosigner yet didn't have a cosigner. 
{}", "this is an error if we didn't reboot", ); } } else if is_batch { if let Some(msg) = tributary_mutable .batch_signer .as_mut() .expect( "coordinator told us to sign a batch when we don't currently have a Substrate signer", ) .handle(txn, msg) { coordinator.send(msg).await; } } else if is_slash_report { if let Some(slash_report_signer) = tributary_mutable.slash_report_signer.as_mut() { if let Some(msg) = slash_report_signer.handle(txn, msg) { coordinator.send(msg).await; } } else { log::warn!( "received message for slash report signer yet didn't have {}", "a slash report signer. this is an error if we didn't reboot", ); } } } }, CoordinatorMessage::Substrate(msg) => { match msg { messages::substrate::CoordinatorMessage::ConfirmKeyPair { context, session, key_pair } => { // This is the first key pair for this network so no block has been finalized yet // TODO: Write documentation for this in docs/ // TODO: Use an Option instead of a magic? if context.network_latest_finalized_block.0 == [0; 32] { assert!(tributary_mutable.signers.is_empty()); assert!(tributary_mutable.batch_signer.is_none()); assert!(tributary_mutable.cosigner.is_none()); // We can't check this as existing is no longer pub // assert!(substrate_mutable.existing.as_ref().is_none()); // Wait until a network's block's time exceeds Serai's time // These time calls are extremely expensive for what they do, yet they only run when // confirming the first key pair, before any network activity has occurred, so they // should be fine // If the latest block number is 10, then the block indexed by 1 has 10 confirms // 10 + 1 - 10 = 1 let mut block_i; while { block_i = (network.get_latest_block_number_with_retries().await + 1) .saturating_sub(N::CONFIRMATIONS); network.get_block_with_retries(block_i).await.time(network).await < context.serai_time } { info!( "serai confirmed the first key pair for a set. 
{} {}", "we're waiting for a network's finalized block's time to exceed unix time ", context.serai_time, ); sleep(Duration::from_secs(5)).await; } // Find the first block to do so let mut earliest = block_i; // earliest > 0 prevents a panic if Serai creates keys before the genesis block // which... should be impossible // Yet a prevented panic is a prevented panic while (earliest > 0) && (network.get_block_with_retries(earliest - 1).await.time(network).await >= context.serai_time) { earliest -= 1; } // Use this as the activation block let activation_number = earliest; activate_key( network, substrate_mutable, tributary_mutable, txn, session, key_pair, activation_number, ) .await; } else { let mut block_before_queue_block = >::Id::default(); block_before_queue_block .as_mut() .copy_from_slice(&context.network_latest_finalized_block.0); // We can't set these keys for activation until we know their queue block, which we // won't until the next Batch is confirmed // Set this variable so when we get the next Batch event, we can handle it PendingActivationsDb::set_pending_activation::( txn, &block_before_queue_block, session, key_pair, ); } } messages::substrate::CoordinatorMessage::SubstrateBlock { context, block: substrate_block, burns, batches, } => { if let Some((block, session, key_pair)) = PendingActivationsDb::pending_activation::(txn) { // Only run if this is a Batch belonging to a distinct block if context.network_latest_finalized_block.as_ref() != block.as_ref() { let mut queue_block = >::Id::default(); queue_block.as_mut().copy_from_slice(context.network_latest_finalized_block.as_ref()); let activation_number = substrate_mutable .block_number(txn, &queue_block) .await .expect("KeyConfirmed from context we haven't synced") + N::CONFIRMATIONS; activate_key( network, substrate_mutable, tributary_mutable, txn, session, key_pair, activation_number, ) .await; //clear pending activation txn.del(PendingActivationsDb::key()); } } // Since this block was acknowledged, 
we no longer have to sign the batches within it if let Some(batch_signer) = tributary_mutable.batch_signer.as_mut() { for batch_id in batches { batch_signer.batch_signed(txn, batch_id); } } let (acquired_lock, to_sign) = substrate_mutable.substrate_block(txn, network, context, burns).await; // Send SubstrateBlockAck, with relevant plan IDs, before we trigger the signing of these // plans if !tributary_mutable.signers.is_empty() { coordinator .send(messages::coordinator::ProcessorMessage::SubstrateBlockAck { block: substrate_block, plans: to_sign .iter() .filter_map(|signable| { SessionDb::get(txn, signable.0.to_bytes().as_ref()) .map(|session| PlanMeta { session, id: signable.1 }) }) .collect(), }) .await; } // See commentary in TributaryMutable for why this is safe let signers = &mut tributary_mutable.signers; for (key, id, tx, eventuality) in to_sign { if let Some(session) = SessionDb::get(txn, key.to_bytes().as_ref()) { let signer = signers.get_mut(&session).unwrap(); if let Some(msg) = signer.sign_transaction(txn, id, tx, &eventuality).await { coordinator.send(msg).await; } } } // This is not premature, even if this block had multiple `Batch`s created, as the first // `Batch` alone will trigger all Plans/Eventualities/Signs if acquired_lock { substrate_mutable.release_scanner_lock().await; } } } } } } async fn boot( raw_db: &mut D, network: &N, coordinator: &mut Co, ) -> (D, TributaryMutable, SubstrateMutable) { let mut entropy_transcript = { let entropy = Zeroizing::new(env::var("ENTROPY").expect("entropy wasn't specified")); if entropy.len() != 64 { panic!("entropy isn't the right length"); } let mut bytes = Zeroizing::new(hex::decode(entropy).map_err(|_| ()).expect("entropy wasn't hex-formatted")); if bytes.len() != 32 { bytes.zeroize(); panic!("entropy wasn't 32 bytes"); } let mut entropy = Zeroizing::new([0; 32]); let entropy_mut: &mut [u8] = entropy.as_mut(); entropy_mut.copy_from_slice(bytes.as_ref()); let mut transcript = 
RecommendedTranscript::new(b"Serai Processor Entropy"); transcript.append_message(b"entropy", entropy); transcript }; // TODO: Save a hash of the entropy to the DB and make sure the entropy didn't change let mut entropy = |label| { let mut challenge = entropy_transcript.challenge(label); let mut res = Zeroizing::new([0; 32]); let res_mut: &mut [u8] = res.as_mut(); res_mut.copy_from_slice(&challenge[.. 32]); challenge.zeroize(); res }; // We don't need to re-issue GenerateKey orders because the coordinator is expected to // schedule/notify us of new attempts // TODO: Is this above comment still true? Not at all due to the planned lack of DKG timeouts? let key_gen = KeyGen::::new(raw_db.clone(), entropy(b"key-gen_entropy")); let (multisig_manager, current_keys, actively_signing) = MultisigManager::new(raw_db, network).await; let mut batch_signer = None; let mut signers = HashMap::new(); for (i, key) in current_keys.iter().enumerate() { let Some((session, (substrate_keys, network_keys))) = key_gen.keys(key) else { continue }; let network_key = network_keys[0].group_key(); // If this is the oldest key, load the BatchSigner for it as the active BatchSigner // The new key only takes responsibility once the old key is fully deprecated // // We don't have to load any state for this since the Scanner will re-fire any events // necessary, only no longer scanning old blocks once Substrate acks them if i == 0 { batch_signer = Some(BatchSigner::new(N::NETWORK, session, substrate_keys)); } // The Scanner re-fires events as needed for batch_signer yet not signer // This is due to the transactions which we start signing from due to a block not being // guaranteed to be signed before we stop scanning the block on reboot // We could simplify the Signer flow by delaying when it acks a block, yet that'd: // 1) Increase the startup time // 2) Cause re-emission of Batch events, which we'd need to check the safety of // (TODO: Do anyways?) 
// 3) Violate the attempt counter (TODO: Is this already being violated?) let mut signer = Signer::new(network.clone(), session, network_keys); // Sign any TXs being actively signed for (plan, tx, eventuality) in &actively_signing { if plan.key == network_key { let mut txn = raw_db.txn(); if let Some(msg) = signer.sign_transaction(&mut txn, plan.id(), tx.clone(), eventuality).await { coordinator.send(msg).await; } // This should only have re-writes of existing data drop(txn); } } signers.insert(session, signer); } // Spawn a task to rebroadcast signed TXs yet to be mined into a finalized block // This hedges against being dropped due to full mempools, temporarily too low of a fee... tokio::spawn(Signer::::rebroadcast_task(raw_db.clone(), network.clone())); ( raw_db.clone(), TributaryMutable { key_gen, batch_signer, cosigner: None, slash_report_signer: None, signers }, multisig_manager, ) } #[allow(clippy::await_holding_lock)] // Needed for txn, unfortunately can't be down-scoped async fn run(mut raw_db: D, network: N, mut coordinator: Co) { // We currently expect a contextless bidirectional mapping between these two values // (which is that any value of A can be interpreted as B and vice versa) // While we can write a contextual mapping, we have yet to do so // This check ensures no network which doesn't have a bidirectional mapping is defined assert_eq!(>::Id::default().as_ref().len(), BlockHash([0u8; 32]).0.len()); let (main_db, mut tributary_mutable, mut substrate_mutable) = boot(&mut raw_db, &network, &mut coordinator).await; // We can't load this from the DB as we can't guarantee atomic increments with the ack function // TODO: Load with a slight tolerance let mut last_coordinator_msg = None; loop { let mut txn = raw_db.txn(); log::trace!("new db txn in run"); let mut outer_msg = None; tokio::select! 
{ // This blocks the entire processor until it finishes handling this message // KeyGen specifically may take a notable amount of processing time // While that shouldn't be an issue in practice, as after processing an attempt it'll handle // the other messages in the queue, it may be beneficial to parallelize these // They could potentially be parallelized by type (KeyGen, Sign, Substrate) without issue msg = coordinator.recv() => { if let Some(last_coordinator_msg) = last_coordinator_msg { assert_eq!(msg.id, last_coordinator_msg + 1); } last_coordinator_msg = Some(msg.id); // Only handle this if we haven't already if HandledMessageDb::get(&main_db, msg.id).is_none() { HandledMessageDb::set(&mut txn, msg.id, &()); // This is isolated to better think about how its ordered, or rather, about how the other // cases aren't ordered // // While the coordinator messages are ordered, they're not deterministically ordered // Tributary-caused messages are deterministically ordered, and Substrate-caused messages // are deterministically-ordered, yet they're both shoved into a singular queue // The order at which they're shoved in together isn't deterministic // // This is safe so long as Tributary and Substrate messages don't both expect mutable // references over the same data handle_coordinator_msg( &mut txn, &network, &mut coordinator, &mut tributary_mutable, &mut substrate_mutable, &msg, ).await; } outer_msg = Some(msg); }, scanner_event = substrate_mutable.next_scanner_event() => { let msg = substrate_mutable.scanner_event_to_multisig_event( &mut txn, &network, scanner_event ).await; match msg { MultisigEvent::Batches(retired_key_new_key, batches) => { // Start signing this batch for batch in batches { info!("created batch {} ({} instructions)", batch.id, batch.instructions.len()); // The coordinator expects BatchPreprocess to immediately follow Batch coordinator.send( messages::substrate::ProcessorMessage::Batch { batch: batch.clone() } ).await; if let Some(batch_signer) 
= tributary_mutable.batch_signer.as_mut() { if let Some(msg) = batch_signer.sign(&mut txn, batch) { coordinator.send(msg).await; } } } if let Some((retired_key, new_key)) = retired_key_new_key { // Safe to mutate since all signing operations are done and no more will be added if let Some(retired_session) = SessionDb::get(&txn, retired_key.to_bytes().as_ref()) { tributary_mutable.signers.remove(&retired_session); } tributary_mutable.batch_signer.take(); let keys = tributary_mutable.key_gen.keys(&new_key); if let Some((session, (substrate_keys, _))) = keys { tributary_mutable.batch_signer = Some(BatchSigner::new(N::NETWORK, session, substrate_keys)); } } }, MultisigEvent::Completed(key, id, tx) => { if let Some(session) = SessionDb::get(&txn, &key) { let signer = tributary_mutable.signers.get_mut(&session).unwrap(); if let Some(msg) = signer.completed(&mut txn, id, &tx) { coordinator.send(msg).await; } } } } }, } txn.commit(); if let Some(msg) = outer_msg { coordinator.ack(msg).await; } } } #[tokio::main] async fn main() { // Override the panic handler with one which will panic if any tokio task panics { let existing = std::panic::take_hook(); std::panic::set_hook(Box::new(move |panic| { existing(panic); const MSG: &str = "exiting the process due to a task panicking"; println!("{MSG}"); log::error!("{MSG}"); std::process::exit(1); })); } if std::env::var("RUST_LOG").is_err() { std::env::set_var("RUST_LOG", serai_env::var("RUST_LOG").unwrap_or_else(|| "info".to_string())); } env_logger::init(); #[allow(unused_variables, unreachable_code)] let db = { #[cfg(all(feature = "parity-db", feature = "rocksdb"))] panic!("built with parity-db and rocksdb"); #[cfg(all(feature = "parity-db", not(feature = "rocksdb")))] let db = serai_db::new_parity_db(&serai_env::var("DB_PATH").expect("path to DB wasn't specified")); #[cfg(feature = "rocksdb")] let db = serai_db::new_rocksdb(&serai_env::var("DB_PATH").expect("path to DB wasn't specified")); db }; // Network configuration let url 
= { let login = env::var("NETWORK_RPC_LOGIN").expect("network RPC login wasn't specified"); let hostname = env::var("NETWORK_RPC_HOSTNAME").expect("network RPC hostname wasn't specified"); let port = env::var("NETWORK_RPC_PORT").expect("network port domain wasn't specified"); "http://".to_string() + &login + "@" + &hostname + ":" + &port }; let network_id = match env::var("NETWORK").expect("network wasn't specified").as_str() { "bitcoin" => ExternalNetworkId::Bitcoin, "ethereum" => ExternalNetworkId::Ethereum, "monero" => ExternalNetworkId::Monero, _ => panic!("unrecognized network"), }; let coordinator = MessageQueue::from_env(Service::Processor(network_id)); // This allow is necessary since each configuration deletes the other networks from the following // match arms. So we match all cases but since all cases already there according to the compiler // we put this to allow clippy to get pass this. #[allow(unreachable_patterns)] match network_id { #[cfg(feature = "bitcoin")] ExternalNetworkId::Bitcoin => run(db, Bitcoin::new(url).await, coordinator).await, #[cfg(feature = "ethereum")] ExternalNetworkId::Ethereum => { let relayer_hostname = env::var("ETHEREUM_RELAYER_HOSTNAME") .expect("ethereum relayer hostname wasn't specified") .clone(); let relayer_port = env::var("ETHEREUM_RELAYER_PORT").expect("ethereum relayer port wasn't specified"); let relayer_url = relayer_hostname + ":" + &relayer_port; run(db.clone(), Ethereum::new(db, url, relayer_url).await, coordinator).await } #[cfg(feature = "monero")] ExternalNetworkId::Monero => run(db, Monero::new(url).await, coordinator).await, _ => panic!("spawning a processor for an unsupported network"), } } ================================================ FILE: processor/src/multisigs/db.rs ================================================ use std::io; use ciphersuite::Ciphersuite; pub use serai_db::*; use scale::{Encode, Decode}; #[rustfmt::skip] use serai_client::{ in_instructions::primitives::InInstructionWithBalance, 
primitives::ExternalBalance }; use crate::{ Get, Plan, networks::{Output, Transaction, Network}, }; #[derive(Clone, PartialEq, Eq, Debug)] pub enum PlanFromScanning { Refund(N::Output, N::Address), Forward(N::Output), } impl PlanFromScanning { fn read(reader: &mut R) -> io::Result { let mut kind = [0xff]; reader.read_exact(&mut kind)?; match kind[0] { 0 => { let output = N::Output::read(reader)?; let mut address_vec_len = [0; 4]; reader.read_exact(&mut address_vec_len)?; let mut address_vec = vec![0; usize::try_from(u32::from_le_bytes(address_vec_len)).unwrap()]; reader.read_exact(&mut address_vec)?; let address = N::Address::try_from(address_vec).map_err(|_| "invalid address saved to disk").unwrap(); Ok(PlanFromScanning::Refund(output, address)) } 1 => { let output = N::Output::read(reader)?; Ok(PlanFromScanning::Forward(output)) } _ => panic!("reading unrecognized PlanFromScanning"), } } fn write(&self, writer: &mut W) -> io::Result<()> { match self { PlanFromScanning::Refund(output, address) => { writer.write_all(&[0])?; output.write(writer)?; let address_vec: Vec = address.clone().try_into().map_err(|_| "invalid address being refunded to").unwrap(); writer.write_all(&u32::try_from(address_vec.len()).unwrap().to_le_bytes())?; writer.write_all(&address_vec) } PlanFromScanning::Forward(output) => { writer.write_all(&[1])?; output.write(writer) } } } } create_db!( MultisigsDb { NextBatchDb: () -> u32, PlanDb: (id: &[u8]) -> Vec, PlansFromScanningDb: (block_number: u64) -> Vec, OperatingCostsDb: () -> u64, ResolvedDb: (tx: &[u8]) -> [u8; 32], SigningDb: (key: &[u8]) -> Vec, ForwardedOutputDb: (balance: ExternalBalance) -> Vec, DelayedOutputDb: () -> Vec } ); impl PlanDb { pub fn save_active_plan( txn: &mut impl DbTxn, key: &[u8], block_number: usize, plan: &Plan, operating_costs_at_time: u64, ) { let id = plan.id(); { let mut signing = SigningDb::get(txn, key).unwrap_or_default(); // If we've already noted we're signing this, return assert_eq!(signing.len() % 32, 
0);
      for i in 0 .. (signing.len() / 32) {
        if signing[(i * 32) .. ((i + 1) * 32)] == id {
          return;
        }
      }
      signing.extend(&id);
      SigningDb::set(txn, key, &signing);
    }

    {
      // Layout: block number (8 bytes LE) || serialized plan || operating costs (8 bytes LE)
      let mut buf = block_number.to_le_bytes().to_vec();
      plan.write(&mut buf).unwrap();
      buf.extend(&operating_costs_at_time.to_le_bytes());
      Self::set(txn, &id, &buf);
    }
  }

  /// Returns every plan actively being signed for this key, alongside the block number it was
  /// created at and the operating costs at that time.
  pub fn active_plans<N: Network>(getter: &impl Get, key: &[u8]) -> Vec<(u64, Plan<N>, u64)> {
    let signing = SigningDb::get(getter, key).unwrap_or_default();
    let mut res = vec![];
    // The signing list is a concatenation of 32-byte plan IDs
    assert_eq!(signing.len() % 32, 0);
    for i in 0 .. (signing.len() / 32) {
      let id = &signing[(i * 32) .. ((i + 1) * 32)];
      let buf = Self::get(getter, id).unwrap();

      let block_number = u64::from_le_bytes(buf[.. 8].try_into().unwrap());
      let plan = Plan::<N>::read::<&[u8]>(&mut &buf[8 ..]).unwrap();
      assert_eq!(id, &plan.id());
      let operating_costs = u64::from_le_bytes(buf[(buf.len() - 8) ..].try_into().unwrap());
      res.push((block_number, plan, operating_costs));
    }
    res
  }

  /// Checks if the identified plan was created under this key with change back to this key.
  pub fn plan_by_key_with_self_change<N: Network>(
    getter: &impl Get,
    key: <N::Curve as Ciphersuite>::G,
    id: [u8; 32],
  ) -> bool {
    // Skip the 8-byte block number prefix when deserializing
    let plan = Plan::<N>::read::<&[u8]>(&mut &Self::get(getter, &id).unwrap()[8 ..]).unwrap();
    assert_eq!(plan.id(), id);
    if let Some(change) = N::change_address(plan.key) {
      (key == plan.key) && (Some(change) == plan.change)
    } else {
      false
    }
  }
}

impl OperatingCostsDb {
  /// Takes the accrued operating costs, clearing them from the DB.
  pub fn take_operating_costs(txn: &mut impl DbTxn) -> u64 {
    let existing = Self::get(txn).unwrap_or_default();
    txn.del(Self::key());
    existing
  }
  /// Sets the accrued operating costs, omitting the entry entirely when zero.
  pub fn set_operating_costs(txn: &mut impl DbTxn, amount: u64) {
    if amount != 0 {
      Self::set(txn, &amount);
    }
  }
}

impl ResolvedDb {
  /// Marks a plan as resolved by the specified transaction, removing it from the list of plans
  /// actively being signed for this key.
  pub fn resolve_plan<N: Network>(
    txn: &mut impl DbTxn,
    key: &[u8],
    plan: [u8; 32],
    resolution: &<N::Transaction as Transaction<N>>::Id,
  ) {
    let mut signing = SigningDb::get(txn, key).unwrap_or_default();
    assert_eq!(signing.len() % 32, 0);
    let mut found = false;
    for i in 0 .. (signing.len() / 32) {
      let start = i * 32;
      // Each entry is a 32-byte plan ID. This was previously `i + 32`, which only produced a
      // correctly-sized window for i == 0 — every later entry could never match, leaving
      // resolved plans in the signing list forever
      let end = start + 32;
      if signing[start .. end] == plan {
        found = true;
        signing = [&signing[..
start], &signing[end ..]].concat();
        break;
      }
    }
    if !found {
      log::warn!("told to finish signing {} yet wasn't actively signing it", hex::encode(plan));
    }
    SigningDb::set(txn, key, &signing);
    Self::set(txn, resolution.as_ref(), &plan);
  }
}

impl PlansFromScanningDb {
  /// Saves the plans created from scanning this block, serialized back-to-back.
  pub fn set_plans_from_scanning<N: Network>(
    txn: &mut impl DbTxn,
    block_number: usize,
    plans: Vec<PlanFromScanning<N>>,
  ) {
    let mut buf = vec![];
    for plan in plans {
      plan.write(&mut buf).unwrap();
    }
    Self::set(txn, block_number.try_into().unwrap(), &buf);
  }

  /// Takes the plans created from scanning this block, removing them from the DB.
  pub fn take_plans_from_scanning<N: Network>(
    txn: &mut impl DbTxn,
    block_number: usize,
  ) -> Option<Vec<PlanFromScanning<N>>> {
    let block_number = u64::try_from(block_number).unwrap();
    let res = Self::get(txn, block_number).map(|plans| {
      let mut plans_ref = plans.as_slice();
      let mut res = vec![];
      while !plans_ref.is_empty() {
        res.push(PlanFromScanning::<N>::read(&mut plans_ref).unwrap());
      }
      res
    });
    if res.is_some() {
      txn.del(Self::key(block_number));
    }
    res
  }
}

impl ForwardedOutputDb {
  /// Queues a forwarded output's instruction, appending to any instructions already queued under
  /// this balance.
  pub fn save_forwarded_output(txn: &mut impl DbTxn, instruction: &InInstructionWithBalance) {
    let mut existing = Self::get(txn, instruction.balance).unwrap_or_default();
    existing.extend(instruction.encode());
    Self::set(txn, instruction.balance, &existing);
  }

  /// Takes the first queued instruction for this balance, removing it from the queue.
  pub fn take_forwarded_output(
    txn: &mut impl DbTxn,
    balance: ExternalBalance,
  ) -> Option<InInstructionWithBalance> {
    let outputs = Self::get(txn, balance)?;
    let mut outputs_ref = outputs.as_slice();
    // Decoding advances outputs_ref past the decoded instruction
    let res = InInstructionWithBalance::decode(&mut outputs_ref).unwrap();
    assert!(outputs_ref.len() < outputs.len());
    if outputs_ref.is_empty() {
      txn.del(Self::key(balance));
    } else {
      // Write back only the undecoded remainder. This previously wrote back the full original
      // buffer, so the taken instruction was never consumed and every subsequent call returned
      // the same instruction
      Self::set(txn, balance, &outputs_ref.to_vec());
    }
    Some(res)
  }
}

impl DelayedOutputDb {
  /// Queues a delayed output's instruction.
  pub fn save_delayed_output(txn: &mut impl DbTxn, instruction: &InInstructionWithBalance) {
    let mut existing = Self::get(txn).unwrap_or_default();
    existing.extend(instruction.encode());
    Self::set(txn, &existing);
  }

  /// Takes all delayed outputs, clearing them from the DB.
  pub fn take_delayed_outputs(txn: &mut impl DbTxn) -> Vec<InInstructionWithBalance> {
    let Some(outputs) = Self::get(txn) else { return vec![] };
    txn.del(Self::key());
    let mut outputs_ref
= outputs.as_slice(); let mut res = vec![]; while !outputs_ref.is_empty() { res.push(InInstructionWithBalance::decode(&mut outputs_ref).unwrap()); } res } } ================================================ FILE: processor/src/multisigs/mod.rs ================================================ use core::time::Duration; use std::collections::HashSet; use ciphersuite::{group::GroupEncoding, Ciphersuite}; use scale::{Encode, Decode}; use messages::SubstrateContext; use serai_client::{ primitives::{MAX_DATA_LEN, ExternalAddress, BlockHash, Data}, in_instructions::primitives::{ InInstructionWithBalance, Batch, RefundableInInstruction, Shorthand, MAX_BATCH_SIZE, }, coins::primitives::{OutInstruction, OutInstructionWithBalance}, }; use log::{info, error}; use tokio::time::sleep; #[cfg(not(test))] mod scanner; #[cfg(test)] pub mod scanner; use scanner::{ScannerEvent, ScannerHandle, Scanner}; mod db; use db::*; pub(crate) mod scheduler; use scheduler::Scheduler; use crate::{ Get, Db, Payment, Plan, networks::{OutputType, Output, SignableTransaction, Eventuality, Block, PreparedSend, Network}, }; // InInstructionWithBalance from an external output fn instruction_from_output( output: &N::Output, ) -> (Option, Option) { assert_eq!(output.kind(), OutputType::External); let presumed_origin = output.presumed_origin().map(|address| { ExternalAddress::new( address .try_into() .map_err(|_| ()) .expect("presumed origin couldn't be converted to a Vec"), ) .expect("presumed origin exceeded address limits") }); let mut data = output.data(); let max_data_len = usize::try_from(MAX_DATA_LEN).unwrap(); if data.len() > max_data_len { error!( "data in output {} exceeded MAX_DATA_LEN ({MAX_DATA_LEN}): {}. 
skipping", hex::encode(output.id()), data.len(), ); return (presumed_origin, None); } let shorthand = match Shorthand::decode(&mut data) { Ok(shorthand) => shorthand, Err(e) => { info!("data in output {} wasn't valid shorthand: {e:?}", hex::encode(output.id())); return (presumed_origin, None); } }; let instruction = match RefundableInInstruction::try_from(shorthand) { Ok(instruction) => instruction, Err(e) => { info!( "shorthand in output {} wasn't convertible to a RefundableInInstruction: {e:?}", hex::encode(output.id()) ); return (presumed_origin, None); } }; let mut balance = output.balance(); // Deduct twice the cost to aggregate to prevent economic attacks by malicious miners against // other users balance.amount.0 -= 2 * N::COST_TO_AGGREGATE; ( instruction.origin.or(presumed_origin), Some(InInstructionWithBalance { instruction: instruction.instruction, balance }), ) } #[derive(Clone, Copy, PartialEq, Eq, Debug)] enum RotationStep { // Use the existing multisig for all actions (steps 1-3) UseExisting, // Use the new multisig as change (step 4) NewAsChange, // The existing multisig is expected to solely forward transactions at this point (step 5) ForwardFromExisting, // The existing multisig is expected to finish its own transactions and do nothing more // (step 6) ClosingExisting, } // This explicitly shouldn't take the database as we prepare Plans we won't execute for fee // estimates async fn prepare_send( network: &N, block_number: usize, plan: Plan, operating_costs: u64, ) -> PreparedSend { loop { match network.prepare_send(block_number, plan.clone(), operating_costs).await { Ok(prepared) => { return prepared; } Err(e) => { error!("couldn't prepare a send for plan {}: {e}", hex::encode(plan.id())); // The processor is either trying to create an invalid TX (fatal) or the node went // offline // The former requires a patch, the latter is a connection issue // If the latter, this is an appropriate sleep. 
If the former, we should panic, yet // this won't flood the console ad infinitum sleep(Duration::from_secs(60)).await; } } } } pub struct MultisigViewer { activation_block: usize, key: ::G, scheduler: N::Scheduler, } #[allow(clippy::type_complexity)] #[derive(Clone, Debug)] pub enum MultisigEvent { // Batches to publish Batches(Option<(::G, ::G)>, Vec), // Eventuality completion found on-chain Completed(Vec, [u8; 32], ::Completion), } pub struct MultisigManager { scanner: ScannerHandle, existing: Option>, new: Option>, } impl MultisigManager { pub async fn new( raw_db: &D, network: &N, ) -> ( Self, Vec<::G>, Vec<(Plan, N::SignableTransaction, N::Eventuality)>, ) { // The scanner has no long-standing orders to re-issue let (mut scanner, current_keys) = Scanner::new(network.clone(), raw_db.clone()); let mut schedulers = vec![]; assert!(current_keys.len() <= 2); let mut actively_signing = vec![]; for (_, key) in ¤t_keys { schedulers.push(N::Scheduler::from_db(raw_db, *key, N::NETWORK).unwrap()); // Load any TXs being actively signed let key = key.to_bytes(); for (block_number, plan, operating_costs) in PlanDb::active_plans::(raw_db, key.as_ref()) { let block_number = block_number.try_into().unwrap(); let id = plan.id(); info!("reloading plan {}: {:?}", hex::encode(id), plan); let key_bytes = plan.key.to_bytes(); let Some((tx, eventuality)) = prepare_send(network, block_number, plan.clone(), operating_costs).await.tx else { panic!("previously created transaction is no longer being created") }; scanner .register_eventuality(key_bytes.as_ref(), block_number, id, eventuality.clone()) .await; actively_signing.push((plan, tx, eventuality)); } } ( MultisigManager { scanner, existing: current_keys.first().copied().map(|(activation_block, key)| MultisigViewer { activation_block, key, scheduler: schedulers.remove(0), }), new: current_keys.get(1).copied().map(|(activation_block, key)| MultisigViewer { activation_block, key, scheduler: schedulers.remove(0), }), }, 
current_keys.into_iter().map(|(_, key)| key).collect(), actively_signing, ) } /// Returns the block number for a block hash, if it's known and all keys have scanned the block. // This is guaranteed to atomically increment so long as no new keys are added to the scanner // which activate at a block before the currently highest scanned block. This is prevented by // the processor waiting for `Batch` inclusion before scanning too far ahead, and activation only // happening after the "too far ahead" window. pub async fn block_number( &self, getter: &G, hash: &>::Id, ) -> Option { let latest = ScannerHandle::::block_number(getter, hash)?; // While the scanner has cemented this block, that doesn't mean it's been scanned for all // keys // ram_scanned will return the lowest scanned block number out of all keys if latest > self.scanner.ram_scanned().await { return None; } Some(latest) } pub async fn add_key( &mut self, txn: &mut D::Transaction<'_>, activation_block: usize, external_key: ::G, ) { self.scanner.register_key(txn, activation_block, external_key).await; let viewer = Some(MultisigViewer { activation_block, key: external_key, scheduler: N::Scheduler::new::(txn, external_key, N::NETWORK), }); if self.existing.is_none() { self.existing = viewer; return; } self.new = viewer; } fn current_rotation_step(&self, block_number: usize) -> RotationStep { let Some(new) = self.new.as_ref() else { return RotationStep::UseExisting }; // Period numbering here has no meaning other than these are the time values useful here, and // the order they're calculated in. They have no reference/shared marker with anything else // ESTIMATED_BLOCK_TIME_IN_SECONDS is fine to use here. While inaccurate, it shouldn't be // drastically off, and even if it is, it's a hiccup to latency handling only possible when // rotating. The error rate wouldn't be acceptable if it was allowed to accumulate over time, // yet rotation occurs on Serai's clock, disconnecting any errors here from any prior. 
// N::CONFIRMATIONS + 10 minutes let period_1_start = new.activation_block + N::CONFIRMATIONS + (10usize * 60).div_ceil(N::ESTIMATED_BLOCK_TIME_IN_SECONDS); // N::CONFIRMATIONS let period_2_start = period_1_start + N::CONFIRMATIONS; // 6 hours after period 2 // Also ensure 6 hours is greater than the amount of CONFIRMATIONS, for sanity purposes let period_3_start = period_2_start + ((6 * 60 * 60) / N::ESTIMATED_BLOCK_TIME_IN_SECONDS).max(N::CONFIRMATIONS); if block_number < period_1_start { RotationStep::UseExisting } else if block_number < period_2_start { RotationStep::NewAsChange } else if block_number < period_3_start { RotationStep::ForwardFromExisting } else { RotationStep::ClosingExisting } } // Convert new Burns to Payments. // // Also moves payments from the old Scheduler to the new multisig if the step calls for it. fn burns_to_payments( &mut self, txn: &mut D::Transaction<'_>, step: RotationStep, burns: Vec, ) -> (Vec>, Vec>) { let mut payments = vec![]; for out in burns { let OutInstructionWithBalance { instruction: OutInstruction { address, data }, balance } = out; assert_eq!(balance.coin.network(), N::NETWORK); if let Ok(address) = N::Address::try_from(address.consume()) { payments.push(Payment { address, data: data.map(Data::consume), balance }); } } let payments = payments; match step { RotationStep::UseExisting | RotationStep::NewAsChange => (payments, vec![]), RotationStep::ForwardFromExisting | RotationStep::ClosingExisting => { // Consume any payments the prior scheduler was unable to complete // This should only actually matter once let mut new_payments = self.existing.as_mut().unwrap().scheduler.consume_payments::(txn); // Add the new payments new_payments.extend(payments); (vec![], new_payments) } } } fn split_outputs_by_key(&self, outputs: Vec) -> (Vec, Vec) { let mut existing_outputs = Vec::with_capacity(outputs.len()); let mut new_outputs = vec![]; let existing_key = self.existing.as_ref().unwrap().key; let new_key = 
self.new.as_ref().map(|new| new.key); for output in outputs { if output.key() == existing_key { existing_outputs.push(output); } else { assert_eq!(Some(output.key()), new_key); new_outputs.push(output); } } (existing_outputs, new_outputs) } fn refund_plan( scheduler: &mut N::Scheduler, txn: &mut D::Transaction<'_>, output: N::Output, refund_to: N::Address, ) -> Plan { log::info!("creating refund plan for {}", hex::encode(output.id())); assert_eq!(output.kind(), OutputType::External); scheduler.refund_plan::(txn, output, refund_to) } // Returns the plan for forwarding if one is needed. // Returns None if one is not needed to forward this output. fn forward_plan(&mut self, txn: &mut D::Transaction<'_>, output: &N::Output) -> Option> { log::info!("creating forwarding plan for {}", hex::encode(output.id())); let res = self.existing.as_mut().unwrap().scheduler.forward_plan::( txn, output.clone(), self.new.as_ref().expect("forwarding plan yet no new multisig").key, ); if res.is_none() { log::info!("no forwarding plan was necessary for {}", hex::encode(output.id())); } res } // Filter newly received outputs due to the step being RotationStep::ClosingExisting. // // Returns the Plans for the `Branch`s which should be created off outputs which passed the // filter. fn filter_outputs_due_to_closing( &mut self, txn: &mut D::Transaction<'_>, existing_outputs: &mut Vec, ) -> Vec> { /* The document says to only handle outputs we created. We don't know what outputs we created. We do have an ordered view of equivalent outputs however, and can assume the first (and likely only) ones are the ones we created. Accordingly, only handling outputs we created should be definable as only handling outputs from the resolution of Eventualities. This isn't feasible. It requires knowing what Eventualities were completed in this block, when we handle this block, which we don't know without fully serialized scanning + Batch publication. 
Take the following scenario: 1) A network uses 10 confirmations. Block x is scanned, meaning x+9a exists. 2) 67% of nodes process x, create, sign, and publish a TX, creating an Eventuality. 3) A reorganization to a shorter chain occurs, including the published TX in x+1b. 4) The 33% of nodes which are latent will be allowed to scan x+1b as soon as x+10b exists. They won't wait for Serai to include the Batch for x until they try to scan x+10b. 5) These latent nodes will handle x+1b, post-create an Eventuality, post-learn x+1b contained resolutions, changing how x+1b should've been interpreted. We either have to: A) Fully serialize scanning (removing the ability to utilize throughput to allow higher latency, at least while the step is `ClosingExisting`). B) Create Eventualities immediately, which we can't do as then both the external network's clock AND Serai's clock can trigger Eventualities, removing ordering. We'd need to shift entirely to the external network's clock, only handling Burns outside the parallelization window (which would be extremely latent). C) Use a different mechanism to determine if we created an output. D) Re-define which outputs are still to be handled after the 6 hour period expires, such that the multisig's lifetime cannot be further extended yet it does fulfill its responsibility. External outputs to the existing multisig will be: - Scanned before the rotation and unused (as used External outputs become Change) - Forwarded immediately upon scanning - Not scanned before the cut off time (and accordingly dropped) For the first case, since they're scanned before the rotation and unused, they'll be forwarded with all other available outputs (since they'll be available when scanned). 
Change outputs will be: - Scanned before the rotation and forwarded with all other available outputs - Forwarded immediately upon scanning - Not scanned before the cut off time, requiring an extension exclusive to these outputs The important thing to note about honest Change outputs to the existing multisig is that they'll only be created within `CONFIRMATIONS+1` blocks of the activation block. Also important to note is that there's another explicit window of `CONFIRMATIONS` before the 6 hour window. Eventualities are not guaranteed to be known before we scan the block containing their resolution. They are guaranteed to be known within `CONFIRMATIONS-1` blocks however, due to the limitation on how far we'll scan ahead. This means we will know of all Eventualities related to Change outputs we need to forward before the 6 hour period begins (as forwarding outputs will not create any Change outputs to the existing multisig). This means a definition of complete can be defined as: 1) Handled all Branch outputs 2) Forwarded all External outputs received before the end of 6 hour window 3) Forwarded the results of all Eventualities with Change, which will have been created before the 6 hour window How can we track and ensure this without needing to check if an output is from the resolution of an Eventuality? 1) We only create Branch outputs before the 6 hour window starts. These are guaranteed to appear within `CONFIRMATIONS` blocks. They will exist with arbitrary depth however, meaning that upon completion they will spawn several more Eventualities. The further created Eventualities re-risk being present after the 6 hour period ends. We can: 1) Build a queue for Branch outputs, delaying their handling until relevant Eventualities are guaranteed to be present. 
This solution would theoretically work for all outputs and allow collapsing this problem to simply: > Accordingly, only handling outputs we created should be definable as only handling outputs from the resolution of Eventualities. 2) Create all Eventualities under a Branch at time of Branch creation. This idea fails as Plans are tightly bound to outputs. 3) Don't track Branch outputs by Eventualities, yet by the amount of Branch outputs remaining. Any Branch output received, of a useful amount, is assumed to be our own and handled. All other Branch outputs, even if they're the completion of some Eventuality, are dropped. This avoids needing any additional queue, avoiding additional pipelining/latency. 2) External outputs are self-evident. We simply stop handling them at the cut-off point, and only start checking after `CONFIRMATIONS` blocks if all Eventualities are complete. 3) Since all Change Eventualities will be known prior to the 6 hour window's beginning, we can safely check if a received Change output is the resolution of an Eventuality. We only need to forward it if so. Forwarding it simply requires only checking if Eventualities are complete after `CONFIRMATIONS` blocks, same as for straggling External outputs. 
*/ let mut plans = vec![]; existing_outputs.retain(|output| { match output.kind() { OutputType::External | OutputType::Forwarded => false, OutputType::Branch => { let scheduler = &mut self.existing.as_mut().unwrap().scheduler; // There *would* be a race condition here due to the fact we only mark a `Branch` output // as needed when we process the block (and handle scheduling), yet actual `Branch` // outputs may appear as soon as the next block (and we scan the next block before we // process the prior block) // // Unlike Eventuality checking, which happens on scanning and is therefore asynchronous, // all scheduling (and this check against the scheduler) happens on processing, which is // synchronous // // While we could move Eventuality checking into the block processing, removing its // asynchonicity, we could only check data the Scanner deems important. The Scanner won't // deem important Eventuality resolutions which don't create an output to Serai unless // it knows of the Eventuality. Accordingly, at best we could have a split role (the // Scanner noting completion of Eventualities which don't have relevant outputs, the // processing noting completion of ones which do) // // This is unnecessary, due to the current flow around Eventuality resolutions and the // current bounds naturally found being sufficiently amenable, yet notable for the future if scheduler.can_use_branch(output.balance()) { // We could simply call can_use_branch, yet it'd have an edge case where if we receive // two outputs for 100, and we could use one such output, we'd handle both. // // Individually schedule each output once confirming they're usable in order to avoid // this. let mut plan = scheduler.schedule::( txn, vec![output.clone()], vec![], self.new.as_ref().unwrap().key, false, ); assert_eq!(plan.len(), 1); let plan = plan.remove(0); plans.push(plan); } false } OutputType::Change => { // If the TX containing this output resolved an Eventuality... 
if let Some(plan) = ResolvedDb::get(txn, output.tx_id().as_ref()) { // And the Eventuality had change... // We need this check as Eventualities have a race condition and can't be relied // on, as extensively detailed above. Eventualities explicitly with change do have // a safe timing window however if PlanDb::plan_by_key_with_self_change::( txn, // Pass the key so the DB checks the Plan's key is this multisig's, preventing a // potential issue where the new multisig creates a Plan with change *and a // payment to the existing multisig's change address* self.existing.as_ref().unwrap().key, plan, ) { // Then this is an honest change output we need to forward // (or it's a payment to the change address in the same transaction as an honest // change output, which is fine to let slip in) return true; } } false } } }); plans } // Returns the Plans caused from a block being acknowledged. // // Will rotate keys if the block acknowledged is the retirement block. async fn plans_from_block( &mut self, txn: &mut D::Transaction<'_>, block_number: usize, block_id: >::Id, step: &mut RotationStep, burns: Vec, ) -> (bool, Vec>, HashSet<[u8; 32]>) { let (mut existing_payments, mut new_payments) = self.burns_to_payments(txn, *step, burns); let mut plans = vec![]; let mut plans_from_scanning = HashSet::new(); // We now have to acknowledge the acknowledged block, if it's new // It won't be if this block's `InInstruction`s were split into multiple `Batch`s let (acquired_lock, (mut existing_outputs, new_outputs)) = { let (acquired_lock, mut outputs) = if ScannerHandle::::db_scanned(txn) .expect("published a Batch despite never scanning a block") < block_number { // Load plans crated when we scanned the block let scanning_plans = PlansFromScanningDb::take_plans_from_scanning::(txn, block_number).unwrap(); // Expand into actual plans plans = scanning_plans .into_iter() .map(|plan| match plan { PlanFromScanning::Refund(output, refund_to) => { let existing = self.existing.as_mut().unwrap(); 
if output.key() == existing.key { Self::refund_plan(&mut existing.scheduler, txn, output, refund_to) } else { let new = self .new .as_mut() .expect("new multisig didn't expect yet output wasn't for existing multisig"); assert_eq!(output.key(), new.key, "output wasn't for existing nor new multisig"); Self::refund_plan(&mut new.scheduler, txn, output, refund_to) } } PlanFromScanning::Forward(output) => self .forward_plan(txn, &output) .expect("supposed to forward an output yet no forwarding plan"), }) .collect(); for plan in &plans { plans_from_scanning.insert(plan.id()); } let (is_retirement_block, outputs) = self.scanner.ack_block(txn, block_id.clone()).await; if is_retirement_block { let existing = self.existing.take().unwrap(); assert!(existing.scheduler.empty()); self.existing = self.new.take(); *step = RotationStep::UseExisting; assert!(existing_payments.is_empty()); existing_payments = new_payments; new_payments = vec![]; } (true, outputs) } else { (false, vec![]) }; // Remove all outputs already present in plans let mut output_set = HashSet::new(); for plan in &plans { for input in &plan.inputs { output_set.insert(input.id().as_ref().to_vec()); } } outputs.retain(|output| !output_set.remove(output.id().as_ref())); assert_eq!(output_set.len(), 0); (acquired_lock, self.split_outputs_by_key(outputs)) }; // If we're closing the existing multisig, filter its outputs down if *step == RotationStep::ClosingExisting { plans.extend(self.filter_outputs_due_to_closing(txn, &mut existing_outputs)); } // Now that we've done all our filtering, schedule the existing multisig's outputs plans.extend({ let existing = self.existing.as_mut().unwrap(); let existing_key = existing.key; self.existing.as_mut().unwrap().scheduler.schedule::( txn, existing_outputs, existing_payments, match *step { RotationStep::UseExisting => existing_key, RotationStep::NewAsChange | RotationStep::ForwardFromExisting | RotationStep::ClosingExisting => self.new.as_ref().unwrap().key, }, match *step { 
RotationStep::UseExisting | RotationStep::NewAsChange => false, RotationStep::ForwardFromExisting | RotationStep::ClosingExisting => true, }, ) }); for plan in &plans { // This first equality should 'never meaningfully' be false // All created plans so far are by the existing multisig EXCEPT: // A) If we created a refund plan from the new multisig (yet that wouldn't have change) // B) The existing Scheduler returned a Plan for the new key (yet that happens with the SC // scheduler, yet that doesn't have change) // Despite being 'unnecessary' now, it's better to explicitly ensure and be robust if plan.key == self.existing.as_ref().unwrap().key { if let Some(change) = N::change_address(plan.key) { if plan.change == Some(change) { // Assert these (self-change) are only created during the expected step match *step { RotationStep::UseExisting => {} RotationStep::NewAsChange | RotationStep::ForwardFromExisting | RotationStep::ClosingExisting => panic!("change was set to self despite rotating"), } } } } } // Schedule the new multisig's outputs too if let Some(new) = self.new.as_mut() { plans.extend(new.scheduler.schedule::(txn, new_outputs, new_payments, new.key, false)); } (acquired_lock, plans, plans_from_scanning) } /// Handle a SubstrateBlock event, building the relevant Plans. 
pub async fn substrate_block( &mut self, txn: &mut D::Transaction<'_>, network: &N, context: SubstrateContext, burns: Vec, ) -> (bool, Vec<(::G, [u8; 32], N::SignableTransaction, N::Eventuality)>) { let mut block_id = >::Id::default(); block_id.as_mut().copy_from_slice(context.network_latest_finalized_block.as_ref()); let block_number = ScannerHandle::::block_number(txn, &block_id) .expect("SubstrateBlock with context we haven't synced"); // Determine what step of rotation we're currently in let mut step = self.current_rotation_step(block_number); // Get the Plans from this block let (acquired_lock, plans, plans_from_scanning) = self.plans_from_block(txn, block_number, block_id, &mut step, burns).await; let res = { let mut res = Vec::with_capacity(plans.len()); for plan in plans { let id = plan.id(); info!("preparing plan {}: {:?}", hex::encode(id), plan); let key = plan.key; let key_bytes = key.to_bytes(); let (tx, post_fee_branches) = { let running_operating_costs = OperatingCostsDb::take_operating_costs(txn); PlanDb::save_active_plan::( txn, key_bytes.as_ref(), block_number, &plan, running_operating_costs, ); // If this Plan is from the scanner handler below, don't take the opportunity to amortze // operating costs // It operates with limited context, and on a different clock, making it nable to react // to operating costs // Despite this, in order to properly save forwarded outputs' instructions, it needs to // know the actual value forwarded outputs will be created with // Including operating costs prevents that let from_scanning = plans_from_scanning.contains(&plan.id()); let to_use_operating_costs = if from_scanning { 0 } else { running_operating_costs }; let PreparedSend { tx, post_fee_branches, mut operating_costs } = prepare_send(network, block_number, plan, to_use_operating_costs).await; // Restore running_operating_costs to operating_costs if from_scanning { // If we're forwarding (or refunding) this output, operating_costs should still be 0 // Either 
this TX wasn't created, causing no operating costs, or it was yet it'd be // amortized assert_eq!(operating_costs, 0); operating_costs += running_operating_costs; } OperatingCostsDb::set_operating_costs(txn, operating_costs); (tx, post_fee_branches) }; for branch in post_fee_branches { let existing = self.existing.as_mut().unwrap(); let to_use = if key == existing.key { existing } else { let new = self .new .as_mut() .expect("plan wasn't for existing multisig yet there wasn't a new multisig"); assert_eq!(key, new.key); new }; to_use.scheduler.created_output::(txn, branch.expected, branch.actual); } if let Some((tx, eventuality)) = tx { // The main function we return to will send an event to the coordinator which must be // fired before these registered Eventualities have their Completions fired // Safety is derived from a mutable lock on the Scanner being preserved, preventing // scanning (and detection of Eventuality resolutions) before it's released // It's only released by the main function after it does what it will self .scanner .register_eventuality(key_bytes.as_ref(), block_number, id, eventuality.clone()) .await; res.push((key, id, tx, eventuality)); } // TODO: If the TX is None, restore its inputs to the scheduler for efficiency's sake // If this TODO is removed, also reduce the operating costs } res }; (acquired_lock, res) } pub async fn release_scanner_lock(&mut self) { self.scanner.release_lock().await; } pub async fn scanner_event_to_multisig_event( &self, txn: &mut D::Transaction<'_>, network: &N, msg: ScannerEvent, ) -> MultisigEvent { let (block_number, event) = match msg { ScannerEvent::Block { is_retirement_block, block, mut outputs } => { // Since the Scanner is asynchronous, the following is a concern for race conditions // We safely know the step of a block since keys are declared, and the Scanner is safe // with respect to the declaration of keys // Accordingly, the following calls regarding new keys and step should be safe let block_number = 
ScannerHandle::::block_number(txn, &block) .expect("didn't have the block number for a block we just scanned"); let step = self.current_rotation_step(block_number); // Instructions created from this block let mut instructions = vec![]; // If any of these outputs were forwarded, create their instruction now for output in &outputs { if output.kind() != OutputType::Forwarded { continue; } if let Some(instruction) = ForwardedOutputDb::take_forwarded_output(txn, output.balance()) { instructions.push(instruction); } } // If the remaining outputs aren't externally received funds, don't handle them as // instructions outputs.retain(|output| output.kind() == OutputType::External); // These plans are of limited context. They're only allowed the outputs newly received // within this block and are intended to handle forwarding transactions/refunds let mut plans = vec![]; // If the old multisig is explicitly only supposed to forward, create all such plans now if step == RotationStep::ForwardFromExisting { let mut i = 0; while i < outputs.len() { let output = &outputs[i]; let plans = &mut plans; let txn = &mut *txn; #[allow(clippy::redundant_closure_call)] let should_retain = (|| async move { // If this output doesn't belong to the existing multisig, it shouldn't be forwarded if output.key() != self.existing.as_ref().unwrap().key { return true; } let plans_at_start = plans.len(); let (refund_to, instruction) = instruction_from_output::(output); if let Some(mut instruction) = instruction { let Some(shimmed_plan) = N::Scheduler::shim_forward_plan( output.clone(), self.new.as_ref().expect("forwarding from existing yet no new multisig").key, ) else { // If this network doesn't need forwarding, report the output now return true; }; plans.push(PlanFromScanning::::Forward(output.clone())); // Set the instruction for this output to be returned // We need to set it under the amount it's forwarded with, so prepare its forwarding // TX to determine the fees involved let PreparedSend { tx, 
post_fee_branches: _, operating_costs } = prepare_send(network, block_number, shimmed_plan, 0).await; // operating_costs should not increase in a forwarding TX assert_eq!(operating_costs, 0); // If this actually forwarded any coins, save the output as forwarded // If this didn't create a TX, we don't bother saving the output as forwarded // The fact we already created and pushed a plan still using this output will cause // it to not be retained here, and later the plan will be dropped as this did here, // letting it die out if let Some(tx) = &tx { instruction.balance.amount.0 -= tx.0.fee(); /* Sending a Plan, with arbitrary data proxying the InInstruction, would require adding a flow for networks which drop their data to still embed arbitrary data. It'd also have edge cases causing failures (we'd need to manually provide the origin if it was implied, which may exceed the encoding limit). Instead, we save the InInstruction as we scan this output. Then, when the output is successfully forwarded, we simply read it from the local database. This also saves the costs of embedding arbitrary data. Since we can't rely on the Eventuality system to detect if it's a forwarded transaction, due to the asynchonicity of the Eventuality system, we instead interpret an Forwarded output which has an amount associated with an InInstruction which was forwarded as having been forwarded. 
*/ ForwardedOutputDb::save_forwarded_output(txn, &instruction); } } else if let Some(refund_to) = refund_to { if let Ok(refund_to) = refund_to.consume().try_into() { // Build a dedicated Plan refunding this plans.push(PlanFromScanning::Refund(output.clone(), refund_to)); } } // Only keep if we didn't make a Plan consuming it plans_at_start == plans.len() })() .await; if should_retain { i += 1; continue; } outputs.remove(i); } } for output in outputs { // If this is an External transaction to the existing multisig, and we're either solely // forwarding or closing the existing multisig, drop it // In the case of the forwarding case, we'll report it once it hits the new multisig if (match step { RotationStep::UseExisting | RotationStep::NewAsChange => false, RotationStep::ForwardFromExisting | RotationStep::ClosingExisting => true, }) && (output.key() == self.existing.as_ref().unwrap().key) { continue; } let (refund_to, instruction) = instruction_from_output::(&output); let Some(instruction) = instruction else { if let Some(refund_to) = refund_to { if let Ok(refund_to) = refund_to.consume().try_into() { plans.push(PlanFromScanning::Refund(output.clone(), refund_to)); } } continue; }; // Delay External outputs received to new multisig earlier than expected if Some(output.key()) == self.new.as_ref().map(|new| new.key) { match step { RotationStep::UseExisting => { DelayedOutputDb::save_delayed_output(txn, &instruction); continue; } RotationStep::NewAsChange | RotationStep::ForwardFromExisting | RotationStep::ClosingExisting => {} } } instructions.push(instruction); } // Save the plans created while scanning // TODO: Should we combine all of these plans to reduce the fees incurred from their // execution? They're refunds and forwards. Neither should need isolate Plan/Eventualities. 
PlansFromScanningDb::set_plans_from_scanning(txn, block_number, plans); // If any outputs were delayed, append them into this block match step { RotationStep::UseExisting => {} RotationStep::NewAsChange | RotationStep::ForwardFromExisting | RotationStep::ClosingExisting => { instructions.extend(DelayedOutputDb::take_delayed_outputs(txn)); } } let mut block_hash = [0; 32]; block_hash.copy_from_slice(block.as_ref()); let mut batch_id = NextBatchDb::get(txn).unwrap_or_default(); // start with empty batch let mut batches = vec![Batch { network: N::NETWORK, id: batch_id, block: BlockHash(block_hash), instructions: vec![], }]; for instruction in instructions { let batch = batches.last_mut().unwrap(); batch.instructions.push(instruction); // check if batch is over-size if batch.encode().len() > MAX_BATCH_SIZE { // pop the last instruction so it's back in size let instruction = batch.instructions.pop().unwrap(); // bump the id for the new batch batch_id += 1; // make a new batch with this instruction included batches.push(Batch { network: N::NETWORK, id: batch_id, block: BlockHash(block_hash), instructions: vec![instruction], }); } } // Save the next batch ID NextBatchDb::set(txn, &(batch_id + 1)); ( block_number, MultisigEvent::Batches( if is_retirement_block { Some((self.existing.as_ref().unwrap().key, self.new.as_ref().unwrap().key)) } else { None }, batches, ), ) } // This must be emitted before ScannerEvent::Block for all completions of known Eventualities // within the block. Unknown Eventualities may have their Completed events emitted after // ScannerEvent::Block however. 
ScannerEvent::Completed(key, block_number, id, tx_id, completion) => { ResolvedDb::resolve_plan::(txn, &key, id, &tx_id); (block_number, MultisigEvent::Completed(key, id, completion)) } }; // If we either received a Block event (which will be the trigger when we have no // Plans/Eventualities leading into ClosingExisting), or we received the last Completed for // this multisig, set its retirement block let existing = self.existing.as_ref().unwrap(); // This multisig is closing let closing = self.current_rotation_step(block_number) == RotationStep::ClosingExisting; // There's nothing left in its Scheduler. This call is safe as: // 1) When ClosingExisting, all outputs should've been already forwarded, preventing // new UTXOs from accumulating. // 2) No new payments should be issued. // 3) While there may be plans, they'll be dropped to create Eventualities. // If this Eventuality is resolved, the Plan has already been dropped. // 4) If this Eventuality will trigger a Plan, it'll still be in the plans HashMap. let scheduler_is_empty = closing && existing.scheduler.empty(); // Nothing is still being signed let no_active_plans = scheduler_is_empty && PlanDb::active_plans::(txn, existing.key.to_bytes().as_ref()).is_empty(); self .scanner .multisig_completed // The above explicitly included their predecessor to ensure short-circuiting, yet their // names aren't defined as an aggregate check. 
Still including all three here ensures all are // used in the final value .send(closing && scheduler_is_empty && no_active_plans) .unwrap(); event } pub async fn next_scanner_event(&mut self) -> ScannerEvent { self.scanner.events.recv().await.unwrap() } } ================================================ FILE: processor/src/multisigs/scanner.rs ================================================ use core::marker::PhantomData; use std::{ sync::Arc, io::Read, time::Duration, collections::{VecDeque, HashSet, HashMap}, }; use ciphersuite::group::GroupEncoding; use frost::curve::Ciphersuite; use log::{info, debug, warn}; use tokio::{ sync::{RwLockReadGuard, RwLockWriteGuard, RwLock, mpsc}, time::sleep, }; use crate::{ Get, DbTxn, Db, networks::{Output, Transaction, Eventuality, EventualitiesTracker, Block, Network}, }; #[derive(Clone, Debug)] pub enum ScannerEvent { // Block scanned Block { is_retirement_block: bool, block: >::Id, outputs: Vec, }, // Eventuality completion found on-chain // TODO: Move this from a tuple Completed( Vec, usize, [u8; 32], >::Id, ::Completion, ), } pub type ScannerEventChannel = mpsc::UnboundedReceiver>; #[derive(Clone, Debug)] struct ScannerDb(PhantomData, PhantomData); impl ScannerDb { fn scanner_key(dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec { D::key(b"SCANNER", dst, key) } fn block_key(number: usize) -> Vec { Self::scanner_key(b"block_id", u64::try_from(number).unwrap().to_le_bytes()) } fn block_number_key(id: &>::Id) -> Vec { Self::scanner_key(b"block_number", id) } fn save_block(txn: &mut D::Transaction<'_>, number: usize, id: &>::Id) { txn.put(Self::block_number_key(id), u64::try_from(number).unwrap().to_le_bytes()); txn.put(Self::block_key(number), id); } fn block(getter: &G, number: usize) -> Option<>::Id> { getter.get(Self::block_key(number)).map(|id| { let mut res = >::Id::default(); res.as_mut().copy_from_slice(&id); res }) } fn block_number(getter: &G, id: &>::Id) -> Option { getter .get(Self::block_number_key(id)) 
.map(|number| u64::from_le_bytes(number.try_into().unwrap()).try_into().unwrap()) } fn keys_key() -> Vec { Self::scanner_key(b"keys", b"") } fn register_key( txn: &mut D::Transaction<'_>, activation_number: usize, key: ::G, ) { let mut keys = txn.get(Self::keys_key()).unwrap_or(vec![]); let key_bytes = key.to_bytes(); let key_len = key_bytes.as_ref().len(); assert_eq!(keys.len() % (8 + key_len), 0); // Sanity check this key isn't already present let mut i = 0; while i < keys.len() { if &keys[(i + 8) .. ((i + 8) + key_len)] == key_bytes.as_ref() { panic!("adding {} as a key yet it was already present", hex::encode(key_bytes)); } i += 8 + key_len; } keys.extend(u64::try_from(activation_number).unwrap().to_le_bytes()); keys.extend(key_bytes.as_ref()); txn.put(Self::keys_key(), keys); } fn keys(getter: &G) -> Vec<(usize, ::G)> { let bytes_vec = getter.get(Self::keys_key()).unwrap_or(vec![]); let mut bytes: &[u8] = bytes_vec.as_ref(); // Assumes keys will be 32 bytes when calculating the capacity // If keys are larger, this may allocate more memory than needed // If keys are smaller, this may require additional allocations // Either are fine let mut res = Vec::with_capacity(bytes.len() / (8 + 32)); while !bytes.is_empty() { let mut activation_number = [0; 8]; bytes.read_exact(&mut activation_number).unwrap(); let activation_number = u64::from_le_bytes(activation_number).try_into().unwrap(); res.push((activation_number, N::Curve::read_G(&mut bytes).unwrap())); } res } fn retire_key(txn: &mut D::Transaction<'_>) { let keys = Self::keys(txn); assert_eq!(keys.len(), 2); txn.del(Self::keys_key()); Self::register_key(txn, keys[1].0, keys[1].1); } fn seen_key(id: &>::Id) -> Vec { Self::scanner_key(b"seen", id) } fn seen(getter: &G, id: &>::Id) -> bool { getter.get(Self::seen_key(id)).is_some() } fn outputs_key(block: &>::Id) -> Vec { Self::scanner_key(b"outputs", block.as_ref()) } fn save_outputs( txn: &mut D::Transaction<'_>, block: &>::Id, outputs: &[N::Output], ) { let mut 
bytes = Vec::with_capacity(outputs.len() * 64); for output in outputs { output.write(&mut bytes).unwrap(); } txn.put(Self::outputs_key(block), bytes); } fn outputs( txn: &D::Transaction<'_>, block: &>::Id, ) -> Option> { let bytes_vec = txn.get(Self::outputs_key(block))?; let mut bytes: &[u8] = bytes_vec.as_ref(); let mut res = vec![]; while !bytes.is_empty() { res.push(N::Output::read(&mut bytes).unwrap()); } Some(res) } fn scanned_block_key() -> Vec { Self::scanner_key(b"scanned_block", []) } fn save_scanned_block(txn: &mut D::Transaction<'_>, block: usize) -> Vec { let id = Self::block(txn, block); // It may be None for the first key rotated to let outputs = if let Some(id) = id.as_ref() { Self::outputs(txn, id).unwrap_or(vec![]) } else { vec![] }; // Mark all the outputs from this block as seen for output in &outputs { txn.put(Self::seen_key(&output.id()), b""); } txn.put(Self::scanned_block_key(), u64::try_from(block).unwrap().to_le_bytes()); // Return this block's outputs so they can be pruned from the RAM cache outputs } fn latest_scanned_block(getter: &G) -> Option { getter .get(Self::scanned_block_key()) .map(|bytes| u64::from_le_bytes(bytes.try_into().unwrap()).try_into().unwrap()) } fn retirement_block_key(key: &::G) -> Vec { Self::scanner_key(b"retirement_block", key.to_bytes()) } fn save_retirement_block( txn: &mut D::Transaction<'_>, key: &::G, block: usize, ) { txn.put(Self::retirement_block_key(key), u64::try_from(block).unwrap().to_le_bytes()); } fn retirement_block(getter: &G, key: &::G) -> Option { getter .get(Self::retirement_block_key(key)) .map(|bytes| usize::try_from(u64::from_le_bytes(bytes.try_into().unwrap())).unwrap()) } } /// The Scanner emits events relating to the blockchain, notably received outputs. /// /// It WILL NOT fail to emit an event, even if it reboots at selected moments. /// /// It MAY fire the same event multiple times. 
// In-memory state for the scan task.
//
// Owned by the scan task, yet placed in an `Option` behind a lock (`ScannerHold`) so the handle
// can temporarily take the entire state, synchronizing block acknowledgement with scanning.
#[derive(Debug)]
pub struct Scanner<N: Network, D: Db> {
  _db: PhantomData<D>,

  // Keys being scanned for, as (activation block number, key)
  keys: Vec<(usize, <N::Curve as Ciphersuite>::G)>,

  // Eventuality trackers, keyed by the serialized key whose plans they track
  eventualities: HashMap<Vec<u8>, EventualitiesTracker<N::Eventuality>>,

  // Latest block scanned in-memory, None until the first key is registered
  ram_scanned: Option<usize>,
  // IDs of outputs scanned in-memory yet not committed to the DB as seen (that happens upon
  // block acknowledgement, in `ack_block`)
  ram_outputs: HashSet<Vec<u8>>,

  // Block numbers scanned and emitted as events, oldest first, which have yet to be acknowledged
  need_ack: VecDeque<usize>,

  events: mpsc::UnboundedSender<ScannerEvent<N>>,
}

// Shares the Scanner between the scan task and the handle, where `None` signals it has been
// long-term acquired by the handle.
#[derive(Clone, Debug)]
struct ScannerHold<N: Network, D: Db> {
  scanner: Arc<RwLock<Option<Scanner<N, D>>>>,
}
impl<N: Network, D: Db> ScannerHold<N, D> {
  // Acquire a read lock, yielding while the Scanner is long-term acquired elsewhere.
  async fn read(&self) -> RwLockReadGuard<'_, Option<Scanner<N, D>>> {
    loop {
      let lock = self.scanner.read().await;
      if lock.is_none() {
        drop(lock);
        tokio::task::yield_now().await;
        continue;
      }
      return lock;
    }
  }
  // Acquire a write lock, yielding while the Scanner is long-term acquired elsewhere.
  async fn write(&self) -> RwLockWriteGuard<'_, Option<Scanner<N, D>>> {
    loop {
      let lock = self.scanner.write().await;
      if lock.is_none() {
        drop(lock);
        tokio::task::yield_now().await;
        continue;
      }
      return lock;
    }
  }
  // This is safe to not check if something else already acquired the Scanner as the only caller
  // is sequential.
  async fn long_term_acquire(&self) -> Scanner<N, D> {
    self.scanner.write().await.take().unwrap()
  }
  // Restore a long-term acquired Scanner, letting `read`/`write` succeed again.
  async fn restore(&self, scanner: Scanner<N, D>) {
    let _ = self.scanner.write().await.insert(scanner);
  }
}

// Handle used by the rest of the processor to interact with the scan task.
#[derive(Debug)]
pub struct ScannerHandle<N: Network, D: Db> {
  scanner: ScannerHold<N, D>,
  // The Scanner taken via `long_term_acquire`, held between `ack_block` and `release_lock`
  held_scanner: Option<Scanner<N, D>>,
  pub events: ScannerEventChannel<N>,
  pub multisig_completed: mpsc::UnboundedSender<bool>,
}

impl<N: Network, D: Db> ScannerHandle<N, D> {
  /// The latest block scanned in-memory, or 0 if no keys have been registered yet.
  pub async fn ram_scanned(&self) -> usize {
    self.scanner.read().await.as_ref().unwrap().ram_scanned.unwrap_or(0)
  }

  /// Register a key to scan for.
pub async fn register_key( &mut self, txn: &mut D::Transaction<'_>, activation_number: usize, key: ::G, ) { info!("Registering key {} in scanner at {activation_number}", hex::encode(key.to_bytes())); let mut scanner_lock = self.scanner.write().await; let scanner = scanner_lock.as_mut().unwrap(); assert!( activation_number > scanner.ram_scanned.unwrap_or(0), "activation block of new keys was already scanned", ); if scanner.keys.is_empty() { assert!(scanner.ram_scanned.is_none()); scanner.ram_scanned = Some(activation_number); assert!(ScannerDb::::save_scanned_block(txn, activation_number).is_empty()); } ScannerDb::::register_key(txn, activation_number, key); scanner.keys.push((activation_number, key)); #[cfg(not(test))] // TODO: A test violates this. Improve the test with a better flow assert!(scanner.keys.len() <= 2); scanner.eventualities.insert(key.to_bytes().as_ref().to_vec(), EventualitiesTracker::new()); } pub fn db_scanned(getter: &G) -> Option { ScannerDb::::latest_scanned_block(getter) } // This perform a database read which isn't safe with regards to if the value is set or not // It may be set, when it isn't expected to be set, or not set, when it is expected to be set // Since the value is static, if it's set, it's correctly set pub fn block_number(getter: &G, id: &>::Id) -> Option { ScannerDb::::block_number(getter, id) } /// Acknowledge having handled a block. /// /// Creates a lock over the Scanner, preventing its independent scanning operations until /// released. /// /// This must only be called on blocks which have been scanned in-memory. 
pub async fn ack_block( &mut self, txn: &mut D::Transaction<'_>, id: >::Id, ) -> (bool, Vec) { debug!("block {} acknowledged", hex::encode(&id)); let mut scanner = self.scanner.long_term_acquire().await; // Get the number for this block let number = ScannerDb::::block_number(txn, &id) .expect("main loop trying to operate on data we haven't scanned"); log::trace!("block {} was {number}", hex::encode(&id)); let outputs = ScannerDb::::save_scanned_block(txn, number); // This has a race condition if we try to ack a block we scanned on a prior boot, and we have // yet to scan it on this boot assert!(number <= scanner.ram_scanned.unwrap()); for output in &outputs { assert!(scanner.ram_outputs.remove(output.id().as_ref())); } assert_eq!(scanner.need_ack.pop_front().unwrap(), number); self.held_scanner = Some(scanner); // Load the key from the DB, as it will have already been removed from RAM if retired let key = ScannerDb::::keys(txn)[0].1; let is_retirement_block = ScannerDb::::retirement_block(txn, &key) == Some(number); if is_retirement_block { ScannerDb::::retire_key(txn); } (is_retirement_block, outputs) } pub async fn register_eventuality( &mut self, key: &[u8], block_number: usize, id: [u8; 32], eventuality: N::Eventuality, ) { let mut lock; // We won't use held_scanner if we're re-registering on boot (if let Some(scanner) = self.held_scanner.as_mut() { scanner } else { lock = Some(self.scanner.write().await); lock.as_mut().unwrap().as_mut().unwrap() }) .eventualities .get_mut(key) .unwrap() .register(block_number, id, eventuality) } pub async fn release_lock(&mut self) { self.scanner.restore(self.held_scanner.take().unwrap()).await } } impl Scanner { #[allow(clippy::type_complexity, clippy::new_ret_no_self)] pub fn new( network: N, db: D, ) -> (ScannerHandle, Vec<(usize, ::G)>) { let (events_send, events_recv) = mpsc::unbounded_channel(); let (multisig_completed_send, multisig_completed_recv) = mpsc::unbounded_channel(); let keys = ScannerDb::::keys(&db); let mut 
eventualities = HashMap::new();
    for key in &keys {
      eventualities.insert(key.1.to_bytes().as_ref().to_vec(), EventualitiesTracker::new());
    }

    let ram_scanned = ScannerDb::<N, D>::latest_scanned_block(&db);

    let scanner = ScannerHold {
      scanner: Arc::new(RwLock::new(Some(Scanner {
        _db: PhantomData,

        keys: keys.clone(),

        eventualities,

        ram_scanned,
        ram_outputs: HashSet::new(),

        need_ack: VecDeque::new(),
        events: events_send,
      }))),
    };
    tokio::spawn(Scanner::run(db, network, scanner.clone(), multisig_completed_recv));

    (
      ScannerHandle {
        scanner,
        held_scanner: None,
        events: events_recv,
        multisig_completed: multisig_completed_send,
      },
      keys,
    )
  }

  // Send an event to the handle. Returns false if the receiver was dropped, which is treated as
  // a shutdown signal by callers.
  fn emit(&mut self, event: ScannerEvent<N>) -> bool {
    if self.events.send(event).is_err() {
      info!("Scanner handler was dropped. Shutting down?");
      return false;
    }
    true
  }

  // An async function, to be spawned on a task, to discover and report outputs
  async fn run(
    mut db: D,
    network: N,
    scanner_hold: ScannerHold<N, D>,
    mut multisig_completed: mpsc::UnboundedReceiver<bool>,
  ) {
    loop {
      let (ram_scanned, latest_block_to_scan) = {
        // Sleep 5 seconds to prevent hammering the node/scanner lock
        sleep(Duration::from_secs(5)).await;

        let ram_scanned = {
          let scanner_lock = scanner_hold.read().await;
          let scanner = scanner_lock.as_ref().unwrap();

          // If we're not scanning for keys yet, wait until we are
          if scanner.keys.is_empty() {
            continue;
          }
          let ram_scanned = scanner.ram_scanned.unwrap();

          // If a Batch has taken too long to be published, start waiting until it is before
          // continuing scanning
          // Solves a race condition around multisig rotation, documented in the relevant doc
          // and demonstrated with mini
          if let Some(needing_ack) = scanner.need_ack.front() {
            let next = ram_scanned + 1;
            let limit = needing_ack + N::CONFIRMATIONS;
            assert!(next <= limit);
            if next == limit {
              continue;
            }
          };

          ram_scanned
        };

        (
          ram_scanned,
          loop {
            break match network.get_latest_block_number().await {
              // Only scan confirmed blocks, which we consider effectively finalized
              // CONFIRMATIONS - 1 as whatever's in the latest block already has 1 confirm
              Ok(latest) => latest.saturating_sub(N::CONFIRMATIONS.saturating_sub(1)),
              Err(_) => {
                warn!("couldn't get latest block number");
                sleep(Duration::from_secs(60)).await;
                continue;
              }
            };
          },
        )
      };

      for block_being_scanned in (ram_scanned + 1) ..= latest_block_to_scan {
        // Redo the checks for if we're too far ahead
        {
          let needing_ack = {
            let scanner_lock = scanner_hold.read().await;
            let scanner = scanner_lock.as_ref().unwrap();
            scanner.need_ack.front().copied()
          };

          if let Some(needing_ack) = needing_ack {
            let limit = needing_ack + N::CONFIRMATIONS;
            assert!(block_being_scanned <= limit);
            if block_being_scanned == limit {
              break;
            }
          }
        }

        let Ok(block) = network.get_block(block_being_scanned).await else {
          warn!("couldn't get block {block_being_scanned}");
          break;
        };
        let block_id = block.id();

        info!("scanning block: {} ({block_being_scanned})", hex::encode(&block_id));

        // These DB calls are safe, despite not having a txn, since they're static values
        // There's no issue if they're written in advance of expected (such as on reboot)
        // They're also only expected here
        if let Some(id) = ScannerDb::<N, D>::block(&db, block_being_scanned) {
          if id != block_id {
            panic!("reorg'd from finalized {} to {}", hex::encode(id), hex::encode(block_id));
          }
        } else {
          // TODO: Move this to an unwrap
          if let Some(id) = ScannerDb::<N, D>::block(&db, block_being_scanned.saturating_sub(1)) {
            if id != block.parent() {
              panic!(
                "block {} doesn't build off expected parent {}",
                hex::encode(block_id),
                hex::encode(id),
              );
            }
          }

          let mut txn = db.txn();
          ScannerDb::<N, D>::save_block(&mut txn, block_being_scanned, &block_id);
          txn.commit();
        }

        // Scan new blocks
        // TODO: This lock acquisition may be long-lived...
        let mut scanner_lock = scanner_hold.write().await;
        let scanner = scanner_lock.as_mut().unwrap();

        let mut has_activation = false;
        let mut outputs = vec![];
        let mut completion_block_numbers = vec![];
        for (activation_number, key) in scanner.keys.clone() {
          if activation_number > block_being_scanned {
            continue;
          }

          if activation_number == block_being_scanned {
            has_activation = true;
          }

          let key_vec = key.to_bytes().as_ref().to_vec();

          // TODO: These lines are the ones which will cause a really long-lived lock acquisition
          for output in network.get_outputs(&block, key).await {
            assert_eq!(output.key(), key);
            // Drop dust outputs; anything under N::DUST is not worth tracking
            if output.balance().amount.0 >= N::DUST {
              outputs.push(output);
            }
          }

          for (id, (block_number, tx, completion)) in network
            .get_eventuality_completions(scanner.eventualities.get_mut(&key_vec).unwrap(), &block)
            .await
          {
            info!(
              "eventuality {} resolved by {}, as found on chain",
              hex::encode(id),
              hex::encode(tx.as_ref())
            );

            completion_block_numbers.push(block_number);
            // This must be before the emission of ScannerEvent::Block, per commentary in mod.rs
            if !scanner.emit(ScannerEvent::Completed(
              key_vec.clone(),
              block_number,
              id,
              tx,
              completion,
            )) {
              return;
            }
          }
        }

        // Panic if we've already seen these outputs
        for output in &outputs {
          let id = output.id();
          info!(
            "block {} had output {} worth {:?}",
            hex::encode(&block_id),
            hex::encode(&id),
            output.balance(),
          );

          // On Bitcoin, the output ID should be unique for a given chain
          // On Monero, it's trivial to make an output sharing an ID with another
          // We should only scan outputs with valid IDs however, which will be unique

          /*
            The safety of this code must satisfy the following conditions:
            1) seen is not set for the first occurrence
            2) seen is set for any future occurrence

            seen is only written to after this code completes. Accordingly, it cannot be set
            before the first occurrence UNLESS it's set, yet the last scanned block isn't.
            They are both written in the same database transaction, preventing this.

            As for future occurrences, the RAM entry ensures they're handled properly even if
            the database has yet to be set.

            On reboot, which will clear the RAM, if seen wasn't set, neither was latest scanned
            block. Accordingly, this will scan from some prior block, re-populating the RAM.

            If seen was set, then this will be successfully read.

            There's also no concern ram_outputs was pruned, yet seen wasn't set, as pruning from
            ram_outputs will acquire a write lock (preventing this code from acquiring its own
            write lock and running), and during its holding of the write lock, it commits the
            transaction setting seen and the latest scanned block.

            This last case isn't true. Committing seen/latest_scanned_block happens after
            relinquishing the write lock.

            TODO2: Only update ram_outputs after committing the TXN in question.
          */
          let seen = ScannerDb::<N, D>::seen(&db, &id);
          let id = id.as_ref().to_vec();
          if seen || scanner.ram_outputs.contains(&id) {
            panic!("scanned an output multiple times");
          }
          scanner.ram_outputs.insert(id);
        }

        // We could remove this, if instead of doing the first block which passed
        // requirements + CONFIRMATIONS, we simply emitted an event for every block where
        // `number % CONFIRMATIONS == 0` (once at the final stage for the existing multisig)
        // There's no need at this point, yet the latter may be more suitable for modeling...
        async fn check_multisig_completed<D: Db, N: Network>(
          db: &mut D,
          multisig_completed: &mut mpsc::UnboundedReceiver<bool>,
          block_number: usize,
        ) -> bool {
          match multisig_completed.recv().await {
            None => {
              info!("Scanner handler was dropped. Shutting down?");
              false
            }
            Some(completed) => {
              // Set the retirement block as block_number + CONFIRMATIONS
              if completed {
                let mut txn = db.txn();
                // The retiring key is the earliest one still around
                let retiring_key = ScannerDb::<N, D>::keys(&txn)[0].1;
                // This value is static w.r.t. the key
                ScannerDb::<N, D>::save_retirement_block(
                  &mut txn,
                  &retiring_key,
                  block_number + N::CONFIRMATIONS,
                );
                txn.commit();
              }
              true
            }
          }
        }

        drop(scanner_lock);
        // Now that we've dropped the Scanner lock, we need to handle the multisig_completed
        // channel before we decide if this block should be fired or not
        // (holding the Scanner risks a deadlock)
        for block_number in completion_block_numbers {
          if !check_multisig_completed::<D, N>(&mut db, &mut multisig_completed, block_number).await
          {
            return;
          };
        }

        // Reacquire the scanner
        let mut scanner_lock = scanner_hold.write().await;
        let scanner = scanner_lock.as_mut().unwrap();

        // Only emit an event if any of the following is true:
        // - This is an activation block
        // - This is a retirement block
        // - There's outputs
        // as only those blocks are meaningful and warrant obtaining synchrony over
        let is_retirement_block =
          ScannerDb::<N, D>::retirement_block(&db, &scanner.keys[0].1) == Some(block_being_scanned);
        let sent_block = if has_activation || is_retirement_block || (!outputs.is_empty()) {
          // Save the outputs to disk
          let mut txn = db.txn();
          ScannerDb::<N, D>::save_outputs(&mut txn, &block_id, &outputs);
          txn.commit();

          // Send all outputs
          if !scanner.emit(ScannerEvent::Block { is_retirement_block, block: block_id, outputs }) {
            return;
          }

          // Since we're creating a Batch, mark it as needing ack
          scanner.need_ack.push_back(block_being_scanned);
          true
        } else {
          false
        };

        // Remove it from memory
        if is_retirement_block {
          let retired = scanner.keys.remove(0).1;
          scanner.eventualities.remove(retired.to_bytes().as_ref());
        }

        // Update ram_scanned
        scanner.ram_scanned = Some(block_being_scanned);

        drop(scanner_lock);

        // If we sent a Block event, once again check multisig_completed
        if sent_block &&
          (!check_multisig_completed::<D, N>(
            &mut db,
            &mut multisig_completed,
            block_being_scanned,
          )
          .await)
        {
          return;
        }
      }
    }
  }
}

================================================
FILE: processor/src/multisigs/scheduler/mod.rs
================================================
use
core::fmt::Debug;
use std::io;

use ciphersuite::Ciphersuite;

use serai_client::primitives::{ExternalBalance, ExternalNetworkId};

use crate::{networks::Network, Db, Payment, Plan};

pub(crate) mod utxo;
pub(crate) mod smart_contract;

/// An addendum a Scheduler attaches to each Plan, carrying scheduler-specific metadata
/// (stored in Plan::scheduler_addendum).
pub trait SchedulerAddendum: Send + Clone + PartialEq + Debug {
  /// Deserialize an addendum from a reader.
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self>
  where
    Self: Sized;
  /// Serialize this addendum to a writer.
  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()>;
}

// The unit type is the trivial addendum for Schedulers which don't need one.
impl SchedulerAddendum for () {
  fn read<R: io::Read>(_: &mut R) -> io::Result<Self> {
    Ok(())
  }
  fn write<W: io::Write>(&self, _: &mut W) -> io::Result<()> {
    Ok(())
  }
}

/// A scheduler of payments and outputs for a given network.
pub trait Scheduler<N: Network>: Sized + Clone + PartialEq + Debug {
  /// The metadata attached to each Plan this Scheduler creates.
  type Addendum: SchedulerAddendum;

  /// Check if this Scheduler is empty.
  fn empty(&self) -> bool;

  /// Create a new Scheduler.
  fn new<D: Db>(
    txn: &mut D::Transaction<'_>,
    key: <N::Curve as Ciphersuite>::G,
    network: ExternalNetworkId,
  ) -> Self;

  /// Load a Scheduler from the DB.
  fn from_db<D: Db>(
    db: &D,
    key: <N::Curve as Ciphersuite>::G,
    network: ExternalNetworkId,
  ) -> io::Result<Self>;

  /// Check if a branch is usable.
  fn can_use_branch(&self, balance: ExternalBalance) -> bool;

  /// Schedule a series of outputs/payments.
  fn schedule<D: Db>(
    &mut self,
    txn: &mut D::Transaction<'_>,
    utxos: Vec<N::Output>,
    payments: Vec<Payment<N>>,
    // TODO: Tighten this to multisig_for_any_change
    key_for_any_change: <N::Curve as Ciphersuite>::G,
    force_spend: bool,
  ) -> Vec<Plan<N>>;

  /// Consume all payments still pending within this Scheduler, without scheduling them.
  fn consume_payments<D: Db>(&mut self, txn: &mut D::Transaction<'_>) -> Vec<Payment<N>>;

  /// Note a branch output as having been created, with the amount it was actually created with,
  /// or not having been created due to being too small.
  fn created_output<D: Db>(
    &mut self,
    txn: &mut D::Transaction<'_>,
    expected: u64,
    actual: Option<u64>,
  );

  /// Refund a specific output.
  fn refund_plan<D: Db>(
    &mut self,
    txn: &mut D::Transaction<'_>,
    output: N::Output,
    refund_to: N::Address,
  ) -> Plan<N>;

  /// Shim the forwarding Plan as necessary to obtain a fee estimate.
  ///
  /// If this Scheduler is for a Network which requires forwarding, this must return Some with a
  /// plan with identical fee behavior.
If forwarding isn't necessary, returns None. fn shim_forward_plan(output: N::Output, to: ::G) -> Option>; /// Forward a specific output to the new multisig. /// /// Returns None if no forwarding is necessary. Must return Some if forwarding is necessary. fn forward_plan( &mut self, txn: &mut D::Transaction<'_>, output: N::Output, to: ::G, ) -> Option>; } ================================================ FILE: processor/src/multisigs/scheduler/smart_contract.rs ================================================ use std::{io, collections::HashSet}; use ciphersuite::{group::GroupEncoding, Ciphersuite}; use serai_client::primitives::{ExternalBalance, ExternalCoin, ExternalNetworkId}; use crate::{ Get, DbTxn, Db, Payment, Plan, create_db, networks::{Output, Network}, multisigs::scheduler::{SchedulerAddendum, Scheduler as SchedulerTrait}, }; #[derive(Clone, PartialEq, Eq, Debug)] pub struct Scheduler { key: ::G, coins: HashSet, rotated: bool, } #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub enum Addendum { Nonce(u64), RotateTo { nonce: u64, new_key: ::G }, } impl SchedulerAddendum for Addendum { fn read(reader: &mut R) -> io::Result { let mut kind = [0xff]; reader.read_exact(&mut kind)?; match kind[0] { 0 => { let mut nonce = [0; 8]; reader.read_exact(&mut nonce)?; Ok(Addendum::Nonce(u64::from_le_bytes(nonce))) } 1 => { let mut nonce = [0; 8]; reader.read_exact(&mut nonce)?; let nonce = u64::from_le_bytes(nonce); let new_key = N::Curve::read_G(reader)?; Ok(Addendum::RotateTo { nonce, new_key }) } _ => Err(io::Error::other("reading unknown Addendum type"))?, } } fn write(&self, writer: &mut W) -> io::Result<()> { match self { Addendum::Nonce(nonce) => { writer.write_all(&[0])?; writer.write_all(&nonce.to_le_bytes()) } Addendum::RotateTo { nonce, new_key } => { writer.write_all(&[1])?; writer.write_all(&nonce.to_le_bytes())?; writer.write_all(new_key.to_bytes().as_ref()) } } } } create_db! 
{
  SchedulerDb {
    // The next unused nonce for this network's contract calls.
    LastNonce: () -> u64,
    // Maps a retired key to the key it rotated to.
    RotatedTo: (key: &[u8]) -> Vec<u8>,
  }
}

impl<N: Network<Scheduler = Self>> SchedulerTrait<N> for Scheduler<N> {
  type Addendum = Addendum<N>;

  /// Check if this Scheduler is empty.
  fn empty(&self) -> bool {
    self.rotated
  }

  /// Create a new Scheduler.
  fn new<D: Db>(
    _txn: &mut D::Transaction<'_>,
    key: <N::Curve as Ciphersuite>::G,
    network: ExternalNetworkId,
  ) -> Self {
    // Smart-contract networks don't use branch/change/forward addresses
    assert!(N::branch_address(key).is_none());
    assert!(N::change_address(key).is_none());
    assert!(N::forward_address(key).is_none());

    Scheduler { key, coins: network.coins().iter().copied().collect(), rotated: false }
  }

  /// Load a Scheduler from the DB.
  fn from_db<D: Db>(
    db: &D,
    key: <N::Curve as Ciphersuite>::G,
    network: ExternalNetworkId,
  ) -> io::Result<Self> {
    Ok(Scheduler {
      key,
      coins: network.coins().iter().copied().collect(),
      // A RotatedTo entry existing for this key means we already rotated away from it
      rotated: RotatedTo::get(db, key.to_bytes().as_ref()).is_some(),
    })
  }

  fn can_use_branch(&self, _balance: ExternalBalance) -> bool {
    false
  }

  fn schedule<D: Db>(
    &mut self,
    txn: &mut D::Transaction<'_>,
    utxos: Vec<N::Output>,
    payments: Vec<Payment<N>>,
    key_for_any_change: <N::Curve as Ciphersuite>::G,
    force_spend: bool,
  ) -> Vec<Plan<N>> {
    for utxo in utxos {
      assert!(self.coins.contains(&utxo.balance().coin));
    }

    let mut nonce = LastNonce::get(txn).unwrap_or(1);
    let mut plans = vec![];
    for chunk in payments.as_slice().chunks(N::MAX_OUTPUTS) {
      // Once we rotate, all further payments should be scheduled via the new multisig
      assert!(!self.rotated);
      plans.push(Plan {
        key: self.key,
        inputs: vec![],
        payments: chunk.to_vec(),
        change: None,
        scheduler_addendum: Addendum::Nonce(nonce),
      });
      nonce += 1;
    }

    // If we're supposed to rotate to the new key, create an empty Plan which will signify the key
    // update
    if force_spend && (!self.rotated) {
      plans.push(Plan {
        key: self.key,
        inputs: vec![],
        payments: vec![],
        change: None,
        scheduler_addendum: Addendum::RotateTo { nonce, new_key: key_for_any_change },
      });
      nonce += 1;
      self.rotated = true;
      RotatedTo::set(
        txn,
        self.key.to_bytes().as_ref(),
        &key_for_any_change.to_bytes().as_ref().to_vec(),
      );
    }

    // Persist the next unused nonce
    LastNonce::set(txn, &nonce);

    plans
  }

  fn consume_payments<D: Db>(&mut self, _txn: &mut
D::Transaction<'_>) -> Vec<Payment<N>> {
    // This Scheduler never holds pending payments
    vec![]
  }

  fn created_output<D: Db>(
    &mut self,
    _txn: &mut D::Transaction<'_>,
    _expected: u64,
    _actual: Option<u64>,
  ) {
    // can_use_branch always returns false, so a branch output here is an invariant violation
    panic!("Smart Contract Scheduler created a Branch output")
  }

  /// Refund a specific output.
  fn refund_plan<D: Db>(
    &mut self,
    txn: &mut D::Transaction<'_>,
    output: N::Output,
    refund_to: N::Address,
  ) -> Plan<N> {
    // Refund from whichever key we rotated to, if any, else our own
    let current_key = RotatedTo::get(txn, self.key.to_bytes().as_ref())
      .and_then(|key_bytes| <N::Curve as Ciphersuite>::read_G(&mut key_bytes.as_slice()).ok())
      .unwrap_or(self.key);

    // NOTE(review): schedule() stores the *next unused* nonce in LastNonce, yet this uses
    // `map_or(1, |nonce| nonce + 1)` (stored + 1) and then writes back nonce + 2 in total —
    // confirm the off-by-one/gap here is intended
    let nonce = LastNonce::get(txn).map_or(1, |nonce| nonce + 1);
    LastNonce::set(txn, &(nonce + 1));
    Plan {
      key: current_key,
      inputs: vec![],
      payments: vec![Payment { address: refund_to, data: None, balance: output.balance() }],
      change: None,
      scheduler_addendum: Addendum::Nonce(nonce),
    }
  }

  fn shim_forward_plan(_output: N::Output, _to: <N::Curve as Ciphersuite>::G) -> Option<Plan<N>> {
    None
  }

  /// Forward a specific output to the new multisig.
  ///
  /// Returns None if no forwarding is necessary.
  fn forward_plan<D: Db>(
    &mut self,
    _txn: &mut D::Transaction<'_>,
    _output: N::Output,
    _to: <N::Curve as Ciphersuite>::G,
  ) -> Option<Plan<N>> {
    None
  }
}

================================================
FILE: processor/src/multisigs/scheduler/utxo.rs
================================================
use std::{
  io::{self, Read},
  collections::{VecDeque, HashMap},
};

use ciphersuite::{group::GroupEncoding, Ciphersuite};

use serai_client::primitives::{ExternalNetworkId, ExternalCoin, Amount, ExternalBalance};

use crate::{
  DbTxn, Db, Payment, Plan,
  networks::{OutputType, Output, Network, UtxoNetwork},
  multisigs::scheduler::Scheduler as SchedulerTrait,
};

/// Deterministic output/payment manager.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Scheduler<N: Network> {
  key: <N::Curve as Ciphersuite>::G,
  coin: ExternalCoin,

  // Serai, when it has more outputs expected than it can handle in a single transaction, will
  // schedule the outputs to be handled later.
Immediately, it just creates additional outputs
  // which will eventually handle those outputs
  //
  // These maps map output amounts, which we'll receive in the future, to the payments they should
  // be used on
  //
  // When those output amounts appear, their payments should be scheduled
  // The Vec is for all payments that should be done per output instance
  // The VecDeque allows multiple sets of payments with the same sum amount to properly co-exist
  //
  // queued_plans are for outputs which we will create, yet when created, will have their amount
  // reduced by the fee it cost to be created. The Scheduler will then be told what amount the
  // output actually has, and it'll be moved into plans
  queued_plans: HashMap<u64, VecDeque<Vec<Payment<N>>>>,
  plans: HashMap<u64, VecDeque<Vec<Payment<N>>>>,

  // UTXOs available
  utxos: Vec<N::Output>,

  // Payments awaiting scheduling due to the output availability problem
  payments: VecDeque<Payment<N>>,
}

// DB key under which the serialized Scheduler for a given multisig key is stored.
fn scheduler_key<D: Db, G: GroupEncoding>(key: &G) -> Vec<u8> {
  D::key(b"SCHEDULER", b"scheduler", key.to_bytes())
}

impl<N: UtxoNetwork<Scheduler = Self>> Scheduler<N> {
  pub fn empty(&self) -> bool {
    self.queued_plans.is_empty() &&
      self.plans.is_empty() &&
      self.utxos.is_empty() &&
      self.payments.is_empty()
  }

  // Deserialize a Scheduler from a reader; inverse of serialize below.
  fn read<R: Read>(
    key: <N::Curve as Ciphersuite>::G,
    coin: ExternalCoin,
    reader: &mut R,
  ) -> io::Result<Self> {
    let mut read_plans = || -> io::Result<_> {
      let mut all_plans = HashMap::new();
      let mut all_plans_len = [0; 4];
      reader.read_exact(&mut all_plans_len)?;
      for _ in 0 .. u32::from_le_bytes(all_plans_len) {
        let mut amount = [0; 8];
        reader.read_exact(&mut amount)?;
        let amount = u64::from_le_bytes(amount);

        let mut plans = VecDeque::new();
        let mut plans_len = [0; 4];
        reader.read_exact(&mut plans_len)?;
        for _ in 0 .. u32::from_le_bytes(plans_len) {
          let mut payments = vec![];
          let mut payments_len = [0; 4];
          reader.read_exact(&mut payments_len)?;
          for _ in 0 .. u32::from_le_bytes(payments_len) {
            payments.push(Payment::read(reader)?);
          }
          plans.push_back(payments);
        }
        all_plans.insert(amount, plans);
      }
      Ok(all_plans)
    };
    let queued_plans = read_plans()?;
    let plans = read_plans()?;

    let mut utxos = vec![];
    let mut utxos_len = [0; 4];
    reader.read_exact(&mut utxos_len)?;
    for _ in 0 .. u32::from_le_bytes(utxos_len) {
      utxos.push(N::Output::read(reader)?);
    }

    let mut payments = VecDeque::new();
    let mut payments_len = [0; 4];
    reader.read_exact(&mut payments_len)?;
    for _ in 0 .. u32::from_le_bytes(payments_len) {
      payments.push_back(Payment::read(reader)?);
    }

    Ok(Scheduler { key, coin, queued_plans, plans, utxos, payments })
  }

  // TODO2: Get rid of this
  // We reserialize the entire scheduler on any mutation to save it to the DB which is horrible
  // We should have an incremental solution
  fn serialize(&self) -> Vec<u8> {
    let mut res = Vec::with_capacity(4096);

    let mut write_plans = |plans: &HashMap<u64, VecDeque<Vec<Payment<N>>>>| {
      res.extend(u32::try_from(plans.len()).unwrap().to_le_bytes());
      for (amount, list_of_plans) in plans {
        res.extend(amount.to_le_bytes());
        res.extend(u32::try_from(list_of_plans.len()).unwrap().to_le_bytes());
        for plan in list_of_plans {
          res.extend(u32::try_from(plan.len()).unwrap().to_le_bytes());
          for payment in plan {
            payment.write(&mut res).unwrap();
          }
        }
      }
    };
    write_plans(&self.queued_plans);
    write_plans(&self.plans);

    res.extend(u32::try_from(self.utxos.len()).unwrap().to_le_bytes());
    for utxo in &self.utxos {
      utxo.write(&mut res).unwrap();
    }

    res.extend(u32::try_from(self.payments.len()).unwrap().to_le_bytes());
    for payment in &self.payments {
      payment.write(&mut res).unwrap();
    }

    // Round-trip check: the serialization must deserialize back to an equal Scheduler
    debug_assert_eq!(&Self::read(self.key, self.coin, &mut res.as_slice()).unwrap(), self);
    res
  }

  pub fn new<D: Db>(
    txn: &mut D::Transaction<'_>,
    key: <N::Curve as Ciphersuite>::G,
    network: ExternalNetworkId,
  ) -> Self {
    // UTXO networks require branch/change/forward addresses
    assert!(N::branch_address(key).is_some());
    assert!(N::change_address(key).is_some());
    assert!(N::forward_address(key).is_some());

    let coin = {
      let coins = network.coins();
      assert_eq!(coins.len(), 1);
      coins[0]
    };

    let res = Scheduler {
      key,
      coin,
      queued_plans: HashMap::new(),
      plans: HashMap::new(),
      utxos: vec![],
      payments: VecDeque::new(),
    };
    // Save it to disk so from_db won't panic if we don't mutate it before rebooting
    txn.put(scheduler_key::<D, _>(&res.key), res.serialize());
    res
  }

  pub fn from_db<D: Db>(
    db: &D,
    key: <N::Curve as Ciphersuite>::G,
    network: ExternalNetworkId,
  ) -> io::Result<Self> {
    let coin = {
      let coins = network.coins();
      assert_eq!(coins.len(), 1);
      coins[0]
    };

    let scheduler = db.get(scheduler_key::<D, _>(&key)).unwrap_or_else(|| {
      panic!("loading scheduler from DB without scheduler for {}", hex::encode(key.to_bytes()))
    });
    let mut reader_slice = scheduler.as_slice();
    let reader = &mut reader_slice;

    Self::read(key, coin, reader)
  }

  pub fn can_use_branch(&self, balance: ExternalBalance) -> bool {
    assert_eq!(balance.coin, self.coin);
    self.plans.contains_key(&balance.amount.0)
  }

  // Build a Plan spending `inputs` to make `payments`, creating branch payments for any
  // payments which don't fit into a single transaction.
  fn execute(
    &mut self,
    inputs: Vec<N::Output>,
    mut payments: Vec<Payment<N>>,
    key_for_any_change: <N::Curve as Ciphersuite>::G,
  ) -> Plan<N> {
    let mut change = false;
    let mut max = N::MAX_OUTPUTS;

    let payment_amounts = |payments: &Vec<Payment<N>>| {
      payments.iter().map(|payment| payment.balance.amount.0).sum::<u64>()
    };

    // Requires a change output
    if inputs.iter().map(|output| output.balance().amount.0).sum::<u64>() !=
      payment_amounts(&payments)
    {
      change = true;
      max -= 1;
    }

    let mut add_plan = |payments| {
      let amount = payment_amounts(&payments);
      self.queued_plans.entry(amount).or_insert(VecDeque::new()).push_back(payments);
      amount
    };

    let branch_address = N::branch_address(self.key).unwrap();

    // If we have more payments than we can handle in a single TX, create plans for them
    // TODO2: This isn't perfect. For 258 outputs, and a MAX_OUTPUTS of 16, this will create:
    // 15 branches of 16 leaves
    // 1 branch of:
    // - 1 branch of 16 leaves
    // - 2 leaves
    // If this was perfect, the heaviest branch would have 1 branch of 3 leaves and 15 leaves
    while payments.len() > max {
      // The resulting TX will have the remaining payments and a new branch payment
      let to_remove = (payments.len() + 1) - N::MAX_OUTPUTS;
      // Don't remove more than possible
      let to_remove = to_remove.min(N::MAX_OUTPUTS);

      // Create the plan
      let removed = payments.drain((payments.len() - to_remove) ..).collect::<Vec<_>>();
      assert_eq!(removed.len(), to_remove);
      let amount = add_plan(removed);

      // Create the payment for the plan
      // Push it to the front so it's not moved into a branch until all lower-depth items are
      payments.insert(
        0,
        Payment {
          address: branch_address.clone(),
          data: None,
          balance: ExternalBalance { coin: self.coin, amount: Amount(amount) },
        },
      );
    }

    Plan {
      key: self.key,
      inputs,
      payments,
      change: Some(N::change_address(key_for_any_change).unwrap()).filter(|_| change),
      scheduler_addendum: (),
    }
  }

  // Register newly-confirmed outputs; branch outputs with a pending plan spawn that plan's TX,
  // everything else joins the UTXO set.
  fn add_outputs(
    &mut self,
    mut utxos: Vec<N::Output>,
    key_for_any_change: <N::Curve as Ciphersuite>::G,
  ) -> Vec<Plan<N>> {
    log::info!("adding {} outputs", utxos.len());

    let mut txs = vec![];

    for utxo in utxos.drain(..) {
      if utxo.kind() == OutputType::Branch {
        let amount = utxo.balance().amount.0;
        if let Some(plans) = self.plans.get_mut(&amount) {
          // Execute the first set of payments possible with an output of this amount
          let payments = plans.pop_front().unwrap();
          // They won't be equal if we dropped payments due to being dust
          assert!(amount >= payments.iter().map(|payment| payment.balance.amount.0).sum::<u64>());

          // If we've grabbed the last plan for this output amount, remove it from the map
          if plans.is_empty() {
            self.plans.remove(&amount);
          }

          // Create a TX for these payments
          txs.push(self.execute(vec![utxo], payments, key_for_any_change));
          continue;
        }
      }

      self.utxos.push(utxo);
    }

    log::info!("{} planned TXs have had their required inputs confirmed", txs.len());
    txs
  }

  // Schedule a series of outputs/payments.
  pub fn schedule<D: Db>(
    &mut self,
    txn: &mut D::Transaction<'_>,
    utxos: Vec<N::Output>,
    mut payments: Vec<Payment<N>>,
    key_for_any_change: <N::Curve as Ciphersuite>::G,
    force_spend: bool,
  ) -> Vec<Plan<N>> {
    for utxo in &utxos {
      assert_eq!(utxo.balance().coin, self.coin);
    }
    for payment in &payments {
      assert_eq!(payment.balance.coin, self.coin);
    }

    // Drop payments to our own branch address
    /*
      created_output will be called any time we send to a branch address. If it's called, and
      it wasn't expecting to be called, that's almost certainly an error. The only way to
      guarantee this however is to only have us send to a branch address when creating a branch,
      hence the dropping of pointless payments.

      This is not comprehensive as a payment may still be made to another active multisig's
      branch address, depending on timing. This is safe as the issue only occurs when a multisig
      sends to its *own* branch address, since created_output is called on the signer's
      Scheduler.
    */
    {
      let branch_address = N::branch_address(self.key).unwrap();
      payments =
        payments.drain(..).filter(|payment| payment.address != branch_address).collect::<Vec<_>>();
    }

    let mut plans = self.add_outputs(utxos, key_for_any_change);

    log::info!("scheduling {} new payments", payments.len());

    // Add all new payments to the list of pending payments
    self.payments.extend(payments);
    let payments_at_start = self.payments.len();
    log::info!("{} payments are now scheduled", payments_at_start);

    // If we don't have UTXOs available, don't try to continue
    if self.utxos.is_empty() {
      log::info!("no utxos currently available");
      return plans;
    }

    // Sort UTXOs so the highest valued ones are first
    self.utxos.sort_by(|a, b| a.balance().amount.0.cmp(&b.balance().amount.0).reverse());

    // We always want to aggregate our UTXOs into a single UTXO in the name of simplicity
    // We may have more UTXOs than will fit into a TX though
    // We use the most valuable UTXOs to handle our current payments, and we return aggregation
    // TXs for the rest of the inputs
    // Since we do multiple aggregation TXs at once, this will execute in logarithmic time
    let utxos = self.utxos.drain(..).collect::<Vec<_>>();
    let mut utxo_chunks =
      utxos.chunks(N::MAX_INPUTS).map(<[<N as Network>::Output]>::to_vec).collect::<Vec<_>>();

    // Use the first chunk for any scheduled payments, since it has the most value
    let utxos = utxo_chunks.remove(0);

    // If the last chunk exists and only has one output, don't try aggregating it
    // Set it to be restored to UTXO set
    let mut to_restore = None;
    if let Some(mut chunk) = utxo_chunks.pop() {
      if chunk.len() == 1 {
        to_restore = Some(chunk.pop().unwrap());
      } else {
        utxo_chunks.push(chunk);
      }
    }

    for chunk in utxo_chunks.drain(..) {
      log::debug!("aggregating a chunk of {} inputs", chunk.len());
      plans.push(Plan {
        key: self.key,
        inputs: chunk,
        payments: vec![],
        change: Some(N::change_address(key_for_any_change).unwrap()),
        scheduler_addendum: (),
      })
    }

    // We want to use all possible UTXOs for all possible payments
    let mut balance = utxos.iter().map(|output| output.balance().amount.0).sum::<u64>();

    // If we can't fulfill the next payment, we have encountered an instance of the UTXO
    // availability problem
    // This shows up in networks like Monero, where because we spent outputs, our change has yet
    // to re-appear. Since it has yet to re-appear, we only operate with a balance which is a
    // subset of our total balance
    // Despite this, we may be ordered to fulfill a payment which is our total balance
    // The solution is to wait for the temporarily unavailable change outputs to re-appear,
    // granting us access to our full balance
    let mut executing = vec![];
    while !self.payments.is_empty() {
      let amount = self.payments[0].balance.amount.0;
      if balance.checked_sub(amount).is_some() {
        balance -= amount;
        executing.push(self.payments.pop_front().unwrap());
      } else {
        // Doesn't check if other payments would fit into the current batch as doing so may never
        // let enough inputs become simultaneously available to enable handling of payments[0]
        break;
      }
    }

    // Now that we have the list of payments we can successfully handle right now, create the TX
    // for them
    if !executing.is_empty() {
      plans.push(self.execute(utxos, executing, key_for_any_change));
    } else {
      // If we don't have any payments to execute, save these UTXOs for later
      self.utxos.extend(utxos);
    }

    // If we're instructed to force a spend, do so
    // This is used when an old multisig is retiring and we want to always transfer outputs to
    // the new one, regardless if we currently have payments
    if force_spend && (!self.utxos.is_empty()) {
      assert!(self.utxos.len() <= N::MAX_INPUTS);
      plans.push(Plan {
        key: self.key,
        inputs: self.utxos.drain(..).collect::<Vec<_>>(),
        payments: vec![],
        change: Some(N::change_address(key_for_any_change).unwrap()),
        scheduler_addendum: (),
      });
    }

    // If there's a UTXO to restore, restore it
    // This is done now as if there is a to_restore output, and it was inserted into self.utxos
    // earlier, self.utxos.len() may become `N::MAX_INPUTS + 1`
    // The prior block requires the len to be `<= N::MAX_INPUTS`
    if let Some(to_restore) = to_restore {
      self.utxos.push(to_restore);
    }

    txn.put(scheduler_key::<D, _>(&self.key), self.serialize());

    log::info!(
      "created {} plans containing {} payments to sign, with {} payments pending scheduling",
      plans.len(),
      payments_at_start - self.payments.len(),
      self.payments.len(),
    );
    plans
  }

  pub fn consume_payments<D: Db>(&mut self, txn: &mut D::Transaction<'_>) -> Vec<Payment<N>> {
    let res: Vec<_> = self.payments.drain(..).collect();
    if !res.is_empty() {
      txn.put(scheduler_key::<D, _>(&self.key), self.serialize());
    }
    res
  }

  // Note a branch output as having been created, with the amount it was actually created with,
  // or not having been created due to being too small
  pub fn created_output<D: Db>(
    &mut self,
    txn: &mut D::Transaction<'_>,
    expected: u64,
    actual: Option<u64>,
  ) {
    log::debug!("output expected to have {} had {:?} after fees", expected, actual);

    // Get the payments this output is expected to handle
    let queued = self.queued_plans.get_mut(&expected).unwrap();
    let mut payments = queued.pop_front().unwrap();
    assert_eq!(expected, payments.iter().map(|payment| payment.balance.amount.0).sum::<u64>());
    // If this was the last set of payments at this amount, remove it
    if queued.is_empty() {
      self.queued_plans.remove(&expected);
    }

    // If we didn't actually create this output, return, dropping the child payments
    let Some(actual) = actual else { return };

    // Amortize the fee amongst all payments underneath this branch
    {
      let mut to_amortize = actual - expected;
      // If the payments are worth less than this fee we need to amortize, return, dropping them
      if payments.iter().map(|payment| payment.balance.amount.0).sum::<u64>() < to_amortize {
        return;
      }
while to_amortize != 0 { let payments_len = u64::try_from(payments.len()).unwrap(); let per_payment = to_amortize / payments_len; let mut overage = to_amortize % payments_len; for payment in &mut payments { let to_subtract = per_payment + overage; // Only subtract the overage once overage = 0; let subtractable = payment.balance.amount.0.min(to_subtract); to_amortize -= subtractable; payment.balance.amount.0 -= subtractable; } } } // Drop payments now below the dust threshold let payments = payments .into_iter() .filter(|payment| payment.balance.amount.0 >= N::DUST) .collect::>(); // Sanity check this was done properly assert!(actual >= payments.iter().map(|payment| payment.balance.amount.0).sum::()); // If there's no payments left, return if payments.is_empty() { return; } self.plans.entry(actual).or_insert(VecDeque::new()).push_back(payments); // TODO2: This shows how ridiculous the serialize function is txn.put(scheduler_key::(&self.key), self.serialize()); } } impl> SchedulerTrait for Scheduler { type Addendum = (); /// Check if this Scheduler is empty. fn empty(&self) -> bool { Scheduler::empty(self) } /// Create a new Scheduler. fn new( txn: &mut D::Transaction<'_>, key: ::G, network: ExternalNetworkId, ) -> Self { Scheduler::new::(txn, key, network) } /// Load a Scheduler from the DB. fn from_db( db: &D, key: ::G, network: ExternalNetworkId, ) -> io::Result { Scheduler::from_db::(db, key, network) } /// Check if a branch is usable. fn can_use_branch(&self, balance: ExternalBalance) -> bool { Scheduler::can_use_branch(self, balance) } /// Schedule a series of outputs/payments. fn schedule( &mut self, txn: &mut D::Transaction<'_>, utxos: Vec, payments: Vec>, key_for_any_change: ::G, force_spend: bool, ) -> Vec> { Scheduler::schedule::(self, txn, utxos, payments, key_for_any_change, force_spend) } /// Consume all payments still pending within this Scheduler, without scheduling them. 
fn consume_payments(&mut self, txn: &mut D::Transaction<'_>) -> Vec> { Scheduler::consume_payments::(self, txn) } /// Note a branch output as having been created, with the amount it was actually created with, /// or not having been created due to being too small. // TODO: Move this to ExternalBalance. fn created_output( &mut self, txn: &mut D::Transaction<'_>, expected: u64, actual: Option, ) { Scheduler::created_output::(self, txn, expected, actual) } fn refund_plan( &mut self, _: &mut D::Transaction<'_>, output: N::Output, refund_to: N::Address, ) -> Plan { let output_id = output.id().as_ref().to_vec(); let res = Plan { key: output.key(), // Uses a payment as this will still be successfully sent due to fee amortization, // and because change is currently always a Serai key payments: vec![Payment { address: refund_to, data: None, balance: output.balance() }], inputs: vec![output], change: None, scheduler_addendum: (), }; log::info!("refund plan for {} has ID {}", hex::encode(output_id), hex::encode(res.id())); res } fn shim_forward_plan(output: N::Output, to: ::G) -> Option> { Some(Plan { key: output.key(), payments: vec![Payment { address: N::forward_address(to).unwrap(), data: None, balance: output.balance(), }], inputs: vec![output], change: None, scheduler_addendum: (), }) } fn forward_plan( &mut self, _: &mut D::Transaction<'_>, output: N::Output, to: ::G, ) -> Option> { assert_eq!(self.key, output.key()); // Call shim as shim returns the actual Self::shim_forward_plan(output, to) } } ================================================ FILE: processor/src/networks/bitcoin.rs ================================================ use std::{sync::OnceLock, time::Duration, io, collections::HashMap}; use async_trait::async_trait; use scale::{Encode, Decode}; use ciphersuite::group::ff::PrimeField; use k256::{ProjectivePoint, Scalar}; use frost::{ curve::{Curve, Secp256k1}, ThresholdKeys, }; use tokio::time::sleep; use bitcoin_serai::{ bitcoin::{ hashes::Hash as HashTrait, 
key::{Parity, XOnlyPublicKey}, consensus::{Encodable, Decodable}, script::Instruction, Transaction, Block, ScriptBuf, opcodes::all::{OP_SHA256, OP_EQUALVERIFY}, }, wallet::{ tweak_keys, p2tr_script_buf, ReceivedOutput, Scanner, TransactionError, SignableTransaction as BSignableTransaction, TransactionMachine, }, rpc::{RpcError, Rpc}, }; #[cfg(test)] use bitcoin_serai::bitcoin::{ secp256k1::{SECP256K1, SecretKey, Message}, PrivateKey, PublicKey, sighash::{EcdsaSighashType, SighashCache}, script::PushBytesBuf, absolute::LockTime, Amount as BAmount, Sequence, Script, Witness, OutPoint, transaction::Version, blockdata::transaction::{TxIn, TxOut}, }; use serai_client::{ primitives::{MAX_DATA_LEN, ExternalCoin, ExternalNetworkId, Amount, ExternalBalance}, networks::bitcoin::Address, }; use crate::{ networks::{ NetworkError, Block as BlockTrait, OutputType, Output as OutputTrait, Transaction as TransactionTrait, SignableTransaction as SignableTransactionTrait, Eventuality as EventualityTrait, EventualitiesTracker, Network, UtxoNetwork, }, Payment, multisigs::scheduler::utxo::Scheduler, }; #[derive(Clone, PartialEq, Eq, Debug)] pub struct OutputId(pub [u8; 36]); impl Default for OutputId { fn default() -> Self { Self([0; 36]) } } impl AsRef<[u8]> for OutputId { fn as_ref(&self) -> &[u8] { self.0.as_ref() } } impl AsMut<[u8]> for OutputId { fn as_mut(&mut self) -> &mut [u8] { self.0.as_mut() } } #[derive(Clone, PartialEq, Eq, Debug)] pub struct Output { kind: OutputType, presumed_origin: Option
, output: ReceivedOutput, data: Vec, } impl OutputTrait for Output { type Id = OutputId; fn kind(&self) -> OutputType { self.kind } fn id(&self) -> Self::Id { let mut res = OutputId::default(); self.output.outpoint().consensus_encode(&mut res.as_mut()).unwrap(); debug_assert_eq!( { let mut outpoint = vec![]; self.output.outpoint().consensus_encode(&mut outpoint).unwrap(); outpoint }, res.as_ref().to_vec() ); res } fn tx_id(&self) -> [u8; 32] { let mut hash = *self.output.outpoint().txid.as_raw_hash().as_byte_array(); hash.reverse(); hash } fn key(&self) -> ProjectivePoint { let script = &self.output.output().script_pubkey; assert!(script.is_p2tr()); let Instruction::PushBytes(key) = script.instructions_minimal().last().unwrap().unwrap() else { panic!("last item in v1 Taproot script wasn't bytes") }; let key = XOnlyPublicKey::from_slice(key.as_ref()) .expect("last item in v1 Taproot script wasn't x-only public key"); Secp256k1::read_G(&mut key.public_key(Parity::Even).serialize().as_slice()).unwrap() - (ProjectivePoint::GENERATOR * self.output.offset()) } fn presumed_origin(&self) -> Option
{ self.presumed_origin.clone() } fn balance(&self) -> ExternalBalance { ExternalBalance { coin: ExternalCoin::Bitcoin, amount: Amount(self.output.value()) } } fn data(&self) -> &[u8] { &self.data } fn write(&self, writer: &mut W) -> io::Result<()> { self.kind.write(writer)?; let presumed_origin: Option> = self.presumed_origin.clone().map(Into::into); writer.write_all(&presumed_origin.encode())?; self.output.write(writer)?; writer.write_all(&u16::try_from(self.data.len()).unwrap().to_le_bytes())?; writer.write_all(&self.data) } fn read(mut reader: &mut R) -> io::Result { Ok(Output { kind: OutputType::read(reader)?, presumed_origin: { let mut io_reader = scale::IoReader(reader); let res = Option::>::decode(&mut io_reader) .unwrap() .map(|address| Address::try_from(address).unwrap()); reader = io_reader.0; res }, output: ReceivedOutput::read(reader)?, data: { let mut data_len = [0; 2]; reader.read_exact(&mut data_len)?; let mut data = vec![0; usize::from(u16::from_le_bytes(data_len))]; reader.read_exact(&mut data)?; data }, }) } } #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub struct Fee(u64); #[async_trait] impl TransactionTrait for Transaction { type Id = [u8; 32]; fn id(&self) -> Self::Id { let mut hash = *self.compute_txid().as_raw_hash().as_byte_array(); hash.reverse(); hash } #[cfg(test)] async fn fee(&self, network: &Bitcoin) -> u64 { let mut value = 0; for input in &self.input { let output = input.previous_output; let mut hash = *output.txid.as_raw_hash().as_byte_array(); hash.reverse(); value += network.rpc.get_transaction(&hash).await.unwrap().output [usize::try_from(output.vout).unwrap()] .value .to_sat(); } for output in &self.output { value -= output.value.to_sat(); } value } } #[derive(Clone, PartialEq, Eq, Debug)] pub struct Eventuality([u8; 32]); #[derive(Clone, PartialEq, Eq, Default, Debug)] pub struct EmptyClaim; impl AsRef<[u8]> for EmptyClaim { fn as_ref(&self) -> &[u8] { &[] } } impl AsMut<[u8]> for EmptyClaim { fn as_mut(&mut self) -> &mut 
[u8] { &mut [] } } impl EventualityTrait for Eventuality { type Claim = EmptyClaim; type Completion = Transaction; fn lookup(&self) -> Vec { self.0.to_vec() } fn read(reader: &mut R) -> io::Result { let mut id = [0; 32]; reader .read_exact(&mut id) .map_err(|_| io::Error::other("couldn't decode ID in eventuality"))?; Ok(Eventuality(id)) } fn serialize(&self) -> Vec { self.0.to_vec() } fn claim(_: &Transaction) -> EmptyClaim { EmptyClaim } fn serialize_completion(completion: &Transaction) -> Vec { let mut buf = vec![]; completion.consensus_encode(&mut buf).unwrap(); buf } fn read_completion(reader: &mut R) -> io::Result { Transaction::consensus_decode(&mut io::BufReader::with_capacity(0, reader)) .map_err(|e| io::Error::other(format!("{e}"))) } } #[derive(Clone, Debug)] pub struct SignableTransaction { actual: BSignableTransaction, } impl PartialEq for SignableTransaction { fn eq(&self, other: &SignableTransaction) -> bool { self.actual == other.actual } } impl Eq for SignableTransaction {} impl SignableTransactionTrait for SignableTransaction { fn fee(&self) -> u64 { self.actual.fee() } } #[async_trait] impl BlockTrait for Block { type Id = [u8; 32]; fn id(&self) -> Self::Id { let mut hash = *self.block_hash().as_raw_hash().as_byte_array(); hash.reverse(); hash } fn parent(&self) -> Self::Id { let mut hash = *self.header.prev_blockhash.as_raw_hash().as_byte_array(); hash.reverse(); hash } async fn time(&self, rpc: &Bitcoin) -> u64 { // Use the network median time defined in BIP-0113 since the in-block time isn't guaranteed to // be monotonic let mut timestamps = vec![u64::from(self.header.time)]; let mut parent = self.parent(); // BIP-0113 uses a median of the prior 11 blocks while timestamps.len() < 11 { let mut parent_block; while { parent_block = rpc.rpc.get_block(&parent).await; parent_block.is_err() } { log::error!("couldn't get parent block when trying to get block time: {parent_block:?}"); sleep(Duration::from_secs(5)).await; } let parent_block = 
parent_block.unwrap();
      timestamps.push(u64::from(parent_block.header.time));
      parent = parent_block.parent();

      // Stop walking back if we've reached the genesis block
      if parent == [0; 32] {
        break;
      }
    }
    timestamps.sort();
    // Median of the collected timestamps, per BIP-0113
    timestamps[timestamps.len() / 2]
  }
}

// Domain-separation tag for the offsets tweaking the group key into per-kind addresses
const KEY_DST: &[u8] = b"Serai Bitcoin Output Offset";
static BRANCH_OFFSET: OnceLock<Scalar> = OnceLock::new();
static CHANGE_OFFSET: OnceLock<Scalar> = OnceLock::new();
static FORWARD_OFFSET: OnceLock<Scalar> = OnceLock::new();

// Always construct the full scanner in order to ensure there's no collisions
//
// Returns the scanner itself, the offset used for each OutputType, and the reverse map from an
// offset's scalar representation back to the OutputType it denotes.
fn scanner(
  key: ProjectivePoint,
) -> (Scanner, HashMap<OutputType, Scalar>, HashMap<Vec<u8>, OutputType>) {
  let mut scanner = Scanner::new(key).unwrap();

  // External outputs use the zero offset (the key itself)
  let mut offsets = HashMap::from([(OutputType::External, Scalar::ZERO)]);
  let zero = Scalar::ZERO.to_repr();
  let zero_ref: &[u8] = zero.as_ref();
  let mut kinds = HashMap::from([(zero_ref.to_vec(), OutputType::External)]);

  // Register an offset with the scanner and record it in both maps
  let mut register = |kind, offset| {
    let offset = scanner.register_offset(offset).expect("offset collision");
    offsets.insert(kind, offset);

    let offset = offset.to_repr();
    let offset_ref: &[u8] = offset.as_ref();
    kinds.insert(offset_ref.to_vec(), kind);
  };

  register(
    OutputType::Branch,
    *BRANCH_OFFSET.get_or_init(|| Secp256k1::hash_to_F(KEY_DST, b"branch")),
  );
  register(
    OutputType::Change,
    *CHANGE_OFFSET.get_or_init(|| Secp256k1::hash_to_F(KEY_DST, b"change")),
  );
  register(
    OutputType::Forwarded,
    *FORWARD_OFFSET.get_or_init(|| Secp256k1::hash_to_F(KEY_DST, b"forward")),
  );

  (scanner, offsets, kinds)
}

#[derive(Clone, Debug)]
pub struct Bitcoin {
  pub(crate) rpc: Rpc,
}
// Shim required for testing/debugging purposes due to generic arguments also necessitating trait
// bounds
impl PartialEq for Bitcoin {
  fn eq(&self, _: &Self) -> bool {
    true
  }
}
impl Eq for Bitcoin {}

impl Bitcoin {
  // Construct a Bitcoin network handle, retrying until the node is reachable.
  pub async fn new(url: String) -> Bitcoin {
    let mut res = Rpc::new(url.clone()).await;
    while let Err(e) = res {
      log::error!("couldn't connect to Bitcoin node: {e:?}");
      sleep(Duration::from_secs(5)).await;
      res = Rpc::new(url.clone()).await;
    }
    Bitcoin { rpc: res.unwrap() }
  }
#[cfg(test)] pub async fn fresh_chain(&self) { if self.rpc.get_latest_block_number().await.unwrap() > 0 { self .rpc .rpc_call( "invalidateblock", serde_json::json!([hex::encode(self.rpc.get_block_hash(1).await.unwrap())]), ) .await .unwrap() } } // This function panics on a node which doesn't follow the Bitcoin protocol, which is deemed fine async fn median_fee(&self, block: &Block) -> Result { let mut fees = vec![]; if block.txdata.len() > 1 { for tx in &block.txdata[1 ..] { let mut in_value = 0; for input in &tx.input { let mut input_tx = input.previous_output.txid.to_raw_hash().to_byte_array(); input_tx.reverse(); in_value += self .rpc .get_transaction(&input_tx) .await .map_err(|_| NetworkError::ConnectionError)? .output[usize::try_from(input.previous_output.vout).unwrap()] .value .to_sat(); } let out = tx.output.iter().map(|output| output.value.to_sat()).sum::(); fees.push((in_value - out) / u64::try_from(tx.vsize()).unwrap()); } } fees.sort(); let fee = fees.get(fees.len() / 2).copied().unwrap_or(0); // The DUST constant documentation notes a relay rule practically enforcing a // 1000 sat/kilo-vbyte minimum fee. Ok(Fee(fee.max(1))) } async fn make_signable_transaction( &self, block_number: usize, inputs: &[Output], payments: &[Payment], change: &Option
, calculating_fee: bool, ) -> Result, NetworkError> { for payment in payments { assert_eq!(payment.balance.coin, ExternalCoin::Bitcoin); } // TODO2: Use an fee representative of several blocks, cached inside Self let block_for_fee = self.get_block(block_number).await?; let fee = self.median_fee(&block_for_fee).await?; let payments = payments .iter() .map(|payment| { ( payment.address.clone().into(), // If we're solely estimating the fee, don't specify the actual amount // This won't affect the fee calculation yet will ensure we don't hit a not enough funds // error if calculating_fee { Self::DUST } else { payment.balance.amount.0 }, ) }) .collect::>(); match BSignableTransaction::new( inputs.iter().map(|input| input.output.clone()).collect(), &payments, change.clone().map(Into::into), None, fee.0, ) { Ok(signable) => Ok(Some(signable)), Err(TransactionError::NoInputs) => { panic!("trying to create a bitcoin transaction without inputs") } // No outputs left and the change isn't worth enough/not even enough funds to pay the fee Err(TransactionError::NoOutputs | TransactionError::NotEnoughFunds { .. }) => Ok(None), // amortize_fee removes payments which fall below the dust threshold Err(TransactionError::DustPayment) => panic!("dust payment despite removing dust"), Err(TransactionError::TooMuchData) => { panic!("too much data despite not specifying data") } Err(TransactionError::TooLowFee) => { panic!("created a transaction whose fee is below the minimum") } Err(TransactionError::TooLargeTransaction) => { panic!("created a too large transaction despite limiting inputs/outputs") } } } // Expected script has to start with SHA256 PUSH MSG_HASH OP_EQUALVERIFY .. fn segwit_data_pattern(script: &ScriptBuf) -> Option { let mut ins = script.instructions(); // first item should be SHA256 code if ins.next()?.ok()?.opcode()? 
!= OP_SHA256 { return Some(false); } // next should be a data push ins.next()?.ok()?.push_bytes()?; // next should be a equality check if ins.next()?.ok()?.opcode()? != OP_EQUALVERIFY { return Some(false); } Some(true) } fn extract_serai_data(tx: &Transaction) -> Vec { // check outputs let mut data = (|| { for output in &tx.output { if output.script_pubkey.is_op_return() { match output.script_pubkey.instructions_minimal().last() { Some(Ok(Instruction::PushBytes(data))) => return data.as_bytes().to_vec(), _ => continue, } } } vec![] })(); // check inputs if data.is_empty() { for input in &tx.input { let witness = input.witness.to_vec(); // expected witness at least has to have 2 items, msg and the redeem script. if witness.len() >= 2 { let redeem_script = ScriptBuf::from_bytes(witness.last().unwrap().clone()); if Self::segwit_data_pattern(&redeem_script) == Some(true) { data.clone_from(&witness[witness.len() - 2]); // len() - 1 is the redeem_script break; } } } } data.truncate(MAX_DATA_LEN.try_into().unwrap()); data } #[cfg(test)] pub fn sign_btc_input_for_p2pkh( tx: &Transaction, input_index: usize, private_key: &PrivateKey, ) -> ScriptBuf { use bitcoin_serai::bitcoin::{Network as BNetwork, Address as BAddress}; let public_key = PublicKey::from_private_key(SECP256K1, private_key); let main_addr = BAddress::p2pkh(public_key, BNetwork::Regtest); let mut der = SECP256K1 .sign_ecdsa_low_r( &Message::from_digest_slice( SighashCache::new(tx) .legacy_signature_hash( input_index, &main_addr.script_pubkey(), EcdsaSighashType::All.to_u32(), ) .unwrap() .to_raw_hash() .as_ref(), ) .unwrap(), &private_key.inner, ) .serialize_der() .to_vec(); der.push(1); ScriptBuf::builder() .push_slice(PushBytesBuf::try_from(der).unwrap()) .push_key(&public_key) .into_script() } } // Bitcoin has a max weight of 400,000 (MAX_STANDARD_TX_WEIGHT) // A non-SegWit TX will have 4 weight units per byte, leaving a max size of 100,000 bytes // While our inputs are entirely SegWit, such fine tuning is 
not necessary and could create // issues in the future (if the size decreases or we misevaluate it) // It also offers a minimal amount of benefit when we are able to logarithmically accumulate // inputs // For 128-byte inputs (36-byte output specification, 64-byte signature, whatever overhead) and // 64-byte outputs (40-byte script, 8-byte amount, whatever overhead), they together take up 192 // bytes // 100,000 / 192 = 520 // 520 * 192 leaves 160 bytes of overhead for the transaction structure itself const MAX_INPUTS: usize = 520; const MAX_OUTPUTS: usize = 520; fn address_from_key(key: ProjectivePoint) -> Address { Address::new( p2tr_script_buf(key).expect("creating address from key which isn't properly tweaked"), ) .expect("couldn't create Serai-representable address for P2TR script") } #[async_trait] impl Network for Bitcoin { type Curve = Secp256k1; type Transaction = Transaction; type Block = Block; type Output = Output; type SignableTransaction = SignableTransaction; type Eventuality = Eventuality; type TransactionMachine = TransactionMachine; type Scheduler = Scheduler; type Address = Address; const NETWORK: ExternalNetworkId = ExternalNetworkId::Bitcoin; const ID: &'static str = "Bitcoin"; const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize = 600; const CONFIRMATIONS: usize = 6; /* A Taproot input is: - 36 bytes for the OutPoint - 0 bytes for the script (+1 byte for the length) - 4 bytes for the sequence Per https://developer.bitcoin.org/reference/transactions.html#raw-transaction-format There's also: - 1 byte for the witness length - 1 byte for the signature length - 64 bytes for the signature which have the SegWit discount. (4 * (36 + 1 + 4)) + (1 + 1 + 64) = 164 + 66 = 230 weight units 230 ceil div 4 = 57 vbytes Bitcoin defines multiple minimum feerate constants *per kilo-vbyte*. 
Currently, these are: - 1000 sat/kilo-vbyte for a transaction to be relayed - Each output's value must exceed the fee of the TX spending it at 3000 sat/kilo-vbyte The DUST constant needs to be determined by the latter. Since these are solely relay rules, and may be raised, we require all outputs be spendable under a 5000 sat/kilo-vbyte fee rate. 5000 sat/kilo-vbyte = 5 sat/vbyte 5 * 57 = 285 sats/spent-output Even if an output took 100 bytes (it should be just ~29-43), taking 400 weight units, adding 100 vbytes, tripling the transaction size, then the sats/tx would be < 1000. Increase by an order of magnitude, in order to ensure this is actually worth our time, and we get 10,000 satoshis. */ const DUST: u64 = 10_000; // 2 inputs should be 2 * 230 = 460 weight units // The output should be ~36 bytes, or 144 weight units // The overhead should be ~20 bytes at most, or 80 weight units // 684 weight units, 171 vbytes, round up to 200 // 200 vbytes at 1 sat/weight (our current minimum fee, 4 sat/vbyte) = 800 sat fee for the // aggregation TX const COST_TO_AGGREGATE: u64 = 800; const MAX_OUTPUTS: usize = MAX_OUTPUTS; fn tweak_keys(keys: &mut ThresholdKeys) { *keys = tweak_keys(keys.clone()); // Also create a scanner to assert these keys, and all expected paths, are usable scanner(keys.group_key()); } #[cfg(test)] async fn external_address(&self, key: ProjectivePoint) -> Address { address_from_key(key) } fn branch_address(key: ProjectivePoint) -> Option
{ let (_, offsets, _) = scanner(key); Some(address_from_key(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Branch]))) } fn change_address(key: ProjectivePoint) -> Option
{ let (_, offsets, _) = scanner(key); Some(address_from_key(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Change]))) } fn forward_address(key: ProjectivePoint) -> Option
{ let (_, offsets, _) = scanner(key); Some(address_from_key(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Forwarded]))) } async fn get_latest_block_number(&self) -> Result { self.rpc.get_latest_block_number().await.map_err(|_| NetworkError::ConnectionError) } async fn get_block(&self, number: usize) -> Result { let block_hash = self.rpc.get_block_hash(number).await.map_err(|_| NetworkError::ConnectionError)?; self.rpc.get_block(&block_hash).await.map_err(|_| NetworkError::ConnectionError) } async fn get_outputs(&self, block: &Self::Block, key: ProjectivePoint) -> Vec { let (scanner, _, kinds) = scanner(key); let mut outputs = vec![]; // Skip the coinbase transaction which is burdened by maturity for tx in &block.txdata[1 ..] { for output in scanner.scan_transaction(tx) { let offset_repr = output.offset().to_repr(); let offset_repr_ref: &[u8] = offset_repr.as_ref(); let kind = kinds[offset_repr_ref]; let output = Output { kind, presumed_origin: None, output, data: vec![] }; assert_eq!(output.tx_id(), tx.id()); outputs.push(output); } if outputs.is_empty() { continue; } // populate the outputs with the origin and data let presumed_origin = { // This may identify the P2WSH output *embedding the InInstruction* as the origin, which // would be a bit trickier to spend that a traditional output... 
// There's no risk of the InInstruction going missing as it'd already be on-chain though // We *could* parse out the script *without the InInstruction prefix* and declare that the // origin // TODO let spent_output = { let input = &tx.input[0]; let mut spent_tx = input.previous_output.txid.as_raw_hash().to_byte_array(); spent_tx.reverse(); let mut tx; while { tx = self.rpc.get_transaction(&spent_tx).await; tx.is_err() } { log::error!("couldn't get transaction from bitcoin node: {tx:?}"); sleep(Duration::from_secs(5)).await; } tx.unwrap().output.swap_remove(usize::try_from(input.previous_output.vout).unwrap()) }; Address::new(spent_output.script_pubkey) }; let data = Self::extract_serai_data(tx); for output in &mut outputs { if output.kind == OutputType::External { output.data.clone_from(&data); } output.presumed_origin.clone_from(&presumed_origin); } } outputs } async fn get_eventuality_completions( &self, eventualities: &mut EventualitiesTracker, block: &Self::Block, ) -> HashMap<[u8; 32], (usize, [u8; 32], Transaction)> { let mut res = HashMap::new(); if eventualities.map.is_empty() { return res; } fn check_block( eventualities: &mut EventualitiesTracker, block: &Block, res: &mut HashMap<[u8; 32], (usize, [u8; 32], Transaction)>, ) { for tx in &block.txdata[1 ..] { if let Some((plan, _)) = eventualities.map.remove(tx.id().as_slice()) { res.insert(plan, (eventualities.block_number, tx.id(), tx.clone())); } } eventualities.block_number += 1; } let this_block_hash = block.id(); let this_block_num = (async { loop { match self.rpc.get_block_number(&this_block_hash).await { Ok(number) => return number, Err(e) => { log::error!("couldn't get the block number for {}: {}", hex::encode(this_block_hash), e) } } sleep(Duration::from_secs(60)).await; } }) .await; for block_num in (eventualities.block_number + 1) .. 
this_block_num { let block = { let mut block; while { block = self.get_block(block_num).await; block.is_err() } { log::error!("couldn't get block {}: {}", block_num, block.err().unwrap()); sleep(Duration::from_secs(60)).await; } block.unwrap() }; check_block(eventualities, &block, &mut res); } // Also check the current block check_block(eventualities, block, &mut res); assert_eq!(eventualities.block_number, this_block_num); res } async fn needed_fee( &self, block_number: usize, inputs: &[Output], payments: &[Payment], change: &Option
<Address>,
  ) -> Result<Option<u64>, NetworkError> {
    // Build the transaction in fee-estimation mode (`true`) solely to read the fee it'd need,
    // returning None if no transaction would be created (no outputs/not enough funds)
    Ok(
      self
        .make_signable_transaction(block_number, inputs, payments, change, true)
        .await?
        .map(|signable| signable.needed_fee()),
    )
  }

  /// Create a signable transaction, and the eventuality used to track its on-chain completion,
  /// for this plan.
  async fn signable_transaction(
    &self,
    block_number: usize,
    _plan_id: &[u8; 32],
    _key: ProjectivePoint,
    inputs: &[Output],
    payments: &[Payment<Self>],
    change: &Option<Address>
, (): &(), ) -> Result, NetworkError> { Ok(self.make_signable_transaction(block_number, inputs, payments, change, false).await?.map( |signable| { let eventuality = Eventuality(signable.txid()); (SignableTransaction { actual: signable }, eventuality) }, )) } async fn attempt_sign( &self, keys: ThresholdKeys, transaction: Self::SignableTransaction, ) -> Result { Ok(transaction.actual.clone().multisig(&keys).expect("used the wrong keys")) } async fn publish_completion(&self, tx: &Transaction) -> Result<(), NetworkError> { match self.rpc.send_raw_transaction(tx).await { Ok(_) => (), Err(RpcError::ConnectionError) => Err(NetworkError::ConnectionError)?, // TODO: Distinguish already in pool vs double spend (other signing attempt succeeded) vs // invalid transaction Err(e) => panic!("failed to publish TX {}: {e}", tx.compute_txid()), } Ok(()) } async fn confirm_completion( &self, eventuality: &Self::Eventuality, _: &EmptyClaim, ) -> Result, NetworkError> { Ok(Some( self.rpc.get_transaction(&eventuality.0).await.map_err(|_| NetworkError::ConnectionError)?, )) } #[cfg(test)] async fn get_block_number(&self, id: &[u8; 32]) -> usize { self.rpc.get_block_number(id).await.unwrap() } #[cfg(test)] async fn check_eventuality_by_claim( &self, eventuality: &Self::Eventuality, _: &EmptyClaim, ) -> bool { self.rpc.get_transaction(&eventuality.0).await.is_ok() } #[cfg(test)] async fn get_transaction_by_eventuality(&self, _: usize, id: &Eventuality) -> Transaction { self.rpc.get_transaction(&id.0).await.unwrap() } #[cfg(test)] async fn mine_block(&self) { use bitcoin_serai::bitcoin::{Network as BNetwork, Address as BAddress}; self .rpc .rpc_call::>( "generatetoaddress", serde_json::json!([1, BAddress::p2sh(Script::new(), BNetwork::Regtest).unwrap()]), ) .await .unwrap(); } #[cfg(test)] async fn test_send(&self, address: Address) -> Block { use bitcoin_serai::bitcoin::{Network as BNetwork, Address as BAddress}; let secret_key = SecretKey::new(&mut rand_core::OsRng); let private_key = 
PrivateKey::new(secret_key, BNetwork::Regtest); let public_key = PublicKey::from_private_key(SECP256K1, &private_key); let main_addr = BAddress::p2pkh(public_key, BNetwork::Regtest); let new_block = self.get_latest_block_number().await.unwrap() + 1; self .rpc .rpc_call::>("generatetoaddress", serde_json::json!([100, main_addr])) .await .unwrap(); let tx = self.get_block(new_block).await.unwrap().txdata.swap_remove(0); let mut tx = Transaction { version: Version(2), lock_time: LockTime::ZERO, input: vec![TxIn { previous_output: OutPoint { txid: tx.compute_txid(), vout: 0 }, script_sig: Script::new().into(), sequence: Sequence(u32::MAX), witness: Witness::default(), }], output: vec![TxOut { value: tx.output[0].value - BAmount::from_sat(10000), script_pubkey: address.clone().into(), }], }; tx.input[0].script_sig = Self::sign_btc_input_for_p2pkh(&tx, 0, &private_key); let block = self.get_latest_block_number().await.unwrap() + 1; self.rpc.send_raw_transaction(&tx).await.unwrap(); for _ in 0 .. 
Self::CONFIRMATIONS { self.mine_block().await; } self.get_block(block).await.unwrap() } } impl UtxoNetwork for Bitcoin { const MAX_INPUTS: usize = MAX_INPUTS; } ================================================ FILE: processor/src/networks/ethereum.rs ================================================ #![allow(deprecated)] use core::{fmt, time::Duration}; use std::{ sync::Arc, collections::{HashSet, HashMap}, io, }; use async_trait::async_trait; use ciphersuite::{group::GroupEncoding, Ciphersuite}; use ciphersuite_kp256::Secp256k1; use frost::ThresholdKeys; use ethereum_serai::{ alloy::{ primitives::U256, rpc_types::{BlockTransactionsKind, BlockNumberOrTag, Transaction}, simple_request_transport::SimpleRequest, rpc_client::ClientBuilder, provider::{Provider, RootProvider}, }, crypto::{PublicKey, Signature}, erc20::Erc20, deployer::Deployer, router::{Router, Coin as EthereumCoin, InInstruction as EthereumInInstruction}, machine::*, }; #[cfg(test)] use ethereum_serai::alloy::primitives::B256; use tokio::{ time::sleep, sync::{RwLock, RwLockReadGuard}, }; #[cfg(not(test))] use tokio::{ io::{AsyncReadExt, AsyncWriteExt}, net::TcpStream, }; use serai_client::{ primitives::{ExternalCoin, Amount, ExternalBalance, ExternalNetworkId}, validator_sets::primitives::Session, }; use crate::{ Db, Payment, networks::{ OutputType, Output, Transaction as TransactionTrait, SignableTransaction, Block, Eventuality as EventualityTrait, EventualitiesTracker, NetworkError, Network, }, key_gen::NetworkKeyDb, multisigs::scheduler::{ Scheduler as SchedulerTrait, smart_contract::{Addendum, Scheduler}, }, }; #[cfg(not(test))] const DAI: [u8; 20] = match const_hex::const_decode_to_array(b"0x6B175474E89094C44Da98b954EedeAC495271d0F") { Ok(res) => res, Err(_) => panic!("invalid non-test DAI hex address"), }; #[cfg(test)] // TODO const DAI: [u8; 20] = match const_hex::const_decode_to_array(b"0000000000000000000000000000000000000000") { Ok(res) => res, Err(_) => panic!("invalid test DAI hex address"), 
};

/// Map an Ethereum-native representation of a coin to the Serai coin, if Serai recognizes it.
fn coin_to_serai_coin(coin: &EthereumCoin) -> Option<ExternalCoin> {
  match coin {
    EthereumCoin::Ether => Some(ExternalCoin::Ether),
    EthereumCoin::Erc20(token) => {
      if *token == DAI {
        return Some(ExternalCoin::Dai);
      }
      None
    }
  }
}

/// Convert an 18-decimal Ethereum amount to Serai's 8-decimal representation.
fn amount_to_serai_amount(coin: ExternalCoin, amount: U256) -> Amount {
  assert_eq!(coin.network(), ExternalNetworkId::Ethereum);
  assert_eq!(coin.decimals(), 8);
  // Remove 10 decimals so we go from 18 decimals to 8 decimals
  let divisor = U256::from(10_000_000_000u64);
  // This is valid up to 184b, which is assumed for the coins allowed
  Amount(u64::try_from(amount / divisor).unwrap())
}

/// Convert a Serai 8-decimal balance back to an 18-decimal Ethereum amount.
fn balance_to_ethereum_amount(balance: ExternalBalance) -> U256 {
  assert_eq!(balance.coin.network(), ExternalNetworkId::Ethereum);
  assert_eq!(balance.coin.decimals(), 8);
  // Restore 10 decimals so we go from 8 decimals to 18 decimals
  let factor = U256::from(10_000_000_000u64);
  U256::from(balance.amount.0) * factor
}

/// A 20-byte Ethereum address.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Address(pub [u8; 20]);
impl TryFrom<Vec<u8>> for Address {
  type Error = ();
  // Fails if the byte vector isn't exactly 20 bytes long.
  fn try_from(bytes: Vec<u8>) -> Result<Address, ()> {
    if bytes.len() != 20 {
      Err(())?;
    }
    let mut res = [0; 20];
    res.copy_from_slice(&bytes);
    Ok(Address(res))
  }
}
impl TryInto<Vec<u8>> for Address {
  type Error = ();
  fn try_into(self) -> Result<Vec<u8>, ()> {
    Ok(self.0.to_vec())
  }
}

impl fmt::Display for Address {
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    ethereum_serai::alloy::primitives::Address::from(self.0).fmt(f)
  }
}

impl SignableTransaction for RouterCommand {
  fn fee(&self) -> u64 {
    // Return a fee of 0 as we'll handle amortization on our end
    0
  }
}

#[async_trait]
impl<D: Db> TransactionTrait<Ethereum<D>> for Transaction {
  type Id = [u8; 32];
  fn id(&self) -> Self::Id {
    self.hash.0
  }

  #[cfg(test)]
  async fn fee(&self, _network: &Ethereum<D>) -> u64 {
    // Return a fee of 0 as we'll handle amortization on our end
    0
  }
}

// We use 32-block Epochs to represent blocks.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Epoch {
  // The hash of the block which ended the prior Epoch.
prior_end_hash: [u8; 32], // The first block number within this Epoch. start: u64, // The hash of the last block within this Epoch. end_hash: [u8; 32], // The monotonic time for this Epoch. time: u64, } impl Epoch { fn end(&self) -> u64 { self.start + 31 } } #[async_trait] impl Block> for Epoch { type Id = [u8; 32]; fn id(&self) -> [u8; 32] { self.end_hash } fn parent(&self) -> [u8; 32] { self.prior_end_hash } async fn time(&self, _: &Ethereum) -> u64 { self.time } } impl Output> for EthereumInInstruction { type Id = [u8; 32]; fn kind(&self) -> OutputType { OutputType::External } fn id(&self) -> Self::Id { let mut id = [0; 40]; id[.. 32].copy_from_slice(&self.id.0); id[32 ..].copy_from_slice(&self.id.1.to_le_bytes()); *ethereum_serai::alloy::primitives::keccak256(id) } fn tx_id(&self) -> [u8; 32] { self.id.0 } fn key(&self) -> ::G { self.key_at_end_of_block } fn presumed_origin(&self) -> Option
{ Some(Address(self.from)) } fn balance(&self) -> ExternalBalance { let coin = coin_to_serai_coin(&self.coin).unwrap_or_else(|| { panic!( "requesting coin for an EthereumInInstruction with a coin {}", "we don't handle. this never should have been yielded" ) }); ExternalBalance { coin, amount: amount_to_serai_amount(coin, self.amount) } } fn data(&self) -> &[u8] { &self.data } fn write(&self, writer: &mut W) -> io::Result<()> { EthereumInInstruction::write(self, writer) } fn read(reader: &mut R) -> io::Result { EthereumInInstruction::read(reader) } } #[derive(Clone, PartialEq, Eq, Debug)] pub struct Claim { signature: [u8; 64], } impl AsRef<[u8]> for Claim { fn as_ref(&self) -> &[u8] { &self.signature } } impl AsMut<[u8]> for Claim { fn as_mut(&mut self) -> &mut [u8] { &mut self.signature } } impl Default for Claim { fn default() -> Self { Self { signature: [0; 64] } } } impl From<&Signature> for Claim { fn from(sig: &Signature) -> Self { Self { signature: sig.to_bytes() } } } #[derive(Clone, PartialEq, Eq, Debug)] pub struct Eventuality(PublicKey, RouterCommand); impl EventualityTrait for Eventuality { type Claim = Claim; type Completion = SignedRouterCommand; fn lookup(&self) -> Vec { match self.1 { RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. 
} => {
        nonce.as_le_bytes().to_vec()
      }
    }
  }

  fn read(reader: &mut R) -> io::Result {
    let point = Secp256k1::read_G(reader)?;
    let command = RouterCommand::read(reader)?;
    Ok(Eventuality(
      PublicKey::new(point).ok_or(io::Error::other("unusable key within Eventuality"))?,
      command,
    ))
  }
  // Serialized as the compressed public key followed by the command.
  fn serialize(&self) -> Vec {
    let mut res = vec![];
    res.extend(self.0.point().to_bytes().as_slice());
    self.1.write(&mut res).unwrap();
    res
  }

  // The claim is simply the completion's signature.
  fn claim(completion: &Self::Completion) -> Self::Claim {
    Claim::from(completion.signature())
  }
  fn serialize_completion(completion: &Self::Completion) -> Vec {
    let mut res = vec![];
    completion.write(&mut res).unwrap();
    res
  }
  fn read_completion(reader: &mut R) -> io::Result {
    SignedRouterCommand::read(reader)
  }
}

#[derive(Clone)]
pub struct Ethereum {
  // This DB is solely used to access the first key generated, as needed to determine the Router's
  // address. Accordingly, all methods present are consistent to a Serai chain with a finalized
  // first key (regardless of local state), and this is safe.
  db: D,
  // URL of the relayer server completions are published to (unused under cfg(test)).
  #[cfg_attr(test, allow(unused))]
  relayer_url: String,
  provider: Arc>,
  deployer: Deployer,
  // Lazily-resolved Router; populated by `router()` once the first key exists.
  router: Arc>>,
}

// All Ethereum instances are interchangeable for comparison purposes, hence the constant `true`.
impl PartialEq for Ethereum {
  fn eq(&self, _other: &Ethereum) -> bool {
    true
  }
}
impl fmt::Debug for Ethereum {
  fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
    fmt
      .debug_struct("Ethereum")
      .field("deployer", &self.deployer)
      .field("router", &self.router)
      .finish_non_exhaustive()
  }
}

impl Ethereum {
  // Connect to the node at `daemon_url`, retrying every 5 seconds until the Deployer contract is
  // found on-chain.
  pub async fn new(db: D, daemon_url: String, relayer_url: String) -> Self {
    let provider = Arc::new(RootProvider::new(
      ClientBuilder::default().transport(SimpleRequest::new(daemon_url), true),
    ));

    let mut deployer = Deployer::new(provider.clone()).await;
    while !matches!(deployer, Ok(Some(_))) {
      log::error!("Deployer wasn't deployed yet or networking error");
      sleep(Duration::from_secs(5)).await;
      deployer = Deployer::new(provider.clone()).await;
    }
    let deployer = deployer.unwrap().unwrap();

    // NOTE(review): leftover debug macros — these print the relayer URL to stderr on every
    // construction and should be removed before release.
    dbg!(&relayer_url);
    dbg!(relayer_url.len());
    Ethereum { db, relayer_url, provider, deployer, router: Arc::new(RwLock::new(None)) }
  }

  // Obtain a reference to the Router, sleeping until it's deployed if it hasn't already been.
  // This is guaranteed to return Some.
pub async fn router(&self) -> RwLockReadGuard<'_, Option> {
    // If we've already instantiated the Router, return a read reference
    {
      let router = self.router.read().await;
      if router.is_some() {
        return router;
      }
    }

    // Instantiate it
    let mut router = self.router.write().await;
    // If another attempt beat us to it, return
    if router.is_some() {
      drop(router);
      return self.router.read().await;
    }

    // Get the first key from the DB
    let first_key =
      NetworkKeyDb::get(&self.db, Session(0)).expect("getting outputs before confirming a key");
    let key = Secp256k1::read_G(&mut first_key.as_slice()).unwrap();
    let public_key = PublicKey::new(key).unwrap();

    // Find the router, retrying every 5 seconds until it's deployed/reachable
    let mut found = self.deployer.find_router(self.provider.clone(), &public_key).await;
    while !matches!(found, Ok(Some(_))) {
      log::error!("Router wasn't deployed yet or networking error");
      sleep(Duration::from_secs(5)).await;
      found = self.deployer.find_router(self.provider.clone(), &public_key).await;
    }

    // Set it
    *router = Some(found.unwrap().unwrap());

    // Downgrade to a read lock
    // Explicitly doesn't use `downgrade` so that another pending write txn can realize it's no
    // longer necessary
    drop(router);
    self.router.read().await
  }
}

#[async_trait]
impl Network for Ethereum {
  type Curve = Secp256k1;

  type Transaction = Transaction;
  type Block = Epoch;

  type Output = EthereumInInstruction;
  type SignableTransaction = RouterCommand;
  type Eventuality = Eventuality;
  type TransactionMachine = RouterCommandMachine;

  type Scheduler = Scheduler;

  type Address = Address;

  const NETWORK: ExternalNetworkId = ExternalNetworkId::Ethereum;
  const ID: &'static str = "Ethereum";
  // An Epoch is 32 blocks of ~12 seconds each.
  const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize = 32 * 12;
  // Only finalized Epochs are yielded, so a single confirmation suffices.
  const CONFIRMATIONS: usize = 1;

  const DUST: u64 = 0; // TODO

  const COST_TO_AGGREGATE: u64 = 0;

  // TODO: usize::max, with a merkle tree in the router
  const MAX_OUTPUTS: usize = 256;

  // Offset the group key until it's usable as an Ethereum-compatible public key.
  fn tweak_keys(keys: &mut ThresholdKeys) {
    while PublicKey::new(keys.group_key()).is_none() {
      *keys = keys.clone().offset(::F::ONE);
    }
  }

  // The external address is the Router's address.
  #[cfg(test)]
  async fn external_address(&self, _key: ::G) -> Address {
    Address(self.router().await.as_ref().unwrap().address())
  }

  // Ethereum is account-based: no distinct branch/change/forward addresses exist.
  fn branch_address(_key: ::G) -> Option
{
    None
  }
  fn change_address(_key: ::G) -> Option
{
    None
  }
  fn forward_address(_key: ::G) -> Option
{
    None
  }

  // The latest fully-finalized Epoch number: the finalized tip's block number / 32, minus one
  // for the Epoch still in progress.
  async fn get_latest_block_number(&self) -> Result {
    let actual_number = self
      .provider
      .get_block(BlockNumberOrTag::Finalized.into(), BlockTransactionsKind::Hashes)
      .await
      .map_err(|_| NetworkError::ConnectionError)?
      .ok_or(NetworkError::ConnectionError)?
      .header
      .number;
    // Error if there hasn't been a full epoch yet
    if actual_number < 32 {
      Err(NetworkError::ConnectionError)?
    }
    // If this is 33, the division will return 1, yet 1 is the epoch in progress
    let latest_full_epoch = (actual_number / 32).saturating_sub(1);
    Ok(latest_full_epoch.try_into().unwrap())
  }

  // Fetch Epoch `number` (blocks number*32 ..= number*32 + 31), erroring if it isn't finalized.
  async fn get_block(&self, number: usize) -> Result {
    let latest_finalized = self.get_latest_block_number().await?;
    if number > latest_finalized {
      Err(NetworkError::ConnectionError)?
    }

    let start = number * 32;
    // The hash of the block before this Epoch, zeroed for the genesis Epoch
    let prior_end_hash = if start == 0 {
      [0; 32]
    } else {
      self
        .provider
        .get_block(u64::try_from(start - 1).unwrap().into(), BlockTransactionsKind::Hashes)
        .await
        .ok()
        .flatten()
        .ok_or(NetworkError::ConnectionError)?
        .header
        .hash
        .into()
    };

    let end_header = self
      .provider
      .get_block(u64::try_from(start + 31).unwrap().into(), BlockTransactionsKind::Hashes)
      .await
      .ok()
      .flatten()
      .ok_or(NetworkError::ConnectionError)?
.header;

    let end_hash = end_header.hash.into();
    let time = end_header.timestamp;

    Ok(Epoch { prior_end_hash, start: start.try_into().unwrap(), end_hash, time })
  }

  // Scan an Epoch for InInstructions: top-level ERC20 transfers to the Router plus the Router's
  // own InInstruction events, all attributed to the key at the end of the Epoch.
  async fn get_outputs(
    &self,
    block: &Self::Block,
    _: ::G,
  ) -> Vec {
    let router = self.router().await;
    let router = router.as_ref().unwrap();

    // Grab the key at the end of the epoch
    let key_at_end_of_block = loop {
      match router.key_at_end_of_block(block.start + 31).await {
        Ok(Some(key)) => break key,
        // No key by the end of this Epoch means there's nothing to scan for
        Ok(None) => return vec![],
        Err(e) => {
          log::error!("couldn't connect to router for the key at the end of the block: {e:?}");
          sleep(Duration::from_secs(5)).await;
          continue;
        }
      }
    };

    let mut all_events = vec![];
    let mut top_level_txids = HashSet::new();
    // Collect top-level transfers of each tracked ERC20 (currently solely DAI)
    for erc20_addr in [DAI] {
      let erc20 = Erc20::new(self.provider.clone(), erc20_addr);
      for block in block.start .. (block.start + 32) {
        let transfers = loop {
          match erc20.top_level_transfers(block, router.address()).await {
            Ok(transfers) => break transfers,
            Err(e) => {
              log::error!("couldn't connect to Ethereum node for the top-level transfers: {e:?}");
              sleep(Duration::from_secs(5)).await;
              continue;
            }
          }
        };

        for transfer in transfers {
          top_level_txids.insert(transfer.id);
          all_events.push(EthereumInInstruction {
            id: (transfer.id, 0),
            from: transfer.from,
            coin: EthereumCoin::Erc20(erc20_addr),
            amount: transfer.amount,
            data: transfer.data,
            key_at_end_of_block,
          });
        }
      }
    }

    // Collect the Router's own InInstruction events, block by block
    for block in block.start .. (block.start + 32) {
      let mut events = router.in_instructions(block, &HashSet::from([DAI])).await;
      while let Err(e) = events {
        log::error!("couldn't connect to Ethereum node for the Router's events: {e:?}");
        sleep(Duration::from_secs(5)).await;
        events = router.in_instructions(block, &HashSet::from([DAI])).await;
      }
      let mut events = events.unwrap();
      for event in &mut events {
        // A transaction should either be a top-level transfer or a Router InInstruction
        if top_level_txids.contains(&event.id.0) {
          panic!("top-level transfer had {} and router had {:?}", hex::encode(event.id.0), event);
        }

        // Overwrite the key at end of block to key at end of epoch
        event.key_at_end_of_block = key_at_end_of_block;
      }
      all_events.extend(events);
    }

    // Sanity check: balance() would panic later on any unrecognized coin
    for event in &all_events {
      assert!(
        coin_to_serai_coin(&event.coin).is_some(),
        "router yielded events for unrecognized coins"
      );
    }
    all_events
  }

  // Find which registered Eventualities were completed between the tracker's position and the
  // end of `block`'s Epoch. Returns plan ID -> (block number, TX ID, completion).
  async fn get_eventuality_completions(
    &self,
    eventualities: &mut EventualitiesTracker,
    block: &Self::Block,
  ) -> HashMap<
    [u8; 32],
    (
      usize,
      >::Id,
      ::Completion,
    ),
  > {
    let mut res = HashMap::new();
    if eventualities.map.is_empty() {
      return res;
    }

    let router = self.router().await;
    let router = router.as_ref().unwrap();

    // Resolve the Epoch the tracker last scanned, retrying on error
    let past_scanned_epoch = loop {
      match self.get_block(eventualities.block_number).await {
        Ok(block) => break block,
        Err(e) => log::error!("couldn't get the last scanned block in the tracker: {}", e),
      }
      sleep(Duration::from_secs(10)).await;
    };
    assert_eq!(
      past_scanned_epoch.start / 32,
      u64::try_from(eventualities.block_number).unwrap(),
      "assumption of tracker block number's relation to epoch start is incorrect"
    );

    // Iterate from after the epoch number in the tracker to the end of this epoch
    for block_num in (past_scanned_epoch.end() + 1) ..= block.end() {
      let executed = loop {
        match router.executed_commands(block_num).await {
          Ok(executed) => break executed,
          Err(e) => log::error!("couldn't get the executed commands in block {block_num}: {e}"),
        }
        sleep(Duration::from_secs(10)).await;
      };

      for
executed in executed {
        let lookup = executed.nonce.to_le_bytes().to_vec();
        if let Some((plan_id, eventuality)) = eventualities.map.get(&lookup) {
          // Only treat this as the completion if the signature verifies against our eventuality
          if let Some(command) =
            SignedRouterCommand::new(&eventuality.0, eventuality.1.clone(), &executed.signature)
          {
            res.insert(*plan_id, (block_num.try_into().unwrap(), executed.tx_id, command));
            eventualities.map.remove(&lookup);
          }
        }
      }
    }

    // Advance the tracker to this Epoch
    eventualities.block_number = (block.start / 32).try_into().unwrap();

    res
  }

  async fn needed_fee(
    &self,
    _block_number: usize,
    inputs: &[Self::Output],
    _payments: &[Payment],
    _change: &Option,
  ) -> Result, NetworkError> {
    assert_eq!(inputs.len(), 0);
    // Claim no fee is needed so we can perform amortization ourselves
    Ok(Some(0))
  }

  // Build the RouterCommand (and its Eventuality) for this Plan, per the scheduler's addendum.
  async fn signable_transaction(
    &self,
    _block_number: usize,
    _plan_id: &[u8; 32],
    key: ::G,
    inputs: &[Self::Output],
    payments: &[Payment],
    change: &Option,
    scheduler_addendum: &>::Addendum,
  ) -> Result, NetworkError> {
    assert_eq!(inputs.len(), 0);
    assert!(change.is_none());
    let chain_id = self.provider.get_chain_id().await.map_err(|_| NetworkError::ConnectionError)?;

    // TODO: Perform fee amortization (in scheduler?
    // TODO: Make this function internal and have needed_fee properly return None as expected?
    // TODO: signable_transaction is written as cannot return None if needed_fee returns Some
    // TODO: Why can this return None at all if it isn't allowed to return None?

    let command = match scheduler_addendum {
      Addendum::Nonce(nonce) => RouterCommand::Execute {
        chain_id: U256::try_from(chain_id).unwrap(),
        nonce: U256::try_from(*nonce).unwrap(),
        outs: payments
          .iter()
          .filter_map(|payment| {
            Some(OutInstruction {
              target: if let Some(data) = payment.data.as_ref() {
                // This introspects the Call serialization format, expecting the first 20 bytes to
                // be the address
                // This avoids wasting the 20-bytes allocated within address
                let full_data = [payment.address.0.as_slice(), data].concat();
                let mut reader = full_data.as_slice();
                let mut calls = vec![];
                while !reader.is_empty() {
                  calls.push(Call::read(&mut reader).ok()?)
                }
                // The above must have executed at least once since reader contains the address
                assert_eq!(calls[0].to, payment.address.0);
                OutInstructionTarget::Calls(calls)
              } else {
                OutInstructionTarget::Direct(payment.address.0)
              },
              value: {
                assert_eq!(payment.balance.coin, ExternalCoin::Ether); // TODO
                balance_to_ethereum_amount(payment.balance)
              },
            })
          })
          .collect(),
      },
      Addendum::RotateTo { nonce, new_key } => {
        assert!(payments.is_empty());
        RouterCommand::UpdateSeraiKey {
          chain_id: U256::try_from(chain_id).unwrap(),
          nonce: U256::try_from(*nonce).unwrap(),
          key: PublicKey::new(*new_key).expect("new key wasn't a valid ETH public key"),
        }
      }
    };
    Ok(Some((
      command.clone(),
      Eventuality(PublicKey::new(key).expect("key wasn't a valid ETH public key"), command),
    )))
  }

  async fn attempt_sign(
    &self,
    keys: ThresholdKeys,
    transaction: Self::SignableTransaction,
  ) -> Result {
    Ok(
      RouterCommandMachine::new(keys, transaction)
        .expect("keys weren't usable to sign router commands"),
    )
  }

  async fn publish_completion(
    &self,
    completion: &::Completion,
  ) -> Result<(), NetworkError> {
    // Publish this to the dedicated TX server for a solver to actually publish
    #[cfg(not(test))]
    {
      let mut msg = vec![];
      match completion.command() {
        RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, ..
} => {
          // Prefix the message with the command's nonce
          msg.extend(&u32::try_from(nonce).unwrap().to_le_bytes());
        }
      }
      completion.write(&mut msg).unwrap();

      // Wire format: 4-byte little-endian length, then the message itself
      let Ok(mut socket) = TcpStream::connect(&self.relayer_url).await else {
        log::warn!("couldn't connect to the relayer server");
        Err(NetworkError::ConnectionError)?
      };
      let Ok(()) = socket.write_all(&u32::try_from(msg.len()).unwrap().to_le_bytes()).await else {
        log::warn!("couldn't send the message's len to the relayer server");
        Err(NetworkError::ConnectionError)?
      };
      let Ok(()) = socket.write_all(&msg).await else {
        log::warn!("couldn't write the message to the relayer server");
        Err(NetworkError::ConnectionError)?
      };
      // The relayer acknowledges receipt with a single `1` byte
      if socket.read_u8().await.ok() != Some(1) {
        log::warn!("didn't get the ack from the relayer server");
        Err(NetworkError::ConnectionError)?;
      }

      Ok(())
    }

    // Publish this using a dummy account we fund with magic RPC commands
    #[cfg(test)]
    {
      let router = self.router().await;
      let router = router.as_ref().unwrap();

      let mut tx = match completion.command() {
        RouterCommand::UpdateSeraiKey { key, .. } => {
          router.update_serai_key(key, completion.signature())
        }
        RouterCommand::Execute { outs, .. } => router.execute(
          &outs.iter().cloned().map(Into::into).collect::>(),
          completion.signature(),
        ),
      };
      tx.gas_limit = 1_000_000u64;
      tx.gas_price = 1_000_000_000u64.into();
      let tx = ethereum_serai::crypto::deterministically_sign(&tx);

      // Only broadcast if this TX hasn't already been published
      if self.provider.get_transaction_by_hash(*tx.hash()).await.unwrap().is_none() {
        // Fund the deterministic signer with exactly enough to cover the gas
        self
          .provider
          .raw_request::<_, ()>(
            "anvil_setBalance".into(),
            [
              tx.recover_signer().unwrap().to_string(),
              (U256::from(tx.tx().gas_limit) * U256::from(tx.tx().gas_price)).to_string(),
            ],
          )
          .await
          .unwrap();

        let (tx, sig, _) = tx.into_parts();
        let mut bytes = vec![];
        tx.encode_with_signature_fields(&sig, &mut bytes);
        let pending_tx = self.provider.send_raw_transaction(&bytes).await.unwrap();
        self.mine_block().await;
        assert!(pending_tx.get_receipt().await.unwrap().status());
      }

      Ok(())
    }
  }

  // Completion is confirmed purely by verifying the claimed signature against the eventuality;
  // no chain state is consulted.
  async fn confirm_completion(
    &self,
    eventuality: &Self::Eventuality,
    claim: &::Claim,
  ) -> Result::Completion>, NetworkError> {
    Ok(SignedRouterCommand::new(&eventuality.0, eventuality.1.clone(), &claim.signature))
  }

  #[cfg(test)]
  async fn get_block_number(&self, id: &>::Id) -> usize {
    self
      .provider
      .get_block(B256::from(*id).into(), BlockTransactionsKind::Hashes)
      .await
      .unwrap()
      .unwrap()
      .header
      .number
      .try_into()
      .unwrap()
  }

  #[cfg(test)]
  async fn check_eventuality_by_claim(
    &self,
    eventuality: &Self::Eventuality,
    claim: &::Claim,
  ) -> bool {
    SignedRouterCommand::new(&eventuality.0, eventuality.1.clone(), &claim.signature).is_some()
  }

  #[cfg(test)]
  async fn get_transaction_by_eventuality(
    &self,
    block: usize,
    eventuality: &Self::Eventuality,
  ) -> Self::Transaction {
    // We mine 96 blocks to ensure the 32 blocks relevant are finalized
    // Back-check the prior two epochs in response to this
    // TODO: Review why this is sub(3) and not sub(2)
    for block in block.saturating_sub(3) ..= block {
      match eventuality.1 {
        RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, ..
} => {
          let router = self.router().await;
          let router = router.as_ref().unwrap();

          let block = u64::try_from(block).unwrap();

          // First, look for a KeyUpdated event with this nonce within this epoch's 32 blocks
          let filter = router
            .key_updated_filter()
            .from_block(block * 32)
            .to_block(((block + 1) * 32) - 1)
            .topic1(nonce);
          let logs = self.provider.get_logs(&filter).await.unwrap();
          if let Some(log) = logs.first() {
            return self
              .provider
              .get_transaction_by_hash(log.clone().transaction_hash.unwrap())
              .await
              .unwrap()
              .unwrap();
          };

          // Otherwise, look for an Executed event with this nonce
          let filter = router
            .executed_filter()
            .from_block(block * 32)
            .to_block(((block + 1) * 32) - 1)
            .topic1(nonce);
          let logs = self.provider.get_logs(&filter).await.unwrap();
          if logs.is_empty() {
            continue;
          }
          return self
            .provider
            .get_transaction_by_hash(logs[0].transaction_hash.unwrap())
            .await
            .unwrap()
            .unwrap();
        }
      }
    }
    panic!("couldn't find completion in any three of checked blocks");
  }

  // Mines 96 blocks (three Epochs) via anvil so the relevant Epoch finalizes.
  #[cfg(test)]
  async fn mine_block(&self) {
    self.provider.raw_request::<_, ()>("anvil_mine".into(), [96]).await.unwrap();
  }

  // Funds a fresh account via anvil and sends a 1 ETH inInstruction call to `send_to`,
  // returning the Epoch containing it.
  #[cfg(test)]
  async fn test_send(&self, send_to: Self::Address) -> Self::Block {
    use rand_core::OsRng;
    use ciphersuite::group::ff::Field;
    use ethereum_serai::alloy::sol_types::SolCall;

    let key = ::F::random(&mut OsRng);
    let address = ethereum_serai::crypto::address(&(Secp256k1::generator() * key));

    // Set a 1.1 ETH balance
    self
      .provider
      .raw_request::<_, ()>(
        "anvil_setBalance".into(),
        [Address(address).to_string(), "1100000000000000000".into()],
      )
      .await
      .unwrap();

    let value = U256::from_str_radix("1000000000000000000", 10).unwrap();
    let tx = ethereum_serai::alloy::consensus::TxLegacy {
      chain_id: None,
      nonce: 0,
      gas_price: 1_000_000_000u128,
      gas_limit: 200_000,
      to: ethereum_serai::alloy::primitives::TxKind::Call(send_to.0.into()),
      // 1 ETH
      value,
      input: ethereum_serai::router::abi::inInstructionCall::new((
        [0; 20].into(),
        value,
        vec![].into(),
      ))
      .abi_encode()
      .into(),
    };

    use ethereum_serai::alloy::{
      primitives::{Parity, Signature},
      consensus::SignableTransaction,
    };
    let sig = k256::ecdsa::SigningKey::from(k256::elliptic_curve::NonZeroScalar::new(key).unwrap())
      .sign_prehash_recoverable(tx.signature_hash().as_ref())
      .unwrap();

    let mut bytes = vec![];
    let parity = Parity::NonEip155(Parity::from(sig.1).y_parity());
    tx.encode_with_signature_fields(&Signature::from(sig).with_parity(parity), &mut bytes);
    let pending_tx = self.provider.send_raw_transaction(&bytes).await.ok().unwrap();

    // Mine an epoch containing this TX
    self.mine_block().await;
    assert!(pending_tx.get_receipt().await.unwrap().status());

    // Yield the freshly mined block
    self.get_block(self.get_latest_block_number().await.unwrap()).await.unwrap()
  }
}


================================================
FILE: processor/src/networks/mod.rs
================================================
use core::{fmt::Debug, time::Duration};
use std::{io, collections::HashMap};

use async_trait::async_trait;
use thiserror::Error;

use frost::{
  curve::{Ciphersuite, Curve},
  ThresholdKeys,
  sign::PreprocessMachine,
};

use serai_client::primitives::{ExternalBalance, ExternalNetworkId};

use log::error;

use tokio::time::sleep;

#[cfg(feature = "bitcoin")]
pub mod bitcoin;
#[cfg(feature = "bitcoin")]
pub use self::bitcoin::Bitcoin;

#[cfg(feature = "ethereum")]
pub mod ethereum;
#[cfg(feature = "ethereum")]
pub use ethereum::Ethereum;

#[cfg(feature = "monero")]
pub mod monero;
#[cfg(feature = "monero")]
pub use monero::Monero;

use crate::{Payment, Plan, multisigs::scheduler::Scheduler};

// The sole error yielded by network calls: the daemon was unreachable or errored.
#[derive(Clone, Copy, Error, Debug)]
pub enum NetworkError {
  #[error("failed to connect to network daemon")]
  ConnectionError,
}

// Marker trait for byte-array-style identifiers; blanket-implemented below.
pub trait Id: Send + Sync + Clone + Default + PartialEq + AsRef<[u8]> + AsMut<[u8]> + Debug {
}
impl + AsMut<[u8]> + Debug> Id for I {}

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub enum OutputType {
  // Needs to be processed/sent up to Substrate
  External,

  // Given a known output set, and a known series of outbound transactions, we should be able to
  // form a completely deterministic schedule S.
// The issue is when S has TXs which spend prior TXs
  // in S (which is needed for our logarithmic scheduling). In order to have the descendant TX, say
  // S[1], build off S[0], we need to observe when S[0] is included on-chain.
  //
  // We cannot.
  //
  // Monero (and other privacy coins) do not expose their UTXO graphs. Even if we know how to
  // create S[0], and the actual payment info behind it, we cannot observe it on the blockchain
  // unless we participated in creating it. Locking the entire schedule, when we cannot sign for
  // the entire schedule at once, to a single signing set isn't feasible.
  //
  // While any member of the active signing set can provide data enabling other signers to
  // participate, it's several KB of data which we then have to code communication for.
  // The other option is to simply not observe S[0]. Instead, observe a TX with an identical output
  // to the one in S[0] we intended to use for S[1]. It's either from S[0], or Eve, a malicious
  // actor, has sent us a forged TX which is... equally as usable? so who cares?
  //
  // The only issue is if we have multiple outputs on-chain with identical amounts and purposes.
  // Accordingly, when the scheduler makes a plan for when a specific output is available, it
  // shouldn't write that plan. It should *push* that plan to a queue of plans to perform when
  // instances of that output occur.
  Branch,

  // Should be added to the available UTXO pool with no further action
  Change,

  // Forwarded output from the prior multisig
  Forwarded,
}

impl OutputType {
  // One-byte serialization of the variant.
  fn write(&self, writer: &mut W) -> io::Result<()> {
    writer.write_all(&[match self {
      OutputType::External => 0,
      OutputType::Branch => 1,
      OutputType::Change => 2,
      OutputType::Forwarded => 3,
    }])
  }

  fn read(reader: &mut R) -> io::Result {
    let mut byte = [0; 1];
    reader.read_exact(&mut byte)?;
    Ok(match byte[0] {
      0 => OutputType::External,
      1 => OutputType::Branch,
      2 => OutputType::Change,
      3 => OutputType::Forwarded,
      _ => Err(io::Error::other("invalid OutputType"))?,
    })
  }
}

// A scanned output, as reported by a network's scanner.
pub trait Output: Send + Sync + Sized + Clone + PartialEq + Eq + Debug {
  type Id: 'static + Id;

  fn kind(&self) -> OutputType;

  fn id(&self) -> Self::Id;
  fn tx_id(&self) -> >::Id; // TODO: Review use of
  fn key(&self) -> ::G;

  fn presumed_origin(&self) -> Option;

  fn balance(&self) -> ExternalBalance;
  fn data(&self) -> &[u8];

  fn write(&self, writer: &mut W) -> io::Result<()>;
  fn read(reader: &mut R) -> io::Result;
}

#[async_trait]
pub trait Transaction: Send + Sync + Sized + Clone + PartialEq + Debug {
  type Id: 'static + Id;
  fn id(&self) -> Self::Id;

  // TODO: Move to ExternalBalance
  #[cfg(test)]
  async fn fee(&self, network: &N) -> u64;
}

pub trait SignableTransaction: Send + Sync + Clone + Debug {
  // TODO: Move to ExternalBalance
  fn fee(&self) -> u64;
}

// The expectation of a planned transaction's on-chain completion.
pub trait Eventuality: Send + Sync + Clone + PartialEq + Debug {
  type Claim: Send + Sync + Clone + PartialEq + Default + AsRef<[u8]> + AsMut<[u8]> + Debug;
  type Completion: Send + Sync + Clone + PartialEq + Debug;

  fn lookup(&self) -> Vec;

  fn read(reader: &mut R) -> io::Result;
  fn serialize(&self) -> Vec;

  fn claim(completion: &Self::Completion) -> Self::Claim;

  // TODO: Make a dedicated Completion trait
  fn serialize_completion(completion: &Self::Completion) -> Vec;
  fn read_completion(reader: &mut R) -> io::Result;
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct EventualitiesTracker {
  // Lookup property (input, nonce, TX extra...) -> (plan ID, eventuality)
  map: HashMap, ([u8; 32], E)>,
  // Block number we've scanned these eventualities to
  block_number: usize,
}

impl EventualitiesTracker {
  // A fresh tracker starts at usize::MAX so the first register() pulls it back.
  pub fn new() -> Self {
    EventualitiesTracker { map: HashMap::new(), block_number: usize::MAX }
  }

  pub fn register(&mut self, block_number: usize, id: [u8; 32], eventuality: E) {
    log::info!("registering eventuality for {}", hex::encode(id));

    let lookup = eventuality.lookup();
    if self.map.contains_key(&lookup) {
      panic!("registering an eventuality multiple times or lookup collision");
    }
    self.map.insert(lookup, (id, eventuality));
    // If our self tracker already went past this block number, set it back
    self.block_number = self.block_number.min(block_number);
  }

  pub fn drop(&mut self, id: [u8; 32]) {
    // O(n) due to the lack of a reverse lookup
    let mut found_key = None;
    for (key, value) in &self.map {
      if value.0 == id {
        found_key = Some(key.clone());
        break;
      }
    }

    if let Some(key) = found_key {
      self.map.remove(&key);
    }
  }
}

impl Default for EventualitiesTracker {
  fn default() -> Self {
    Self::new()
  }
}

#[async_trait]
pub trait Block: Send + Sync + Sized + Clone + Debug {
  // This is currently bounded to being 32 bytes.
  type Id: 'static + Id;
  fn id(&self) -> Self::Id;
  fn parent(&self) -> Self::Id;
  /// The monotonic network time at this block.
  ///
  /// This call is presumed to be expensive and should only be called sparingly.
  async fn time(&self, rpc: &N) -> u64;
}

// The post-fee value of an expected branch.
pub struct PostFeeBranch { pub expected: u64, pub actual: Option, } // Return the PostFeeBranches needed when dropping a transaction fn drop_branches( key: ::G, payments: &[Payment], ) -> Vec { let mut branch_outputs = vec![]; for payment in payments { if Some(&payment.address) == N::branch_address(key).as_ref() { branch_outputs.push(PostFeeBranch { expected: payment.balance.amount.0, actual: None }); } } branch_outputs } pub struct PreparedSend { /// None for the transaction if the SignableTransaction was dropped due to lack of value. pub tx: Option<(N::SignableTransaction, N::Eventuality)>, pub post_fee_branches: Vec, /// The updated operating costs after preparing this transaction. pub operating_costs: u64, } #[async_trait] pub trait Network: 'static + Send + Sync + Clone + PartialEq + Debug { /// The elliptic curve used for this network. type Curve: Curve; /// The type representing the transaction for this network. type Transaction: Transaction; // TODO: Review use of /// The type representing the block for this network. type Block: Block; /// The type containing all information on a scanned output. // This is almost certainly distinct from the network's native output type. type Output: Output; /// The type containing all information on a planned transaction, waiting to be signed. type SignableTransaction: SignableTransaction; /// The type containing all information to check if a plan was completed. /// /// This must be binding to both the outputs expected and the plan ID. type Eventuality: Eventuality; /// The FROST machine to sign a transaction. type TransactionMachine: PreprocessMachine< Signature = ::Completion, >; /// The scheduler for this network. type Scheduler: Scheduler; /// The type representing an address. // This should NOT be a String, yet a tailored type representing an efficient binary encoding, // as detailed in the integration documentation. 
type Address: Send + Sync + Clone + PartialEq + Eq + Debug + ToString + TryInto> + TryFrom>; /// Network ID for this network. const NETWORK: ExternalNetworkId; /// String ID for this network. const ID: &'static str; /// The estimated amount of time a block will take. const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize; /// The amount of confirmations required to consider a block 'final'. const CONFIRMATIONS: usize; /// The maximum amount of outputs which will fit in a TX. /// This should be equal to MAX_INPUTS unless one is specifically limited. /// A TX with MAX_INPUTS and MAX_OUTPUTS must not exceed the max size. const MAX_OUTPUTS: usize; /// Minimum output value which will be handled. /// /// For any received output, there's the cost to spend the output. This value MUST exceed the /// cost to spend said output, and should by a notable margin (not just 2x, yet an order of /// magnitude). // TODO: Dust needs to be diversified per ExternalCoin const DUST: u64; /// The cost to perform input aggregation with a 2-input 1-output TX. const COST_TO_AGGREGATE: u64; /// Tweak keys for this network. fn tweak_keys(key: &mut ThresholdKeys); /// Address for the given group key to receive external coins to. #[cfg(test)] async fn external_address(&self, key: ::G) -> Self::Address; /// Address for the given group key to use for scheduled branches. fn branch_address(key: ::G) -> Option; /// Address for the given group key to use for change. fn change_address(key: ::G) -> Option; /// Address for forwarded outputs from prior multisigs. /// /// forward_address must only return None if explicit forwarding isn't necessary. fn forward_address(key: ::G) -> Option; /// Get the latest block's number. async fn get_latest_block_number(&self) -> Result; /// Get a block by its number. async fn get_block(&self, number: usize) -> Result; /// Get the latest block's number, retrying until success. 
async fn get_latest_block_number_with_retries(&self) -> usize { loop { match self.get_latest_block_number().await { Ok(number) => { return number; } Err(e) => { error!( "couldn't get the latest block number in the with retry get_latest_block_number: {e:?}", ); sleep(Duration::from_secs(10)).await; } } } } /// Get a block, retrying until success. async fn get_block_with_retries(&self, block_number: usize) -> Self::Block { loop { match self.get_block(block_number).await { Ok(block) => { return block; } Err(e) => { error!("couldn't get block {block_number} in the with retry get_block: {:?}", e); sleep(Duration::from_secs(10)).await; } } } } /// Get the outputs within a block for a specific key. async fn get_outputs( &self, block: &Self::Block, key: ::G, ) -> Vec; /// Get the registered eventualities completed within this block, and any prior blocks which /// registered eventualities may have been completed in. /// /// This may panic if not fed a block greater than the tracker's block number. /// /// Plan ID -> (block number, TX ID, completion) // TODO: get_eventuality_completions_internal + provided get_eventuality_completions for common // code // TODO: Consider having this return the Transaction + the Completion? // Or Transaction with extract_completion? async fn get_eventuality_completions( &self, eventualities: &mut EventualitiesTracker, block: &Self::Block, ) -> HashMap< [u8; 32], ( usize, >::Id, ::Completion, ), >; /// Returns the needed fee to fulfill this Plan at this fee rate. /// /// Returns None if this Plan isn't fulfillable (such as when the fee exceeds the input value). async fn needed_fee( &self, block_number: usize, inputs: &[Self::Output], payments: &[Payment], change: &Option, ) -> Result, NetworkError>; /// Create a SignableTransaction for the given Plan. 
///
/// The expected flow is:
/// 1) Call needed_fee
/// 2) If the Plan is fulfillable, amortize the fee
/// 3) Call signable_transaction *which MUST NOT return None if the above was done properly*
///
/// This takes a destructured Plan as some of these arguments are malleated from the original
/// Plan.
// TODO: Explicit AmortizedPlan?
#[allow(clippy::too_many_arguments)]
async fn signable_transaction(
  &self,
  block_number: usize,
  plan_id: &[u8; 32],
  key: ::G,
  inputs: &[Self::Output],
  payments: &[Payment],
  change: &Option,
  scheduler_addendum: &>::Addendum,
) -> Result, NetworkError>;

/// Prepare a SignableTransaction for a transaction.
///
/// This must not persist anything as we will prepare Plans we never intend to execute.
async fn prepare_send(
  &self,
  block_number: usize,
  plan: Plan,
  operating_costs: u64,
) -> Result, NetworkError> {
  // Sanity check this has at least one output planned
  assert!((!plan.payments.is_empty()) || plan.change.is_some());

  // The Plan's ID is derived from its transcript; grab it before destructuring consumes the Plan.
  let plan_id = plan.id();
  let Plan { key, inputs, mut payments, change, scheduler_addendum } = plan;
  // The change we'd expect before fees: sum(inputs) - sum(payments). Only meaningful when a
  // change output is actually planned.
  // NOTE(review): this subtraction will panic on underflow in debug builds if payments exceed
  // inputs; presumably the scheduler guarantees inputs cover payments whenever change is planned
  // — confirm against the scheduler.
  let theoretical_change_amount = if change.is_some() {
    inputs.iter().map(|input| input.balance().amount.0).sum::() -
      payments.iter().map(|payment| payment.balance.amount.0).sum::()
  } else {
    0
  };

  // needed_fee returning None means the Plan is unfulfillable at this fee rate.
  let Some(tx_fee) = self.needed_fee(block_number, &inputs, &payments, &change).await? else {
    // This Plan is not fulfillable
    // TODO: Have Plan explicitly distinguish payments and branches in two separate Vecs?
    // (continuation of prepare_send: the unfulfillable-Plan early return)
    return Ok(PreparedSend {
      tx: None,
      // Have all of its branches dropped
      post_fee_branches: drop_branches(key, &payments),
      // This plan expects a change output valued at sum(inputs) - sum(outputs)
      // Since we can no longer create this change output, it becomes an operating cost
      // TODO: Look at input restoration to reduce this operating cost
      operating_costs: operating_costs +
        if change.is_some() { theoretical_change_amount } else { 0 },
    });
  };

  // Amortize the fee over the plan's payments
  // Done in an immediately-invoked closure so the `return`s below only exit the amortization,
  // not prepare_send itself.
  let (post_fee_branches, mut operating_costs) = (|| {
    // If we're creating a change output, letting us recoup coins, amortize the operating costs
    // as well
    let total_fee = tx_fee + if change.is_some() { operating_costs } else { 0 };

    let original_outputs = payments.iter().map(|payment| payment.balance.amount.0).sum::();
    // If this isn't enough for the total fee, drop and move on
    if original_outputs < total_fee {
      let mut remaining_operating_costs = operating_costs;
      if change.is_some() {
        // Operating costs increase by the TX fee
        remaining_operating_costs += tx_fee;
        // Yet decrease by the payments we managed to drop
        remaining_operating_costs = remaining_operating_costs.saturating_sub(original_outputs);
      }
      return (drop_branches(key, &payments), remaining_operating_costs);
    }

    // Snapshot the pre-fee amounts so branch outputs can report expected vs actual later.
    let initial_payment_amounts =
      payments.iter().map(|payment| payment.balance.amount.0).collect::>();

    // Amortize the transaction fee across outputs
    let mut remaining_fee = total_fee;
    // Run as many times as needed until we can successfully subtract this fee
    // (a payment may not cover its even share, in which case the shortfall rolls into the next
    // pass)
    while remaining_fee != 0 {
      // This shouldn't be a / by 0 as these payments have enough value to cover the fee
      let this_iter_fee = remaining_fee / u64::try_from(payments.len()).unwrap();
      // The division remainder; it's charged entirely to the first payment of each pass.
      let mut overage = remaining_fee % u64::try_from(payments.len()).unwrap();
      for payment in &mut payments {
        let this_payment_fee = this_iter_fee + overage;
        // Only subtract the overage once
        overage = 0;

        // A payment can only be charged what it's worth; anything more stays in remaining_fee.
        let subtractable = payment.balance.amount.0.min(this_payment_fee);
        remaining_fee -=
          subtractable;
        payment.balance.amount.0 -= subtractable;
      }
    }

    // If any payment is now below the dust threshold, set its value to 0 so it'll be dropped
    for payment in &mut payments {
      if payment.balance.amount.0 < Self::DUST {
        payment.balance.amount.0 = 0;
      }
    }

    // Note the branch outputs' new values
    let mut branch_outputs = vec![];
    for (initial_amount, payment) in initial_payment_amounts.into_iter().zip(&payments) {
      if Some(&payment.address) == Self::branch_address(key).as_ref() {
        branch_outputs.push(PostFeeBranch {
          expected: initial_amount,
          actual: if payment.balance.amount.0 == 0 {
            None
          } else {
            Some(payment.balance.amount.0)
          },
        });
      }
    }

    // Drop payments now worth 0
    payments = payments
      .drain(..)
      .filter(|payment| {
        if payment.balance.amount.0 != 0 {
          true
        } else {
          log::debug!("dropping dust payment from plan {}", hex::encode(plan_id));
          false
        }
      })
      .collect();

    // Sanity check the fee was successfully amortized
    let new_outputs = payments.iter().map(|payment| payment.balance.amount.0).sum::();
    assert!((new_outputs + total_fee) <= original_outputs);

    (
      branch_outputs,
      if change.is_none() {
        // If the change is None, this had no effect on the operating costs
        operating_costs
      } else {
        // Since the change is some, and we successfully amortized, the operating costs were
        // recouped
        0
      },
    )
  })();

  // Per the documented flow, this MUST NOT return None after needed_fee returned Some.
  let Some(tx) = self
    .signable_transaction(
      block_number,
      &plan_id,
      key,
      &inputs,
      &payments,
      &change,
      &scheduler_addendum,
    )
    .await?
  else {
    // NOTE(review): "amoritized" typo is in the original runtime string, intentionally preserved.
    panic!(
      "{}. {}: {}, {}: {:?}, {}: {:?}, {}: {:?}, {}: {}, {}: {:?}",
      "signable_transaction returned None for a TX we prior successfully calculated the fee for",
      "id",
      hex::encode(plan_id),
      "inputs",
      inputs,
      "post-amortization payments",
      payments,
      "change",
      change,
      "successfully amoritized fee",
      tx_fee,
      "scheduler's addendum",
      scheduler_addendum,
    )
  };

  if change.is_some() {
    // What the change output will actually be worth on-chain, post-amortization.
    let on_chain_expected_change =
      inputs.iter().map(|input| input.balance().amount.0).sum::() -
        payments.iter().map(|payment| payment.balance.amount.0).sum::() -
        tx_fee;
    // If the change value is less than the dust threshold, it becomes an operating cost
    // This may be slightly inaccurate as dropping payments may reduce the fee, raising the
    // change above dust
    // That's fine since it'd have to be in a very precarious state AND then it's over-eager in
    // tabulating costs
    if on_chain_expected_change < Self::DUST {
      operating_costs += theoretical_change_amount;
    }
  }

  Ok(PreparedSend { tx: Some(tx), post_fee_branches, operating_costs })
}

/// Attempt to sign a SignableTransaction.
async fn attempt_sign(
  &self,
  keys: ThresholdKeys,
  transaction: Self::SignableTransaction,
) -> Result;

/// Publish a completion.
async fn publish_completion(
  &self,
  completion: &::Completion,
) -> Result<(), NetworkError>;

/// Confirm a plan was completed by the specified transaction, per our bounds.
///
/// Returns Err if there was an error with the confirmation methodology.
/// Returns Ok(None) if this is not a valid completion.
/// Returns Ok(Some(_)) with the completion if it's valid.
async fn confirm_completion(
  &self,
  eventuality: &Self::Eventuality,
  claim: &::Claim,
) -> Result::Completion>, NetworkError>;

/// Get a block's number by its ID.
#[cfg(test)]
async fn get_block_number(&self, id: &>::Id) -> usize;

/// Check an Eventuality is fulfilled by a claim.
#[cfg(test)]
async fn check_eventuality_by_claim(
  &self,
  eventuality: &Self::Eventuality,
  claim: &::Claim,
) -> bool;

/// Get a transaction by the Eventuality it completes.
#[cfg(test)] async fn get_transaction_by_eventuality( &self, block: usize, eventuality: &Self::Eventuality, ) -> Self::Transaction; #[cfg(test)] async fn mine_block(&self); /// Sends to the specified address. /// Additionally mines enough blocks so that the TX is past the confirmation depth. #[cfg(test)] async fn test_send(&self, key: Self::Address) -> Self::Block; } pub trait UtxoNetwork: Network { /// The maximum amount of inputs which will fit in a TX. /// This should be equal to MAX_OUTPUTS unless one is specifically limited. /// A TX with MAX_INPUTS and MAX_OUTPUTS must not exceed the max size. const MAX_INPUTS: usize; } ================================================ FILE: processor/src/networks/monero.rs ================================================ use std::{time::Duration, collections::HashMap, io}; use async_trait::async_trait; use zeroize::Zeroizing; use rand_core::SeedableRng; use rand_chacha::ChaCha20Rng; use transcript::{Transcript, RecommendedTranscript}; use ciphersuite::group::{ff::Field, Group}; use dalek_ff_group::{Scalar, EdwardsPoint}; use frost::{curve::Ed25519, ThresholdKeys}; use monero_simple_request_rpc::SimpleRequestRpc; use monero_wallet::{ ringct::RctType, transaction::Transaction, block::Block, rpc::{FeeRate, RpcError, Rpc}, address::{Network as MoneroNetwork, SubaddressIndex}, ViewPair, GuaranteedViewPair, WalletOutput, OutputWithDecoys, GuaranteedScanner, send::{ SendError, Change, SignableTransaction as MSignableTransaction, Eventuality, TransactionMachine, }, }; #[cfg(test)] use monero_wallet::Scanner; use tokio::time::sleep; pub use serai_client::{ primitives::{MAX_DATA_LEN, ExternalCoin, ExternalNetworkId, Amount, ExternalBalance}, networks::monero::Address, }; use crate::{ Payment, additional_key, networks::{ NetworkError, Block as BlockTrait, OutputType, Output as OutputTrait, Transaction as TransactionTrait, SignableTransaction as SignableTransactionTrait, Eventuality as EventualityTrait, EventualitiesTracker, Network, 
UtxoNetwork, }, multisigs::scheduler::utxo::Scheduler, }; #[derive(Clone, PartialEq, Eq, Debug)] pub struct Output(WalletOutput); const EXTERNAL_SUBADDRESS: Option = SubaddressIndex::new(0, 0); const BRANCH_SUBADDRESS: Option = SubaddressIndex::new(1, 0); const CHANGE_SUBADDRESS: Option = SubaddressIndex::new(2, 0); const FORWARD_SUBADDRESS: Option = SubaddressIndex::new(3, 0); impl OutputTrait for Output { // While we could use (tx, o), using the key ensures we won't be susceptible to the burning bug. // While we already are immune, thanks to using featured address, this doesn't hurt and is // technically more efficient. type Id = [u8; 32]; fn kind(&self) -> OutputType { match self.0.subaddress() { EXTERNAL_SUBADDRESS => OutputType::External, BRANCH_SUBADDRESS => OutputType::Branch, CHANGE_SUBADDRESS => OutputType::Change, FORWARD_SUBADDRESS => OutputType::Forwarded, _ => panic!("unrecognized address was scanned for"), } } fn id(&self) -> Self::Id { self.0.key().compress().to_bytes() } fn tx_id(&self) -> [u8; 32] { self.0.transaction() } fn key(&self) -> EdwardsPoint { EdwardsPoint(self.0.key() - (EdwardsPoint::generator().0 * self.0.key_offset())) } fn presumed_origin(&self) -> Option
{ None } fn balance(&self) -> ExternalBalance { ExternalBalance { coin: ExternalCoin::Monero, amount: Amount(self.0.commitment().amount) } } fn data(&self) -> &[u8] { let Some(data) = self.0.arbitrary_data().first() else { return &[] }; // If the data is too large, prune it // This should cause decoding the instruction to fail, and trigger a refund as appropriate if data.len() > usize::try_from(MAX_DATA_LEN).unwrap() { return &[]; } data } fn write(&self, writer: &mut W) -> io::Result<()> { self.0.write(writer)?; Ok(()) } fn read(reader: &mut R) -> io::Result { Ok(Output(WalletOutput::read(reader)?)) } } // TODO: Consider ([u8; 32], TransactionPruned) #[async_trait] impl TransactionTrait for Transaction { type Id = [u8; 32]; fn id(&self) -> Self::Id { self.hash() } #[cfg(test)] async fn fee(&self, _: &Monero) -> u64 { match self { Transaction::V1 { .. } => panic!("v1 TX in test-only function"), Transaction::V2 { ref proofs, .. } => proofs.as_ref().unwrap().base.fee, } } } impl EventualityTrait for Eventuality { type Claim = [u8; 32]; type Completion = Transaction; // Use the TX extra to look up potential matches // While anyone can forge this, a transaction with distinct outputs won't actually match // Extra includess the one time keys which are derived from the plan ID, so a collision here is a // hash collision fn lookup(&self) -> Vec { self.extra() } fn read(reader: &mut R) -> io::Result { Eventuality::read(reader) } fn serialize(&self) -> Vec { self.serialize() } fn claim(tx: &Transaction) -> [u8; 32] { tx.id() } fn serialize_completion(completion: &Transaction) -> Vec { completion.serialize() } fn read_completion(reader: &mut R) -> io::Result { Transaction::read(reader) } } #[derive(Clone, Debug)] pub struct SignableTransaction(MSignableTransaction); impl SignableTransactionTrait for SignableTransaction { fn fee(&self) -> u64 { self.0.necessary_fee() } } #[async_trait] impl BlockTrait for Block { type Id = [u8; 32]; fn id(&self) -> Self::Id { self.hash() } fn 
parent(&self) -> Self::Id { self.header.previous } async fn time(&self, rpc: &Monero) -> u64 { // Constant from Monero const BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW: usize = 60; // If Monero doesn't have enough blocks to build a window, it doesn't define a network time if (self.number().unwrap() + 1) < BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW { // Use the block number as the time return u64::try_from(self.number().unwrap()).unwrap(); } let mut timestamps = vec![self.header.timestamp]; let mut parent = self.parent(); while timestamps.len() < BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW { let mut parent_block; while { parent_block = rpc.rpc.get_block(parent).await; parent_block.is_err() } { log::error!("couldn't get parent block when trying to get block time: {parent_block:?}"); sleep(Duration::from_secs(5)).await; } let parent_block = parent_block.unwrap(); timestamps.push(parent_block.header.timestamp); parent = parent_block.parent(); if parent_block.number().unwrap() == 0 { break; } } timestamps.sort(); // Because 60 has two medians, Monero's epee picks the in-between value, calculated by the // following formula (from the "get_mid" function) let n = timestamps.len() / 2; let a = timestamps[n - 1]; let b = timestamps[n]; #[rustfmt::skip] // Enables Ctrl+F'ing for everything after the `= ` let res = (a/2) + (b/2) + ((a - 2*(a/2)) + (b - 2*(b/2)))/2; // Technically, res may be 1 if all prior blocks had a timestamp by 0, which would break // monotonicity with our above definition of height as time // Monero also solely requires the block's time not be less than the median, it doesn't ensure // it advances the median forward // Ensure monotonicity despite both these issues by adding the block number to the median time res + u64::try_from(self.number().unwrap()).unwrap() } } #[derive(Clone, Debug)] pub struct Monero { rpc: SimpleRequestRpc, } // Shim required for testing/debugging purposes due to generic arguments also necessitating trait // bounds impl PartialEq for Monero { fn eq(&self, _: 
&Self) -> bool { true } } impl Eq for Monero {} #[allow(clippy::needless_pass_by_value)] // Needed to satisfy API expectations fn map_rpc_err(err: RpcError) -> NetworkError { if let RpcError::InvalidNode(reason) = &err { log::error!("Monero RpcError::InvalidNode({reason})"); } else { log::debug!("Monero RpcError {err:?}"); } NetworkError::ConnectionError } enum MakeSignableTransactionResult { Fee(u64), SignableTransaction(MSignableTransaction), } impl Monero { pub async fn new(url: String) -> Monero { let mut res = SimpleRequestRpc::new(url.clone()).await; while let Err(e) = res { log::error!("couldn't connect to Monero node: {e:?}"); tokio::time::sleep(Duration::from_secs(5)).await; res = SimpleRequestRpc::new(url.clone()).await; } Monero { rpc: res.unwrap() } } fn view_pair(spend: EdwardsPoint) -> GuaranteedViewPair { GuaranteedViewPair::new(spend.0, Zeroizing::new(additional_key::(0).0)).unwrap() } fn address_internal(spend: EdwardsPoint, subaddress: Option) -> Address { Address::new(Self::view_pair(spend).address(MoneroNetwork::Mainnet, subaddress, None)).unwrap() } fn scanner(spend: EdwardsPoint) -> GuaranteedScanner { let mut scanner = GuaranteedScanner::new(Self::view_pair(spend)); debug_assert!(EXTERNAL_SUBADDRESS.is_none()); scanner.register_subaddress(BRANCH_SUBADDRESS.unwrap()); scanner.register_subaddress(CHANGE_SUBADDRESS.unwrap()); scanner.register_subaddress(FORWARD_SUBADDRESS.unwrap()); scanner } async fn median_fee(&self, block: &Block) -> Result { let mut fees = vec![]; for tx_hash in &block.transactions { let tx = self.rpc.get_transaction(*tx_hash).await.map_err(|_| NetworkError::ConnectionError)?; // Only consider fees from RCT transactions, else the fee property read wouldn't be accurate let fee = match &tx { Transaction::V2 { proofs: Some(proofs), .. 
} => proofs.base.fee, _ => continue, }; fees.push(fee / u64::try_from(tx.weight()).unwrap()); } fees.sort(); let fee = fees.get(fees.len() / 2).copied().unwrap_or(0); // TODO: Set a sane minimum fee const MINIMUM_FEE: u64 = 1_500_000; Ok(FeeRate::new(fee.max(MINIMUM_FEE), 10000).unwrap()) } async fn make_signable_transaction( &self, block_number: usize, plan_id: &[u8; 32], inputs: &[Output], payments: &[Payment], change: &Option
, calculating_fee: bool, ) -> Result, NetworkError> { for payment in payments { assert_eq!(payment.balance.coin, ExternalCoin::Monero); } // TODO2: Use an fee representative of several blocks, cached inside Self let block_for_fee = self.get_block(block_number).await?; let fee_rate = self.median_fee(&block_for_fee).await?; // Determine the RCT proofs to make based off the hard fork // TODO: Make a fn for this block which is duplicated with tests let rct_type = match block_for_fee.header.hardfork_version { 14 => RctType::ClsagBulletproof, 15 | 16 => RctType::ClsagBulletproofPlus, _ => panic!("Monero hard forked and the processor wasn't updated for it"), }; let mut transcript = RecommendedTranscript::new(b"Serai Processor Monero Transaction Transcript"); transcript.append_message(b"plan", plan_id); // All signers need to select the same decoys // All signers use the same height and a seeded RNG to make sure they do so. let mut inputs_actual = Vec::with_capacity(inputs.len()); for input in inputs { inputs_actual.push( OutputWithDecoys::fingerprintable_deterministic_new( &mut ChaCha20Rng::from_seed(transcript.rng_seed(b"decoys")), &self.rpc, // TODO: Have Decoys take RctType match rct_type { RctType::ClsagBulletproof => 11, RctType::ClsagBulletproofPlus => 16, _ => panic!("selecting decoys for an unsupported RctType"), }, block_number + 1, input.0.clone(), ) .await .map_err(map_rpc_err)?, ); } // Monero requires at least two outputs // If we only have one output planned, add a dummy payment let mut payments = payments.to_vec(); let outputs = payments.len() + usize::from(u8::from(change.is_some())); if outputs == 0 { return Ok(None); } else if outputs == 1 { payments.push(Payment { address: Address::new( ViewPair::new(EdwardsPoint::generator().0, Zeroizing::new(Scalar::ONE.0)) .unwrap() .legacy_address(MoneroNetwork::Mainnet), ) .unwrap(), balance: ExternalBalance { coin: ExternalCoin::Monero, amount: Amount(0) }, data: None, }); } let payments = payments .into_iter() 
.map(|payment| (payment.address.into(), payment.balance.amount.0)) .collect::>(); match MSignableTransaction::new( rct_type, // Use the plan ID as the outgoing view key Zeroizing::new(*plan_id), inputs_actual, payments, Change::fingerprintable(change.as_ref().map(|change| change.clone().into())), vec![], fee_rate, ) { Ok(signable) => Ok(Some({ if calculating_fee { MakeSignableTransactionResult::Fee(signable.necessary_fee()) } else { MakeSignableTransactionResult::SignableTransaction(signable) } })), // AmountsUnrepresentable is unreachable on Monero without 100% of the supply before tail // emission or fundamental corruption Err(e) => match e { SendError::UnsupportedRctType => { panic!("trying to use an RctType unsupported by monero-wallet") } SendError::NoInputs | SendError::InvalidDecoyQuantity | SendError::NoOutputs | SendError::TooManyOutputs | SendError::AmountsUnrepresentable { .. } | SendError::NoChange | SendError::TooMuchArbitraryData | SendError::TooLargeTransaction | SendError::WrongPrivateKey => { panic!("created an invalid Monero transaction: {e}"); } SendError::MultiplePaymentIds => { panic!("multiple payment IDs despite not supporting integrated addresses"); } SendError::NotEnoughFunds { inputs, outputs, necessary_fee } => { log::debug!( "Monero NotEnoughFunds. 
inputs: {:?}, outputs: {:?}, necessary_fee: {necessary_fee:?}", inputs, outputs ); match necessary_fee { Some(necessary_fee) => { // If we're solely calculating the fee, return the fee this TX will cost if calculating_fee { Ok(Some(MakeSignableTransactionResult::Fee(necessary_fee))) } else { // If we're actually trying to make the TX, return None Ok(None) } } // We didn't have enough funds to even cover the outputs None => { // Ensure we're not misinterpreting this assert!(outputs > inputs); Ok(None) } } } SendError::MaliciousSerialization | SendError::ClsagError(_) | SendError::FrostError(_) => { panic!("supposedly unreachable (at this time) Monero error: {e}"); } }, } } #[cfg(test)] fn test_view_pair() -> ViewPair { ViewPair::new(*EdwardsPoint::generator(), Zeroizing::new(Scalar::ONE.0)).unwrap() } #[cfg(test)] fn test_scanner() -> Scanner { Scanner::new(Self::test_view_pair()) } #[cfg(test)] fn test_address() -> Address { Address::new(Self::test_view_pair().legacy_address(MoneroNetwork::Mainnet)).unwrap() } } #[async_trait] impl Network for Monero { type Curve = Ed25519; type Transaction = Transaction; type Block = Block; type Output = Output; type SignableTransaction = SignableTransaction; type Eventuality = Eventuality; type TransactionMachine = TransactionMachine; type Scheduler = Scheduler; type Address = Address; const NETWORK: ExternalNetworkId = ExternalNetworkId::Monero; const ID: &'static str = "Monero"; const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize = 120; const CONFIRMATIONS: usize = 10; const MAX_OUTPUTS: usize = 16; // 0.01 XMR const DUST: u64 = 10000000000; // TODO const COST_TO_AGGREGATE: u64 = 0; // Monero doesn't require/benefit from tweaking fn tweak_keys(_: &mut ThresholdKeys) {} #[cfg(test)] async fn external_address(&self, key: EdwardsPoint) -> Address { Self::address_internal(key, EXTERNAL_SUBADDRESS) } fn branch_address(key: EdwardsPoint) -> Option
{ Some(Self::address_internal(key, BRANCH_SUBADDRESS)) } fn change_address(key: EdwardsPoint) -> Option
{ Some(Self::address_internal(key, CHANGE_SUBADDRESS)) } fn forward_address(key: EdwardsPoint) -> Option
{ Some(Self::address_internal(key, FORWARD_SUBADDRESS)) } async fn get_latest_block_number(&self) -> Result { // Monero defines height as chain length, so subtract 1 for block number Ok(self.rpc.get_height().await.map_err(map_rpc_err)? - 1) } async fn get_block(&self, number: usize) -> Result { Ok( self .rpc .get_block(self.rpc.get_block_hash(number).await.map_err(map_rpc_err)?) .await .map_err(map_rpc_err)?, ) } async fn get_outputs(&self, block: &Block, key: EdwardsPoint) -> Vec { let outputs = loop { match self .rpc .get_scannable_block(block.clone()) .await .map_err(|e| format!("{e:?}")) .and_then(|block| Self::scanner(key).scan(block).map_err(|e| format!("{e:?}"))) { Ok(outputs) => break outputs, Err(e) => { log::error!("couldn't scan block {}: {e:?}", hex::encode(block.id())); sleep(Duration::from_secs(60)).await; continue; } } }; // Miner transactions are required to explicitly state their timelock, so this does exclude // those (which have an extended timelock we don't want to deal with) let raw_outputs = outputs.not_additionally_locked(); let mut outputs = Vec::with_capacity(raw_outputs.len()); for output in raw_outputs { // This should be pointless as we shouldn't be able to scan for any other subaddress // This just helps ensures nothing invalid makes it through assert!([EXTERNAL_SUBADDRESS, BRANCH_SUBADDRESS, CHANGE_SUBADDRESS, FORWARD_SUBADDRESS] .contains(&output.subaddress())); outputs.push(Output(output)); } outputs } async fn get_eventuality_completions( &self, eventualities: &mut EventualitiesTracker, block: &Block, ) -> HashMap<[u8; 32], (usize, [u8; 32], Transaction)> { let mut res = HashMap::new(); if eventualities.map.is_empty() { return res; } async fn check_block( network: &Monero, eventualities: &mut EventualitiesTracker, block: &Block, res: &mut HashMap<[u8; 32], (usize, [u8; 32], Transaction)>, ) { for hash in &block.transactions { let tx = { let mut tx; while { tx = network.rpc.get_transaction(*hash).await; tx.is_err() } { 
log::error!("couldn't get transaction {}: {}", hex::encode(hash), tx.err().unwrap()); sleep(Duration::from_secs(60)).await; } tx.unwrap() }; if let Some((_, eventuality)) = eventualities.map.get(&tx.prefix().extra) { if eventuality.matches(&tx.clone().into()) { res.insert( eventualities.map.remove(&tx.prefix().extra).unwrap().0, (block.number().unwrap(), tx.id(), tx), ); } } } eventualities.block_number += 1; assert_eq!(eventualities.block_number, block.number().unwrap()); } for block_num in (eventualities.block_number + 1) .. block.number().unwrap() { let block = { let mut block; while { block = self.get_block(block_num).await; block.is_err() } { log::error!("couldn't get block {}: {}", block_num, block.err().unwrap()); sleep(Duration::from_secs(60)).await; } block.unwrap() }; check_block(self, eventualities, &block, &mut res).await; } // Also check the current block check_block(self, eventualities, block, &mut res).await; assert_eq!(eventualities.block_number, block.number().unwrap()); res } async fn needed_fee( &self, block_number: usize, inputs: &[Output], payments: &[Payment], change: &Option
, ) -> Result, NetworkError> { let res = self .make_signable_transaction(block_number, &[0; 32], inputs, payments, change, true) .await?; let Some(res) = res else { return Ok(None) }; let MakeSignableTransactionResult::Fee(fee) = res else { panic!("told make_signable_transaction calculating_fee and got transaction") }; Ok(Some(fee)) } async fn signable_transaction( &self, block_number: usize, plan_id: &[u8; 32], _key: EdwardsPoint, inputs: &[Output], payments: &[Payment], change: &Option
    // (continuation of Monero's Network::signable_transaction: the scheduler addendum is the
    // unit type, as the UTXO scheduler carries no extra data for Monero)
    , (): &(),
  ) -> Result, NetworkError> {
    let res = self
      .make_signable_transaction(block_number, plan_id, inputs, payments, change, false)
      .await?;
    let Some(res) = res else { return Ok(None) };
    // make_signable_transaction was told calculating_fee = false, so a Fee result is a logic bug.
    let MakeSignableTransactionResult::SignableTransaction(signable) = res else {
      panic!("told make_signable_transaction not calculating_fee and got fee")
    };

    let signable = SignableTransaction(signable);
    // The Eventuality is derived from the signable TX itself.
    let eventuality = signable.0.clone().into();
    Ok(Some((signable, eventuality)))
  }

  async fn attempt_sign(
    &self,
    keys: ThresholdKeys,
    transaction: SignableTransaction,
  ) -> Result {
    // Any failure to build the FROST machine indicates an invalid TX, which is a bug upstream.
    match transaction.0.clone().multisig(keys) {
      Ok(machine) => Ok(machine),
      Err(e) => panic!("failed to create a multisig machine for TX: {e}"),
    }
  }

  async fn publish_completion(&self, tx: &Transaction) -> Result<(), NetworkError> {
    match self.rpc.publish_transaction(tx).await {
      Ok(()) => Ok(()),
      // Connection errors are transient and surfaced to the caller for retry.
      Err(RpcError::ConnectionError(e)) => {
        log::debug!("Monero ConnectionError: {e}");
        Err(NetworkError::ConnectionError)?
      }
      // TODO: Distinguish already in pool vs double spend (other signing attempt succeeded) vs
      // invalid transaction
      Err(e) => panic!("failed to publish TX {}: {e}", hex::encode(tx.hash())),
    }
  }

  async fn confirm_completion(
    &self,
    eventuality: &Eventuality,
    id: &[u8; 32],
  ) -> Result, NetworkError> {
    // Fetch the claimed TX and verify it actually satisfies this Eventuality.
    let tx = self.rpc.get_transaction(*id).await.map_err(map_rpc_err)?;
    if eventuality.matches(&tx.clone().into()) {
      Ok(Some(tx))
    } else {
      Ok(None)
    }
  }

  #[cfg(test)]
  async fn get_block_number(&self, id: &[u8; 32]) -> usize {
    self.rpc.get_block(*id).await.unwrap().number().unwrap()
  }

  #[cfg(test)]
  async fn check_eventuality_by_claim(
    &self,
    eventuality: &Self::Eventuality,
    claim: &[u8; 32],
  ) -> bool {
    // Pruned transactions suffice for matching an Eventuality.
    return eventuality.matches(&self.rpc.get_pruned_transaction(*claim).await.unwrap());
  }

  #[cfg(test)]
  async fn get_transaction_by_eventuality(
    &self,
    block: usize,
    eventuality: &Eventuality,
  ) -> Transaction {
    // Scan the block's transactions for the one this Eventuality matches.
    let block = self.rpc.get_block_by_number(block).await.unwrap();
    for tx in &block.transactions {
      let tx =
self.rpc.get_transaction(*tx).await.unwrap(); if eventuality.matches(&tx.clone().into()) { return tx; } } panic!("block didn't have a transaction for this eventuality") } #[cfg(test)] async fn mine_block(&self) { // https://github.com/serai-dex/serai/issues/198 sleep(std::time::Duration::from_millis(100)).await; self.rpc.generate_blocks(&Self::test_address().into(), 1).await.unwrap(); } #[cfg(test)] async fn test_send(&self, address: Address) -> Block { use zeroize::Zeroizing; use rand_core::{RngCore, OsRng}; use monero_wallet::rpc::FeePriority; let new_block = self.get_latest_block_number().await.unwrap() + 1; for _ in 0 .. 80 { self.mine_block().await; } let new_block = self.rpc.get_block_by_number(new_block).await.unwrap(); let mut outputs = Self::test_scanner() .scan(self.rpc.get_scannable_block(new_block.clone()).await.unwrap()) .unwrap() .ignore_additional_timelock(); let output = outputs.swap_remove(0); let amount = output.commitment().amount; // The dust should always be sufficient for the fee let fee = Monero::DUST; let rct_type = match new_block.header.hardfork_version { 14 => RctType::ClsagBulletproof, 15 | 16 => RctType::ClsagBulletproofPlus, _ => panic!("Monero hard forked and the processor wasn't updated for it"), }; let output = OutputWithDecoys::fingerprintable_deterministic_new( &mut OsRng, &self.rpc, match rct_type { RctType::ClsagBulletproof => 11, RctType::ClsagBulletproofPlus => 16, _ => panic!("selecting decoys for an unsupported RctType"), }, self.rpc.get_height().await.unwrap(), output, ) .await .unwrap(); let mut outgoing_view_key = Zeroizing::new([0; 32]); OsRng.fill_bytes(outgoing_view_key.as_mut()); let tx = MSignableTransaction::new( rct_type, outgoing_view_key, vec![output], vec![(address.into(), amount - fee)], Change::fingerprintable(Some(Self::test_address().into())), vec![], self.rpc.get_fee_rate(FeePriority::Unimportant).await.unwrap(), ) .unwrap() .sign(&mut OsRng, &Zeroizing::new(Scalar::ONE.0)) .unwrap(); let block = 
self.get_latest_block_number().await.unwrap() + 1; self.rpc.publish_transaction(&tx).await.unwrap(); for _ in 0 .. 10 { self.mine_block().await; } self.get_block(block).await.unwrap() } } impl UtxoNetwork for Monero { // wallet2 will not create a transaction larger than 100kb, and Monero won't relay a transaction // larger than 150kb. This fits within the 100kb mark // Technically, it can be ~124, yet a small bit of buffer is appreciated // TODO: Test creating a TX this big const MAX_INPUTS: usize = 120; } ================================================ FILE: processor/src/plan.rs ================================================ use std::io; use scale::{Encode, Decode}; use transcript::{Transcript, RecommendedTranscript}; use ciphersuite::group::GroupEncoding; use frost::curve::Ciphersuite; use serai_client::primitives::ExternalBalance; use crate::{ networks::{Output, Network}, multisigs::scheduler::{SchedulerAddendum, Scheduler}, }; #[derive(Clone, PartialEq, Eq, Debug)] pub struct Payment { pub address: N::Address, pub data: Option>, pub balance: ExternalBalance, } impl Payment { pub fn transcript(&self, transcript: &mut T) { transcript.domain_separate(b"payment"); transcript.append_message(b"address", self.address.to_string().as_bytes()); if let Some(data) = self.data.as_ref() { transcript.append_message(b"data", data); } transcript.append_message(b"coin", self.balance.coin.encode()); transcript.append_message(b"amount", self.balance.amount.0.to_le_bytes()); } pub fn write(&self, writer: &mut W) -> io::Result<()> { // TODO: Don't allow creating Payments with an Address which can't be serialized let address: Vec = self .address .clone() .try_into() .map_err(|_| io::Error::other("address couldn't be serialized"))?; writer.write_all(&u32::try_from(address.len()).unwrap().to_le_bytes())?; writer.write_all(&address)?; writer.write_all(&[u8::from(self.data.is_some())])?; if let Some(data) = &self.data { 
writer.write_all(&u32::try_from(data.len()).unwrap().to_le_bytes())?; writer.write_all(data)?; } writer.write_all(&self.balance.encode()) } pub fn read(reader: &mut R) -> io::Result { let mut buf = [0; 4]; reader.read_exact(&mut buf)?; let mut address = vec![0; usize::try_from(u32::from_le_bytes(buf)).unwrap()]; reader.read_exact(&mut address)?; let address = N::Address::try_from(address).map_err(|_| io::Error::other("invalid address"))?; let mut buf = [0; 1]; reader.read_exact(&mut buf)?; let data = if buf[0] == 1 { let mut buf = [0; 4]; reader.read_exact(&mut buf)?; let mut data = vec![0; usize::try_from(u32::from_le_bytes(buf)).unwrap()]; reader.read_exact(&mut data)?; Some(data) } else { None }; let balance = ExternalBalance::decode(&mut scale::IoReader(reader)) .map_err(|_| io::Error::other("invalid balance"))?; Ok(Payment { address, data, balance }) } } #[derive(Clone, PartialEq)] pub struct Plan { pub key: ::G, pub inputs: Vec, /// The payments this Plan is intended to create. /// /// This should only contain payments leaving Serai. While it is acceptable for users to enter /// Serai's address(es) as the payment address, as that'll be handled by anything which expects /// certain properties, Serai as a system MUST NOT use payments for internal transfers. Doing /// so will cause a reduction in their value by the TX fee/operating costs, creating an /// incomplete transfer. pub payments: Vec>, /// The change this Plan should use. /// /// This MUST contain a Serai address. Operating costs may be deducted from the payments in this /// Plan on the premise that the change address is Serai's, and accordingly, Serai will recoup /// the operating costs. // // TODO: Consider moving to ::G? pub change: Option, /// The scheduler's additional data. 
pub scheduler_addendum: >::Addendum, } impl core::fmt::Debug for Plan { fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { fmt .debug_struct("Plan") .field("key", &hex::encode(self.key.to_bytes())) .field("inputs", &self.inputs) .field("payments", &self.payments) .field("change", &self.change.as_ref().map(ToString::to_string)) .field("scheduler_addendum", &self.scheduler_addendum) .finish() } } impl Plan { pub fn transcript(&self) -> RecommendedTranscript { let mut transcript = RecommendedTranscript::new(b"Serai Processor Plan ID"); transcript.domain_separate(b"meta"); transcript.append_message(b"network", N::ID); transcript.append_message(b"key", self.key.to_bytes()); transcript.domain_separate(b"inputs"); for input in &self.inputs { transcript.append_message(b"input", input.id()); } transcript.domain_separate(b"payments"); for payment in &self.payments { payment.transcript(&mut transcript); } if let Some(change) = &self.change { transcript.append_message(b"change", change.to_string()); } let mut addendum_bytes = vec![]; self.scheduler_addendum.write(&mut addendum_bytes).unwrap(); transcript.append_message(b"scheduler_addendum", addendum_bytes); transcript } pub fn id(&self) -> [u8; 32] { let challenge = self.transcript().challenge(b"id"); let mut res = [0; 32]; res.copy_from_slice(&challenge[.. 
32]); res } pub fn write(&self, writer: &mut W) -> io::Result<()> { writer.write_all(self.key.to_bytes().as_ref())?; writer.write_all(&u32::try_from(self.inputs.len()).unwrap().to_le_bytes())?; for input in &self.inputs { input.write(writer)?; } writer.write_all(&u32::try_from(self.payments.len()).unwrap().to_le_bytes())?; for payment in &self.payments { payment.write(writer)?; } // TODO: Have Plan construction fail if change cannot be serialized let change = if let Some(change) = &self.change { change.clone().try_into().map_err(|_| { io::Error::other(format!( "an address we said to use as change couldn't be converted to a Vec: {}", change.to_string(), )) })? } else { vec![] }; assert!(serai_client::primitives::MAX_ADDRESS_LEN <= u8::MAX.into()); writer.write_all(&[u8::try_from(change.len()).unwrap()])?; writer.write_all(&change)?; self.scheduler_addendum.write(writer) } pub fn read(reader: &mut R) -> io::Result { let key = N::Curve::read_G(reader)?; let mut inputs = vec![]; let mut buf = [0; 4]; reader.read_exact(&mut buf)?; for _ in 0 .. u32::from_le_bytes(buf) { inputs.push(N::Output::read(reader)?); } let mut payments = vec![]; reader.read_exact(&mut buf)?; for _ in 0 .. u32::from_le_bytes(buf) { payments.push(Payment::::read(reader)?); } let mut len = [0; 1]; reader.read_exact(&mut len)?; let mut change = vec![0; usize::from(len[0])]; reader.read_exact(&mut change)?; let change = if change.is_empty() { None } else { Some(N::Address::try_from(change).map_err(|_| { io::Error::other("couldn't deserialize an Address serialized into a Plan") })?) 
}; let scheduler_addendum = >::Addendum::read(reader)?; Ok(Plan { key, inputs, payments, change, scheduler_addendum }) } } ================================================ FILE: processor/src/signer.rs ================================================ use core::{marker::PhantomData, fmt}; use std::collections::HashMap; use rand_core::OsRng; use frost::{ ThresholdKeys, FrostError, sign::{Writable, PreprocessMachine, SignMachine, SignatureMachine}, }; use log::{info, debug, warn, error}; use serai_client::validator_sets::primitives::Session; use messages::sign::*; pub use serai_db::*; use crate::{ Get, DbTxn, Db, networks::{Eventuality, Network}, }; create_db!( SignerDb { CompletionsDb: (id: [u8; 32]) -> Vec, EventualityDb: (id: [u8; 32]) -> Vec, AttemptDb: (id: &SignId) -> (), CompletionDb: (claim: &[u8]) -> Vec, ActiveSignsDb: () -> Vec<[u8; 32]>, CompletedOnChainDb: (id: &[u8; 32]) -> (), } ); impl ActiveSignsDb { fn add_active_sign(txn: &mut impl DbTxn, id: &[u8; 32]) { if CompletedOnChainDb::get(txn, id).is_some() { return; } let mut active = ActiveSignsDb::get(txn).unwrap_or_default(); active.push(*id); ActiveSignsDb::set(txn, &active); } } impl CompletedOnChainDb { fn complete_on_chain(txn: &mut impl DbTxn, id: &[u8; 32]) { CompletedOnChainDb::set(txn, id, &()); ActiveSignsDb::set( txn, &ActiveSignsDb::get(txn) .unwrap_or_default() .into_iter() .filter(|active| active != id) .collect::>(), ); } } impl CompletionsDb { fn completions( getter: &impl Get, id: [u8; 32], ) -> Vec<::Claim> { let Some(completions) = Self::get(getter, id) else { return vec![] }; // If this was set yet is empty, it's because it's the encoding of a claim with a length of 0 if completions.is_empty() { let default = ::Claim::default(); assert_eq!(default.as_ref().len(), 0); return vec![default]; } let mut completions_ref = completions.as_slice(); let mut res = vec![]; while !completions_ref.is_empty() { let mut id = ::Claim::default(); let id_len = id.as_ref().len(); 
id.as_mut().copy_from_slice(&completions_ref[.. id_len]); completions_ref = &completions_ref[id_len ..]; res.push(id); } res } fn complete( txn: &mut impl DbTxn, id: [u8; 32], completion: &::Completion, ) { // Completions can be completed by multiple signatures // Save every solution in order to be robust CompletionDb::save_completion::(txn, completion); let claim = N::Eventuality::claim(completion); let claim: &[u8] = claim.as_ref(); // If claim has a 0-byte encoding, the set key, even if empty, is the claim if claim.is_empty() { Self::set(txn, id, &vec![]); return; } let mut existing = Self::get(txn, id).unwrap_or_default(); assert_eq!(existing.len() % claim.len(), 0); // Don't add this completion if it's already present let mut i = 0; while i < existing.len() { if &existing[i .. (i + claim.len())] == claim { return; } i += claim.len(); } existing.extend(claim); Self::set(txn, id, &existing); } } impl EventualityDb { fn save_eventuality( txn: &mut impl DbTxn, id: [u8; 32], eventuality: &N::Eventuality, ) { txn.put(Self::key(id), eventuality.serialize()); } fn eventuality(getter: &impl Get, id: [u8; 32]) -> Option { Some(N::Eventuality::read(&mut getter.get(Self::key(id))?.as_slice()).unwrap()) } } impl CompletionDb { fn save_completion( txn: &mut impl DbTxn, completion: &::Completion, ) { let claim = N::Eventuality::claim(completion); let claim: &[u8] = claim.as_ref(); Self::set(txn, claim, &N::Eventuality::serialize_completion(completion)); } fn completion( getter: &impl Get, claim: &::Claim, ) -> Option<::Completion> { Self::get(getter, claim.as_ref()) .map(|completion| N::Eventuality::read_completion::<&[u8]>(&mut completion.as_ref()).unwrap()) } } type PreprocessFor = <::TransactionMachine as PreprocessMachine>::Preprocess; type SignMachineFor = <::TransactionMachine as PreprocessMachine>::SignMachine; type SignatureShareFor = as SignMachine< <::Eventuality as Eventuality>::Completion, >>::SignatureShare; type SignatureMachineFor = as SignMachine< 
<::Eventuality as Eventuality>::Completion, >>::SignatureMachine; pub struct Signer { db: PhantomData, network: N, session: Session, keys: Vec>, signable: HashMap<[u8; 32], N::SignableTransaction>, attempt: HashMap<[u8; 32], u32>, #[allow(clippy::type_complexity)] preprocessing: HashMap<[u8; 32], (Vec>, Vec>)>, #[allow(clippy::type_complexity)] signing: HashMap<[u8; 32], (SignatureMachineFor, Vec>)>, } impl fmt::Debug for Signer { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt .debug_struct("Signer") .field("network", &self.network) .field("signable", &self.signable) .field("attempt", &self.attempt) .finish_non_exhaustive() } } impl Signer { /// Rebroadcast already signed TXs which haven't had their completions mined into a sufficiently /// confirmed block. pub async fn rebroadcast_task(db: D, network: N) { log::info!("rebroadcasting transactions for plans whose completions yet to be confirmed..."); loop { for active in ActiveSignsDb::get(&db).unwrap_or_default() { for claim in CompletionsDb::completions::(&db, active) { log::info!("rebroadcasting completion with claim {}", hex::encode(claim.as_ref())); // TODO: Don't drop the error entirely. 
Check for invariants let _ = network.publish_completion(&CompletionDb::completion::(&db, &claim).unwrap()).await; } } // Only run every five minutes so we aren't frequently loading tens to hundreds of KB from // the DB tokio::time::sleep(core::time::Duration::from_secs(5 * 60)).await; } } pub fn new(network: N, session: Session, keys: Vec>) -> Signer { assert!(!keys.is_empty()); Signer { db: PhantomData, network, session, keys, signable: HashMap::new(), attempt: HashMap::new(), preprocessing: HashMap::new(), signing: HashMap::new(), } } fn verify_id(&self, id: &SignId) -> Result<(), ()> { // Check the attempt lines up match self.attempt.get(&id.id) { // If we don't have an attempt logged, it's because the coordinator is faulty OR because we // rebooted OR we detected the signed transaction on chain, so there's notable network // latency/a malicious validator None => { warn!( "not attempting {} #{}. this is an error if we didn't reboot", hex::encode(id.id), id.attempt ); Err(())?; } Some(attempt) => { if attempt != &id.attempt { warn!( "sent signing data for {} #{} yet we have attempt #{}", hex::encode(id.id), id.attempt, attempt ); Err(())?; } } } Ok(()) } #[must_use] fn already_completed(txn: &mut D::Transaction<'_>, id: [u8; 32]) -> bool { if !CompletionsDb::completions::(txn, id).is_empty() { debug!( "SignTransaction/Reattempt order for {}, which we've already completed signing", hex::encode(id) ); true } else { false } } #[must_use] fn complete( &mut self, id: [u8; 32], claim: &::Claim, ) -> ProcessorMessage { // Assert we're actively signing for this TX assert!(self.signable.remove(&id).is_some(), "completed a TX we weren't signing for"); assert!(self.attempt.remove(&id).is_some(), "attempt had an ID signable didn't have"); // If we weren't selected to participate, we'll have a preprocess self.preprocessing.remove(&id); // If we were selected, the signature will only go through if we contributed a share // Despite this, we then need to get everyone's shares, 
and we may get a completion before // we get everyone's shares // This would be if the coordinator fails and we find the eventuality completion on-chain self.signing.remove(&id); // Emit the event for it ProcessorMessage::Completed { session: self.session, id, tx: claim.as_ref().to_vec() } } #[must_use] pub fn completed( &mut self, txn: &mut D::Transaction<'_>, id: [u8; 32], completion: &::Completion, ) -> Option { let first_completion = !Self::already_completed(txn, id); // Save this completion to the DB CompletedOnChainDb::complete_on_chain(txn, &id); CompletionsDb::complete::(txn, id, completion); if first_completion { Some(self.complete(id, &N::Eventuality::claim(completion))) } else { None } } /// Returns Some if the first completion. // Doesn't use any loops/retries since we'll eventually get this from the Scanner anyways #[must_use] async fn claimed_eventuality_completion( &mut self, txn: &mut D::Transaction<'_>, id: [u8; 32], claim: &::Claim, ) -> Option { if let Some(eventuality) = EventualityDb::eventuality::(txn, id) { match self.network.confirm_completion(&eventuality, claim).await { Ok(Some(completion)) => { info!( "signer eventuality for {} resolved in {}", hex::encode(id), hex::encode(claim.as_ref()) ); let first_completion = !Self::already_completed(txn, id); // Save this completion to the DB CompletionsDb::complete::(txn, id, &completion); if first_completion { return Some(self.complete(id, claim)); } } Ok(None) => { warn!( "a validator claimed {} completed {} when it did not", hex::encode(claim.as_ref()), hex::encode(id), ); } Err(_) => { // Transaction hasn't hit our mempool/was dropped for a different signature // The latter can happen given certain latency conditions/a single malicious signer // In the case of a single malicious signer, they can drag multiple honest validators down // with them, so we unfortunately can't slash on this case warn!( "a validator claimed {} completed {} yet we couldn't check that claim", 
hex::encode(claim.as_ref()), hex::encode(id), ); } } } else { warn!( "informed of completion {} for eventuality {}, when we didn't have that eventuality", hex::encode(claim.as_ref()), hex::encode(id), ); } None } #[must_use] async fn attempt( &mut self, txn: &mut D::Transaction<'_>, id: [u8; 32], attempt: u32, ) -> Option { if Self::already_completed(txn, id) { return None; } // Check if we're already working on this attempt if let Some(curr_attempt) = self.attempt.get(&id) { if curr_attempt >= &attempt { warn!( "told to attempt {} #{} yet we're already working on {}", hex::encode(id), attempt, curr_attempt ); return None; } } // Start this attempt // Clone the TX so we don't have an immutable borrow preventing the below mutable actions // (also because we do need an owned tx anyways) let Some(tx) = self.signable.get(&id).cloned() else { warn!("told to attempt a TX we aren't currently signing for"); return None; }; // Delete any existing machines self.preprocessing.remove(&id); self.signing.remove(&id); // Update the attempt number self.attempt.insert(id, attempt); let id = SignId { session: self.session, id, attempt }; info!("signing for {} #{}", hex::encode(id.id), id.attempt); // If we reboot mid-sign, the current design has us abort all signs and wait for latter // attempts/new signing protocols // This is distinct from the DKG which will continue DKG sessions, even on reboot // This is because signing is tolerant of failures of up to 1/3rd of the group // The DKG requires 100% participation // While we could apply similar tricks as the DKG (a seeded RNG) to achieve support for // reboots, it's not worth the complexity when messing up here leaks our secret share // // Despite this, on reboot, we'll get told of active signing items, and may be in this // branch again for something we've already attempted // // Only run if this hasn't already been attempted // TODO: This isn't complete as this txn may not be committed with the expected timing if 
AttemptDb::get(txn, &id).is_some() { warn!( "already attempted {} #{}. this is an error if we didn't reboot", hex::encode(id.id), id.attempt ); return None; } AttemptDb::set(txn, &id, &()); // Attempt to create the TX let mut machines = vec![]; let mut preprocesses = vec![]; let mut serialized_preprocesses = vec![]; for keys in &self.keys { let machine = match self.network.attempt_sign(keys.clone(), tx.clone()).await { Err(e) => { error!("failed to attempt {}, #{}: {:?}", hex::encode(id.id), id.attempt, e); return None; } Ok(machine) => machine, }; let (machine, preprocess) = machine.preprocess(&mut OsRng); machines.push(machine); serialized_preprocesses.push(preprocess.serialize()); preprocesses.push(preprocess); } self.preprocessing.insert(id.id, (machines, preprocesses)); // Broadcast our preprocess Some(ProcessorMessage::Preprocess { id, preprocesses: serialized_preprocesses }) } #[must_use] pub async fn sign_transaction( &mut self, txn: &mut D::Transaction<'_>, id: [u8; 32], tx: N::SignableTransaction, eventuality: &N::Eventuality, ) -> Option { // The caller is expected to re-issue sign orders on reboot // This is solely used by the rebroadcast task ActiveSignsDb::add_active_sign(txn, &id); if Self::already_completed(txn, id) { return None; } EventualityDb::save_eventuality::(txn, id, eventuality); self.signable.insert(id, tx); self.attempt(txn, id, 0).await } #[must_use] pub async fn handle( &mut self, txn: &mut D::Transaction<'_>, msg: CoordinatorMessage, ) -> Option { match msg { CoordinatorMessage::Preprocesses { id, preprocesses } => { if self.verify_id(&id).is_err() { return None; } let (machines, our_preprocesses) = match self.preprocessing.remove(&id.id) { // Either rebooted or RPC error, or some invariant None => { warn!( "not preprocessing for {}. 
this is an error if we didn't reboot", hex::encode(id.id) ); return None; } Some(machine) => machine, }; let mut parsed = HashMap::new(); for l in { let mut keys = preprocesses.keys().copied().collect::>(); keys.sort(); keys } { let mut preprocess_ref = preprocesses.get(&l).unwrap().as_slice(); let Ok(res) = machines[0].read_preprocess(&mut preprocess_ref) else { return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); }; if !preprocess_ref.is_empty() { return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); } parsed.insert(l, res); } let preprocesses = parsed; // Only keep a single machine as we only need one to get the signature let mut signature_machine = None; let mut shares = vec![]; let mut serialized_shares = vec![]; for (m, machine) in machines.into_iter().enumerate() { let mut preprocesses = preprocesses.clone(); for (i, our_preprocess) in our_preprocesses.clone().into_iter().enumerate() { if i != m { assert!(preprocesses.insert(self.keys[i].params().i(), our_preprocess).is_none()); } } // Use an empty message, as expected of TransactionMachines let (machine, share) = match machine.sign(preprocesses, &[]) { Ok(res) => res, Err(e) => match e { FrostError::InternalError(_) | FrostError::InvalidParticipant(_, _) | FrostError::InvalidSigningSet(_) | FrostError::InvalidParticipantQuantity(_, _) | FrostError::DuplicatedParticipant(_) | FrostError::MissingParticipant(_) => unreachable!(), FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => { return Some(ProcessorMessage::InvalidParticipant { id, participant: l }) } }, }; if m == 0 { signature_machine = Some(machine); } serialized_shares.push(share.serialize()); shares.push(share); } self.signing.insert(id.id, (signature_machine.unwrap(), shares)); // Broadcast our shares Some(ProcessorMessage::Share { id, shares: serialized_shares }) } CoordinatorMessage::Shares { id, shares } => { if self.verify_id(&id).is_err() { return None; } let (machine, our_shares) = match 
self.signing.remove(&id.id) { // Rebooted, RPC error, or some invariant None => { // If preprocessing has this ID, it means we were never sent the preprocess by the // coordinator if self.preprocessing.contains_key(&id.id) { panic!("never preprocessed yet signing?"); } warn!( "not preprocessing for {}. this is an error if we didn't reboot", hex::encode(id.id) ); return None; } Some(machine) => machine, }; let mut parsed = HashMap::new(); for l in { let mut keys = shares.keys().copied().collect::>(); keys.sort(); keys } { let mut share_ref = shares.get(&l).unwrap().as_slice(); let Ok(res) = machine.read_share(&mut share_ref) else { return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); }; if !share_ref.is_empty() { return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); } parsed.insert(l, res); } let mut shares = parsed; for (i, our_share) in our_shares.into_iter().enumerate().skip(1) { assert!(shares.insert(self.keys[i].params().i(), our_share).is_none()); } let completion = match machine.complete(shares) { Ok(res) => res, Err(e) => match e { FrostError::InternalError(_) | FrostError::InvalidParticipant(_, _) | FrostError::InvalidSigningSet(_) | FrostError::InvalidParticipantQuantity(_, _) | FrostError::DuplicatedParticipant(_) | FrostError::MissingParticipant(_) => unreachable!(), FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => { return Some(ProcessorMessage::InvalidParticipant { id, participant: l }) } }, }; // Save the completion in case it's needed for recovery CompletionsDb::complete::(txn, id.id, &completion); // Publish it if let Err(e) = self.network.publish_completion(&completion).await { error!("couldn't publish completion for plan {}: {:?}", hex::encode(id.id), e); } else { info!("published completion for plan {}", hex::encode(id.id)); } // Stop trying to sign for this TX Some(self.complete(id.id, &N::Eventuality::claim(&completion))) } CoordinatorMessage::Reattempt { id } => self.attempt(txn, id.id, 
id.attempt).await,

      CoordinatorMessage::Completed { session: _, id, tx: mut claim_vec } => {
        let mut claim = <N::Eventuality as Eventuality>::Claim::default();
        // A claim of the wrong length can't be valid; log a truncated hex preview and bail
        if claim.as_ref().len() != claim_vec.len() {
          let true_len = claim_vec.len();
          claim_vec.truncate(2 * claim.as_ref().len());
          warn!(
            "a validator claimed {}... (actual length {}) completed {} yet {}",
            hex::encode(&claim_vec),
            true_len,
            hex::encode(id),
            "that's not a valid Claim",
          );
          return None;
        }
        claim.as_mut().copy_from_slice(&claim_vec);

        self.claimed_eventuality_completion(txn, id, &claim).await
      }
    }
  }
}

================================================ FILE: processor/src/slash_report_signer.rs ================================================

// NOTE(review): generic arguments in this file were stripped by the extraction process and have
// been restored from context — confirm against the upstream serai repository.
use core::fmt;
use std::collections::HashMap;

use rand_core::OsRng;

use frost::{
  curve::Ristretto,
  ThresholdKeys, FrostError,
  algorithm::Algorithm,
  sign::{
    Writable, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine,
    AlgorithmSignMachine, AlgorithmSignatureMachine,
  },
};
use frost_schnorrkel::Schnorrkel;

use log::{info, warn};

use serai_client::{
  primitives::ExternalNetworkId,
  validator_sets::primitives::{report_slashes_message, ExternalValidatorSet, Session},
  Public,
};

use messages::coordinator::*;

use crate::{Get, DbTxn, create_db};

create_db!
{ SlashReportSignerDb { Completed: (session: Session) -> (), Attempt: (session: Session, attempt: u32) -> (), } } type Preprocess = as PreprocessMachine>::Preprocess; type SignatureShare = as SignMachine< >::Signature, >>::SignatureShare; pub struct SlashReportSigner { network: ExternalNetworkId, session: Session, keys: Vec>, report: Vec<([u8; 32], u32)>, attempt: u32, #[allow(clippy::type_complexity)] preprocessing: Option<(Vec>, Vec)>, #[allow(clippy::type_complexity)] signing: Option<(AlgorithmSignatureMachine, Vec)>, } impl fmt::Debug for SlashReportSigner { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt .debug_struct("SlashReportSigner") .field("session", &self.session) .field("report", &self.report) .field("attempt", &self.attempt) .field("preprocessing", &self.preprocessing.is_some()) .field("signing", &self.signing.is_some()) .finish_non_exhaustive() } } impl SlashReportSigner { pub fn new( txn: &mut impl DbTxn, network: ExternalNetworkId, session: Session, keys: Vec>, report: Vec<([u8; 32], u32)>, attempt: u32, ) -> Option<(SlashReportSigner, ProcessorMessage)> { assert!(!keys.is_empty()); if Completed::get(txn, session).is_some() { return None; } if Attempt::get(txn, session, attempt).is_some() { warn!( "already attempted signing slash report for session {:?}, attempt #{}. 
{}", session, attempt, "this is an error if we didn't reboot", ); return None; } Attempt::set(txn, session, attempt, &()); info!("signing slash report for session {:?} with attempt #{}", session, attempt); let mut machines = vec![]; let mut preprocesses = vec![]; let mut serialized_preprocesses = vec![]; for keys in &keys { // b"substrate" is a literal from sp-core let machine = AlgorithmMachine::new(Schnorrkel::new(b"substrate"), keys.clone()); let (machine, preprocess) = machine.preprocess(&mut OsRng); machines.push(machine); serialized_preprocesses.push(preprocess.serialize().try_into().unwrap()); preprocesses.push(preprocess); } let preprocessing = Some((machines, preprocesses)); let substrate_sign_id = SubstrateSignId { session, id: SubstrateSignableId::SlashReport, attempt }; Some(( SlashReportSigner { network, session, keys, report, attempt, preprocessing, signing: None }, ProcessorMessage::SlashReportPreprocess { id: substrate_sign_id, preprocesses: serialized_preprocesses, }, )) } #[must_use] pub fn handle( &mut self, txn: &mut impl DbTxn, msg: CoordinatorMessage, ) -> Option { match msg { CoordinatorMessage::CosignSubstrateBlock { .. } => { panic!("SlashReportSigner passed CosignSubstrateBlock") } CoordinatorMessage::SignSlashReport { .. } => { panic!("SlashReportSigner passed SignSlashReport") } CoordinatorMessage::SubstratePreprocesses { id, preprocesses } => { assert_eq!(id.session, self.session); assert_eq!(id.id, SubstrateSignableId::SlashReport); if id.attempt != self.attempt { panic!("given preprocesses for a distinct attempt than SlashReportSigner is signing") } let (machines, our_preprocesses) = match self.preprocessing.take() { // Either rebooted or RPC error, or some invariant None => { warn!("not preprocessing. 
this is an error if we didn't reboot"); return None; } Some(preprocess) => preprocess, }; let mut parsed = HashMap::new(); for l in { let mut keys = preprocesses.keys().copied().collect::>(); keys.sort(); keys } { let mut preprocess_ref = preprocesses.get(&l).unwrap().as_slice(); let Ok(res) = machines[0].read_preprocess(&mut preprocess_ref) else { return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); }; if !preprocess_ref.is_empty() { return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); } parsed.insert(l, res); } let preprocesses = parsed; // Only keep a single machine as we only need one to get the signature let mut signature_machine = None; let mut shares = vec![]; let mut serialized_shares = vec![]; for (m, machine) in machines.into_iter().enumerate() { let mut preprocesses = preprocesses.clone(); for (i, our_preprocess) in our_preprocesses.clone().into_iter().enumerate() { if i != m { assert!(preprocesses.insert(self.keys[i].params().i(), our_preprocess).is_none()); } } let (machine, share) = match machine.sign( preprocesses, &report_slashes_message( &ExternalValidatorSet { network: self.network, session: self.session }, &self .report .clone() .into_iter() .map(|(validator, points)| (Public::from(validator), points)) .collect::>(), ), ) { Ok(res) => res, Err(e) => match e { FrostError::InternalError(_) | FrostError::InvalidParticipant(_, _) | FrostError::InvalidSigningSet(_) | FrostError::InvalidParticipantQuantity(_, _) | FrostError::DuplicatedParticipant(_) | FrostError::MissingParticipant(_) => unreachable!(), FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => { return Some(ProcessorMessage::InvalidParticipant { id, participant: l }) } }, }; if m == 0 { signature_machine = Some(machine); } let mut share_bytes = [0; 32]; share_bytes.copy_from_slice(&share.serialize()); serialized_shares.push(share_bytes); shares.push(share); } self.signing = Some((signature_machine.unwrap(), shares)); // Broadcast our 
shares Some(ProcessorMessage::SubstrateShare { id, shares: serialized_shares }) } CoordinatorMessage::SubstrateShares { id, shares } => { assert_eq!(id.session, self.session); assert_eq!(id.id, SubstrateSignableId::SlashReport); if id.attempt != self.attempt { panic!("given preprocesses for a distinct attempt than SlashReportSigner is signing") } let (machine, our_shares) = match self.signing.take() { // Rebooted, RPC error, or some invariant None => { // If preprocessing has this ID, it means we were never sent the preprocess by the // coordinator if self.preprocessing.is_some() { panic!("never preprocessed yet signing?"); } warn!("not preprocessing. this is an error if we didn't reboot"); return None; } Some(signing) => signing, }; let mut parsed = HashMap::new(); for l in { let mut keys = shares.keys().copied().collect::>(); keys.sort(); keys } { let mut share_ref = shares.get(&l).unwrap().as_slice(); let Ok(res) = machine.read_share(&mut share_ref) else { return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); }; if !share_ref.is_empty() { return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); } parsed.insert(l, res); } let mut shares = parsed; for (i, our_share) in our_shares.into_iter().enumerate().skip(1) { assert!(shares.insert(self.keys[i].params().i(), our_share).is_none()); } let sig = match machine.complete(shares) { Ok(res) => res, Err(e) => match e { FrostError::InternalError(_) | FrostError::InvalidParticipant(_, _) | FrostError::InvalidSigningSet(_) | FrostError::InvalidParticipantQuantity(_, _) | FrostError::DuplicatedParticipant(_) | FrostError::MissingParticipant(_) => unreachable!(), FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => { return Some(ProcessorMessage::InvalidParticipant { id, participant: l }) } }, }; info!("signed slash report for session {:?} with attempt #{}", self.session, id.attempt); Completed::set(txn, self.session, &()); Some(ProcessorMessage::SignedSlashReport { session: 
self.session, signature: sig.to_bytes().to_vec(), }) } CoordinatorMessage::BatchReattempt { .. } => { panic!("BatchReattempt passed to SlashReportSigner") } } } } ================================================ FILE: processor/src/tests/addresses.rs ================================================ use core::{time::Duration, pin::Pin, future::Future}; use std::collections::HashMap; use rand_core::OsRng; use frost::{Participant, ThresholdKeys}; use tokio::time::timeout; use serai_client::validator_sets::primitives::Session; use serai_db::{DbTxn, MemDb}; use crate::{ Plan, Db, networks::{OutputType, Output, Block, UtxoNetwork}, multisigs::{ scheduler::Scheduler, scanner::{ScannerEvent, Scanner, ScannerHandle}, }, tests::sign, }; async fn spend( db: &mut D, network: &N, keys: &HashMap>, scanner: &mut ScannerHandle, outputs: Vec, ) where >::Addendum: From<()>, { let key = keys[&Participant::new(1).unwrap()].group_key(); let mut keys_txs = HashMap::new(); for (i, keys) in keys { keys_txs.insert( *i, ( keys.clone(), network .prepare_send( network.get_latest_block_number().await.unwrap() - N::CONFIRMATIONS, // Send to a change output Plan { key, inputs: outputs.clone(), payments: vec![], change: Some(N::change_address(key).unwrap()), scheduler_addendum: ().into(), }, 0, ) .await .unwrap() .tx .unwrap(), ), ); } sign(network.clone(), Session(0), keys_txs).await; for _ in 0 .. 
N::CONFIRMATIONS { network.mine_block().await; } match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() { ScannerEvent::Block { is_retirement_block, block, outputs } => { scanner.multisig_completed.send(false).unwrap(); assert!(!is_retirement_block); assert_eq!(outputs.len(), 1); // Make sure this is actually a change output assert_eq!(outputs[0].kind(), OutputType::Change); assert_eq!(outputs[0].key(), key); let mut txn = db.txn(); assert_eq!(scanner.ack_block(&mut txn, block).await.1, outputs); scanner.release_lock().await; txn.commit(); } ScannerEvent::Completed(_, _, _, _, _) => { panic!("unexpectedly got eventuality completion"); } } } pub async fn test_addresses( new_network: impl Fn(MemDb) -> Pin>>, ) where >::Addendum: From<()>, { let mut keys = frost::tests::key_gen::<_, N::Curve>(&mut OsRng); for keys in keys.values_mut() { N::tweak_keys(keys); } let key = keys[&Participant::new(1).unwrap()].group_key(); let mut db = MemDb::new(); let network = new_network(db.clone()).await; // Mine blocks so there's a confirmed block for _ in 0 .. N::CONFIRMATIONS { network.mine_block().await; } let (mut scanner, current_keys) = Scanner::new(network.clone(), db.clone()); assert!(current_keys.is_empty()); let mut txn = db.txn(); scanner.register_key(&mut txn, network.get_latest_block_number().await.unwrap(), key).await; txn.commit(); for _ in 0 .. 
N::CONFIRMATIONS { network.mine_block().await; } // Receive funds to the various addresses and make sure they're properly identified let mut received_outputs = vec![]; for (kind, address) in [ (OutputType::External, N::external_address(&network, key).await), (OutputType::Branch, N::branch_address(key).unwrap()), (OutputType::Change, N::change_address(key).unwrap()), (OutputType::Forwarded, N::forward_address(key).unwrap()), ] { let block_id = network.test_send(address).await.id(); // Verify the Scanner picked them up match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() { ScannerEvent::Block { is_retirement_block, block, outputs } => { scanner.multisig_completed.send(false).unwrap(); assert!(!is_retirement_block); assert_eq!(block, block_id); assert_eq!(outputs.len(), 1); assert_eq!(outputs[0].kind(), kind); assert_eq!(outputs[0].key(), key); let mut txn = db.txn(); assert_eq!(scanner.ack_block(&mut txn, block).await.1, outputs); scanner.release_lock().await; txn.commit(); received_outputs.extend(outputs); } ScannerEvent::Completed(_, _, _, _, _) => { panic!("unexpectedly got eventuality completion"); } }; } // Spend the branch output, creating a change output and ensuring we actually get change spend(&mut db, &network, &keys, &mut scanner, received_outputs).await; } ================================================ FILE: processor/src/tests/batch_signer.rs ================================================ use std::collections::HashMap; use rand_core::{RngCore, OsRng}; use ciphersuite::group::GroupEncoding; use frost::{ curve::Ristretto, Participant, tests::{key_gen, clone_without}, }; use sp_application_crypto::{RuntimePublic, sr25519::Public}; use serai_db::{DbTxn, Db, MemDb}; #[rustfmt::skip] use serai_client::{primitives::*, in_instructions::primitives::*, validator_sets::primitives::Session}; use messages::{ substrate, coordinator::{self, SubstrateSignableId, SubstrateSignId, CoordinatorMessage}, ProcessorMessage, }; use 
crate::batch_signer::BatchSigner; #[test] fn test_batch_signer() { let keys = key_gen::<_, Ristretto>(&mut OsRng); let participant_one = Participant::new(1).unwrap(); let id: u32 = 5; let block = BlockHash([0xaa; 32]); let batch = Batch { network: ExternalNetworkId::Monero, id, block, instructions: vec![ InInstructionWithBalance { instruction: InInstruction::Transfer(SeraiAddress([0xbb; 32])), balance: ExternalBalance { coin: ExternalCoin::Bitcoin, amount: Amount(1000) }, }, InInstructionWithBalance { instruction: InInstruction::Dex(DexCall::SwapAndAddLiquidity(SeraiAddress([0xbb; 32]))), balance: ExternalBalance { coin: ExternalCoin::Monero, amount: Amount(9999999999999999) }, }, ], }; let actual_id = SubstrateSignId { session: Session(0), id: SubstrateSignableId::Batch(batch.id), attempt: 0 }; let mut signing_set = vec![]; while signing_set.len() < usize::from(keys.values().next().unwrap().params().t()) { let candidate = Participant::new( u16::try_from((OsRng.next_u64() % u64::try_from(keys.len()).unwrap()) + 1).unwrap(), ) .unwrap(); if signing_set.contains(&candidate) { continue; } signing_set.push(candidate); } let mut signers = HashMap::new(); let mut dbs = HashMap::new(); let mut preprocesses = HashMap::new(); for i in 1 ..= keys.len() { let i = Participant::new(u16::try_from(i).unwrap()).unwrap(); let keys = keys.get(&i).unwrap().clone(); let mut signer = BatchSigner::::new(ExternalNetworkId::Monero, Session(0), vec![keys]); let mut db = MemDb::new(); let mut txn = db.txn(); match signer.sign(&mut txn, batch.clone()).unwrap() { // All participants should emit a preprocess coordinator::ProcessorMessage::BatchPreprocess { id, block: batch_block, preprocesses: mut these_preprocesses, } => { assert_eq!(id, actual_id); assert_eq!(batch_block, block); assert_eq!(these_preprocesses.len(), 1); if signing_set.contains(&i) { preprocesses.insert(i, these_preprocesses.swap_remove(0)); } } _ => panic!("didn't get preprocess back"), } txn.commit(); signers.insert(i, 
signer);
    dbs.insert(i, db);
  }

  // Second round: hand each selected signer everyone else's preprocess, collecting the
  // signature share each emits in response.
  let mut shares = HashMap::new();
  for i in &signing_set {
    let mut txn = dbs.get_mut(i).unwrap().txn();
    match signers
      .get_mut(i)
      .unwrap()
      .handle(
        &mut txn,
        CoordinatorMessage::SubstratePreprocesses {
          id: actual_id.clone(),
          preprocesses: clone_without(&preprocesses, i),
        },
      )
      .unwrap()
    {
      ProcessorMessage::Coordinator(coordinator::ProcessorMessage::SubstrateShare {
        id,
        shares: mut these_shares,
      }) => {
        assert_eq!(id, actual_id);
        assert_eq!(these_shares.len(), 1);
        shares.insert(*i, these_shares.swap_remove(0));
      }
      _ => panic!("didn't get share back"),
    }
    txn.commit();
  }

  // Final round: exchange shares. Every selected signer should produce the same SignedBatch,
  // whose signature verifies under participant one's group key.
  for i in &signing_set {
    let mut txn = dbs.get_mut(i).unwrap().txn();
    match signers
      .get_mut(i)
      .unwrap()
      .handle(
        &mut txn,
        CoordinatorMessage::SubstrateShares {
          id: actual_id.clone(),
          shares: clone_without(&shares, i),
        },
      )
      .unwrap()
    {
      ProcessorMessage::Substrate(substrate::ProcessorMessage::SignedBatch {
        batch: signed_batch,
      }) => {
        assert_eq!(signed_batch.batch, batch);
        assert!(Public::from_raw(keys[&participant_one].group_key().to_bytes())
          .verify(&batch_message(&batch), &signed_batch.signature));
      }
      _ => panic!("didn't get signed batch back"),
    }
    txn.commit();
  }
}

================================================
FILE: processor/src/tests/cosigner.rs
================================================

use std::collections::HashMap;

use rand_core::{RngCore, OsRng};

use ciphersuite::group::GroupEncoding;
use frost::{
  curve::Ristretto,
  Participant,
  tests::{key_gen, clone_without},
};

use sp_application_crypto::{RuntimePublic, sr25519::Public};

use serai_db::{DbTxn, Db, MemDb};

use serai_client::{primitives::*, validator_sets::primitives::Session};

use messages::coordinator::*;
use crate::cosigner::Cosigner;

// Exercises the Cosigner end to end: a random threshold-sized subset of signers preprocesses,
// exchanges shares, and produces a cosignature over a Substrate block hash which must verify
// under the group key.
#[test]
fn test_cosigner() {
  let keys = key_gen::<_, Ristretto>(&mut OsRng);

  let participant_one = Participant::new(1).unwrap();

  // Arbitrary block to cosign.
  let block_number = OsRng.next_u64();
  let block = [0xaa; 32];

  let actual_id = SubstrateSignId {
    session: Session(0),
    id:
SubstrateSignableId::CosigningSubstrateBlock(block),
    // Random attempt number, truncated into u32 range.
    attempt: (OsRng.next_u64() >> 32).try_into().unwrap(),
  };

  // Randomly pick a signing set of exactly t participants (no duplicates).
  let mut signing_set = vec![];
  while signing_set.len() < usize::from(keys.values().next().unwrap().params().t()) {
    let candidate = Participant::new(
      u16::try_from((OsRng.next_u64() % u64::try_from(keys.len()).unwrap()) + 1).unwrap(),
    )
    .unwrap();
    if signing_set.contains(&candidate) {
      continue;
    }
    signing_set.push(candidate);
  }

  // First round: every participant constructs a Cosigner and emits a preprocess; only the
  // selected signing set's preprocesses are retained.
  let mut signers = HashMap::new();
  let mut dbs = HashMap::new();
  let mut preprocesses = HashMap::new();
  for i in 1 ..= keys.len() {
    let i = Participant::new(u16::try_from(i).unwrap()).unwrap();
    let keys = keys.get(&i).unwrap().clone();

    let mut db = MemDb::new();
    let mut txn = db.txn();
    let (signer, preprocess) =
      Cosigner::new(&mut txn, Session(0), vec![keys], block_number, block, actual_id.attempt)
        .unwrap();

    match preprocess {
      // All participants should emit a preprocess
      ProcessorMessage::CosignPreprocess { id, preprocesses: mut these_preprocesses } => {
        assert_eq!(id, actual_id);
        assert_eq!(these_preprocesses.len(), 1);
        if signing_set.contains(&i) {
          preprocesses.insert(i, these_preprocesses.swap_remove(0));
        }
      }
      _ => panic!("didn't get preprocess back"),
    }
    txn.commit();

    signers.insert(i, signer);
    dbs.insert(i, db);
  }

  // Second round: distribute preprocesses, collect each selected signer's share.
  let mut shares = HashMap::new();
  for i in &signing_set {
    let mut txn = dbs.get_mut(i).unwrap().txn();
    match signers
      .get_mut(i)
      .unwrap()
      .handle(
        &mut txn,
        CoordinatorMessage::SubstratePreprocesses {
          id: actual_id.clone(),
          preprocesses: clone_without(&preprocesses, i),
        },
      )
      .unwrap()
    {
      ProcessorMessage::SubstrateShare { id, shares: mut these_shares } => {
        assert_eq!(id, actual_id);
        assert_eq!(these_shares.len(), 1);
        shares.insert(*i, these_shares.swap_remove(0));
      }
      _ => panic!("didn't get share back"),
    }
    txn.commit();
  }

  // Final round: exchange shares and expect a CosignedBlock whose signature verifies.
  for i in &signing_set {
    let mut txn = dbs.get_mut(i).unwrap().txn();
    match signers
      .get_mut(i)
      .unwrap()
      .handle(
        &mut txn,
        CoordinatorMessage::SubstrateShares {
          id: actual_id.clone(),
          shares:
clone_without(&shares, i), }, ) .unwrap() { ProcessorMessage::CosignedBlock { block_number, block: signed_block, signature } => { assert_eq!(signed_block, block); assert!(Public::from_raw(keys[&participant_one].group_key().to_bytes()).verify( &cosign_block_msg(block_number, block), &Signature::from(<[u8; 64]>::try_from(signature).unwrap()) )); } _ => panic!("didn't get cosigned block back"), } txn.commit(); } } ================================================ FILE: processor/src/tests/key_gen.rs ================================================ use std::collections::HashMap; use zeroize::Zeroizing; use rand_core::{RngCore, OsRng}; use ciphersuite::group::GroupEncoding; use frost::{Participant, ThresholdParams, tests::clone_without}; use serai_db::{DbTxn, Db, MemDb}; use sp_application_crypto::sr25519; use serai_client::validator_sets::primitives::{Session, KeyPair}; use messages::key_gen::*; use crate::{ networks::Network, key_gen::{KeyConfirmed, KeyGen}, }; const ID: KeyGenId = KeyGenId { session: Session(1), attempt: 3 }; pub fn test_key_gen() { let mut entropies = HashMap::new(); let mut dbs = HashMap::new(); let mut key_gens = HashMap::new(); for i in 1 ..= 5 { let mut entropy = Zeroizing::new([0; 32]); OsRng.fill_bytes(entropy.as_mut()); entropies.insert(i, entropy); let db = MemDb::new(); dbs.insert(i, db.clone()); key_gens.insert(i, KeyGen::::new(db, entropies[&i].clone())); } let mut all_commitments = HashMap::new(); for i in 1 ..= 5 { let key_gen = key_gens.get_mut(&i).unwrap(); let mut txn = dbs.get_mut(&i).unwrap().txn(); if let ProcessorMessage::Commitments { id, mut commitments } = key_gen.handle( &mut txn, CoordinatorMessage::GenerateKey { id: ID, params: ThresholdParams::new(3, 5, Participant::new(u16::try_from(i).unwrap()).unwrap()) .unwrap(), shares: 1, }, ) { assert_eq!(id, ID); assert_eq!(commitments.len(), 1); all_commitments .insert(Participant::new(u16::try_from(i).unwrap()).unwrap(), commitments.swap_remove(0)); } else { panic!("didn't get 
commitments back"); } txn.commit(); } // 1 is rebuilt on every step // 2 is rebuilt here // 3 ... are rebuilt once, one at each of the following steps let rebuild = |key_gens: &mut HashMap<_, _>, dbs: &HashMap<_, MemDb>, i| { key_gens.remove(&i); key_gens.insert(i, KeyGen::::new(dbs[&i].clone(), entropies[&i].clone())); }; rebuild(&mut key_gens, &dbs, 1); rebuild(&mut key_gens, &dbs, 2); let mut all_shares = HashMap::new(); for i in 1 ..= 5 { let key_gen = key_gens.get_mut(&i).unwrap(); let mut txn = dbs.get_mut(&i).unwrap().txn(); let i = Participant::new(u16::try_from(i).unwrap()).unwrap(); if let ProcessorMessage::Shares { id, mut shares } = key_gen.handle( &mut txn, CoordinatorMessage::Commitments { id: ID, commitments: clone_without(&all_commitments, &i) }, ) { assert_eq!(id, ID); assert_eq!(shares.len(), 1); all_shares.insert(i, shares.swap_remove(0)); } else { panic!("didn't get shares back"); } txn.commit(); } // Rebuild 1 and 3 rebuild(&mut key_gens, &dbs, 1); rebuild(&mut key_gens, &dbs, 3); let mut res = None; for i in 1 ..= 5 { let key_gen = key_gens.get_mut(&i).unwrap(); let mut txn = dbs.get_mut(&i).unwrap().txn(); let i = Participant::new(u16::try_from(i).unwrap()).unwrap(); if let ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } = key_gen.handle( &mut txn, CoordinatorMessage::Shares { id: ID, shares: vec![all_shares .iter() .filter_map(|(l, shares)| if i == *l { None } else { Some((*l, shares[&i].clone())) }) .collect()], }, ) { assert_eq!(id, ID); if res.is_none() { res = Some((substrate_key, network_key.clone())); } assert_eq!(res.as_ref().unwrap(), &(substrate_key, network_key)); } else { panic!("didn't get key back"); } txn.commit(); } let res = res.unwrap(); // Rebuild 1 and 4 rebuild(&mut key_gens, &dbs, 1); rebuild(&mut key_gens, &dbs, 4); for i in 1 ..= 5 { let key_gen = key_gens.get_mut(&i).unwrap(); let mut txn = dbs.get_mut(&i).unwrap().txn(); let KeyConfirmed { mut substrate_keys, mut network_keys } = 
key_gen.confirm( &mut txn, ID.session, &KeyPair(sr25519::Public::from(res.0), res.1.clone().try_into().unwrap()), ); txn.commit(); assert_eq!(substrate_keys.len(), 1); let substrate_keys = substrate_keys.swap_remove(0); assert_eq!(network_keys.len(), 1); let network_keys = network_keys.swap_remove(0); let params = ThresholdParams::new(3, 5, Participant::new(u16::try_from(i).unwrap()).unwrap()).unwrap(); assert_eq!(substrate_keys.params(), params); assert_eq!(network_keys.params(), params); assert_eq!( ( substrate_keys.group_key().to_bytes(), network_keys.group_key().to_bytes().as_ref().to_vec() ), res ); } } ================================================ FILE: processor/src/tests/literal/mod.rs ================================================ use dockertest::{ PullPolicy, StartPolicy, LogOptions, LogAction, LogPolicy, LogSource, Image, TestBodySpecification, DockerOperations, DockerTest, }; use serai_db::MemDb; #[cfg(feature = "bitcoin")] mod bitcoin { use std::sync::Arc; use rand_core::OsRng; use frost::Participant; use bitcoin_serai::bitcoin::{ secp256k1::{SECP256K1, SecretKey, Message}, PrivateKey, PublicKey, hashes::{HashEngine, Hash, sha256::Hash as Sha256}, sighash::{SighashCache, EcdsaSighashType}, absolute::LockTime, Amount as BAmount, Sequence, Script, Witness, OutPoint, address::Address as BAddress, transaction::{Version, Transaction, TxIn, TxOut}, Network as BNetwork, ScriptBuf, opcodes::all::{OP_SHA256, OP_EQUALVERIFY}, }; use scale::Encode; use sp_application_crypto::Pair; use serai_client::{in_instructions::primitives::Shorthand, primitives::insecure_pair_from_name}; use tokio::{ time::{timeout, Duration}, sync::Mutex, }; use super::*; use crate::{ networks::{Network, Bitcoin, Output, OutputType, Block}, tests::scanner::new_scanner, multisigs::scanner::ScannerEvent, }; #[test] fn test_dust_constant() { struct IsTrue; trait True {} impl True for IsTrue {} fn check() { core::hint::black_box(()); } check::= bitcoin_serai::wallet::DUST }>>(); } #[test] 
fn test_receive_data_from_input() { let docker = spawn_bitcoin(); docker.run(|ops| async move { let btc = bitcoin(&ops).await(MemDb::new()).await; // generate a multisig address to receive the coins let mut keys = frost::tests::key_gen::<_, ::Curve>(&mut OsRng) .remove(&Participant::new(1).unwrap()) .unwrap(); ::tweak_keys(&mut keys); let group_key = keys.group_key(); let serai_btc_address = ::external_address(&btc, group_key).await; // btc key pair to send from let private_key = PrivateKey::new(SecretKey::new(&mut rand_core::OsRng), BNetwork::Regtest); let public_key = PublicKey::from_private_key(SECP256K1, &private_key); let main_addr = BAddress::p2pkh(public_key, BNetwork::Regtest); // get unlocked coins let new_block = btc.get_latest_block_number().await.unwrap() + 1; btc .rpc .rpc_call::>("generatetoaddress", serde_json::json!([100, main_addr])) .await .unwrap(); // create a scanner let db = MemDb::new(); let mut scanner = new_scanner(&btc, &db, group_key, &Arc::new(Mutex::new(true))).await; // make a transfer instruction & hash it for script. 
let serai_address = insecure_pair_from_name("alice").public(); let message = Shorthand::transfer(None, serai_address.into()).encode(); let mut data = Sha256::engine(); data.input(&message); // make the output script => msg_script(OP_SHA256 PUSH MSG_HASH OP_EQUALVERIFY) + any_script let mut script = ScriptBuf::builder() .push_opcode(OP_SHA256) .push_slice(Sha256::from_engine(data).as_byte_array()) .push_opcode(OP_EQUALVERIFY) .into_script(); // append a regular spend script for i in main_addr.script_pubkey().instructions() { script.push_instruction(i.unwrap()); } // Create the first transaction let tx = btc.get_block(new_block).await.unwrap().txdata.swap_remove(0); let mut tx = Transaction { version: Version(2), lock_time: LockTime::ZERO, input: vec![TxIn { previous_output: OutPoint { txid: tx.compute_txid(), vout: 0 }, script_sig: Script::new().into(), sequence: Sequence(u32::MAX), witness: Witness::default(), }], output: vec![TxOut { value: tx.output[0].value - BAmount::from_sat(10000), script_pubkey: ScriptBuf::new_p2wsh(&script.wscript_hash()), }], }; tx.input[0].script_sig = Bitcoin::sign_btc_input_for_p2pkh(&tx, 0, &private_key); let initial_output_value = tx.output[0].value; // send it btc.rpc.send_raw_transaction(&tx).await.unwrap(); // Chain a transaction spending it with the InInstruction embedded in the input let mut tx = Transaction { version: Version(2), lock_time: LockTime::ZERO, input: vec![TxIn { previous_output: OutPoint { txid: tx.compute_txid(), vout: 0 }, script_sig: Script::new().into(), sequence: Sequence(u32::MAX), witness: Witness::new(), }], output: vec![TxOut { value: tx.output[0].value - BAmount::from_sat(10000), script_pubkey: serai_btc_address.into(), }], }; // add the witness script // This is the standard script with an extra argument of the InInstruction let mut sig = SECP256K1 .sign_ecdsa_low_r( &Message::from_digest_slice( SighashCache::new(&tx) .p2wsh_signature_hash(0, &script, initial_output_value, EcdsaSighashType::All) .unwrap() 
.to_raw_hash() .as_ref(), ) .unwrap(), &private_key.inner, ) .serialize_der() .to_vec(); sig.push(1); tx.input[0].witness.push(sig); tx.input[0].witness.push(public_key.inner.serialize()); tx.input[0].witness.push(message.clone()); tx.input[0].witness.push(script); // Send it immediately, as Bitcoin allows mempool chaining btc.rpc.send_raw_transaction(&tx).await.unwrap(); // Mine enough confirmations let block_number = btc.get_latest_block_number().await.unwrap() + 1; for _ in 0 .. ::CONFIRMATIONS { btc.mine_block().await; } let tx_block = btc.get_block(block_number).await.unwrap(); // verify that scanner picked up the output let outputs = match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() { ScannerEvent::Block { is_retirement_block, block, outputs } => { scanner.multisig_completed.send(false).unwrap(); assert!(!is_retirement_block); assert_eq!(block, tx_block.id()); assert_eq!(outputs.len(), 1); assert_eq!(outputs[0].kind(), OutputType::External); outputs } _ => panic!("unexpectedly got eventuality completion"), }; // verify that the amount and message are correct assert_eq!(outputs[0].balance().amount.0, tx.output[0].value.to_sat()); assert_eq!(outputs[0].data(), message); }); } fn spawn_bitcoin() -> DockerTest { serai_docker_tests::build("bitcoin".to_string()); let composition = TestBodySpecification::with_image( Image::with_repository("serai-dev-bitcoin").pull_policy(PullPolicy::Never), ) .set_start_policy(StartPolicy::Strict) .set_log_options(Some(LogOptions { action: LogAction::Forward, policy: LogPolicy::OnError, source: LogSource::Both, })) .set_publish_all_ports(true); let mut test = DockerTest::new().with_network(dockertest::Network::Isolated); test.provide_container(composition); test } async fn bitcoin( ops: &DockerOperations, ) -> impl Fn(MemDb) -> Pin>> { let handle = ops.handle("serai-dev-bitcoin").host_port(8332).unwrap(); let url = format!("http://serai:seraidex@{}:{}", handle.0, handle.1); let bitcoin = 
Bitcoin::new(url.clone()).await; bitcoin.fresh_chain().await; move |_db| Box::pin(Bitcoin::new(url.clone())) } test_utxo_network!( Bitcoin, spawn_bitcoin, bitcoin, bitcoin_key_gen, bitcoin_scanner, bitcoin_no_deadlock_in_multisig_completed, bitcoin_signer, bitcoin_wallet, bitcoin_addresses, ); } #[cfg(feature = "monero")] mod monero { use super::*; use crate::networks::{Network, Monero}; fn spawn_monero() -> DockerTest { serai_docker_tests::build("monero".to_string()); let composition = TestBodySpecification::with_image( Image::with_repository("serai-dev-monero").pull_policy(PullPolicy::Never), ) .set_start_policy(StartPolicy::Strict) .set_log_options(Some(LogOptions { action: LogAction::Forward, policy: LogPolicy::OnError, source: LogSource::Both, })) .set_publish_all_ports(true); let mut test = DockerTest::new(); test.provide_container(composition); test } async fn monero( ops: &DockerOperations, ) -> impl Fn(MemDb) -> Pin>> { let handle = ops.handle("serai-dev-monero").host_port(18081).unwrap(); let url = format!("http://serai:seraidex@{}:{}", handle.0, handle.1); let monero = Monero::new(url.clone()).await; while monero.get_latest_block_number().await.unwrap() < 150 { monero.mine_block().await; } move |_db| Box::pin(Monero::new(url.clone())) } test_utxo_network!( Monero, spawn_monero, monero, monero_key_gen, monero_scanner, monero_no_deadlock_in_multisig_completed, monero_signer, monero_wallet, monero_addresses, ); } #[cfg(feature = "ethereum")] mod ethereum { use super::*; use ciphersuite::Ciphersuite; use ciphersuite_kp256::Secp256k1; use serai_client::validator_sets::primitives::Session; use crate::networks::Ethereum; fn spawn_ethereum() -> DockerTest { serai_docker_tests::build("ethereum".to_string()); let composition = TestBodySpecification::with_image( Image::with_repository("serai-dev-ethereum").pull_policy(PullPolicy::Never), ) .set_start_policy(StartPolicy::Strict) .set_log_options(Some(LogOptions { action: LogAction::Forward, policy: 
LogPolicy::OnError, source: LogSource::Both, })) .set_publish_all_ports(true); let mut test = DockerTest::new(); test.provide_container(composition); test } async fn ethereum( ops: &DockerOperations, ) -> impl Fn(MemDb) -> Pin>>> { use std::sync::Arc; use ethereum_serai::{ alloy::{ primitives::U256, simple_request_transport::SimpleRequest, rpc_client::ClientBuilder, provider::{Provider, RootProvider}, }, deployer::Deployer, }; let handle = ops.handle("serai-dev-ethereum").host_port(8545).unwrap(); let url = format!("http://{}:{}", handle.0, handle.1); tokio::time::sleep(core::time::Duration::from_secs(15)).await; { let provider = Arc::new(RootProvider::new( ClientBuilder::default().transport(SimpleRequest::new(url.clone()), true), )); provider.raw_request::<_, ()>("evm_setAutomine".into(), [false]).await.unwrap(); provider.raw_request::<_, ()>("anvil_mine".into(), [96]).await.unwrap(); // Perform deployment { // Make sure the Deployer constructor returns None, as it doesn't exist yet assert!(Deployer::new(provider.clone()).await.unwrap().is_none()); // Deploy the Deployer let tx = Deployer::deployment_tx(); provider .raw_request::<_, ()>( "anvil_setBalance".into(), [ tx.recover_signer().unwrap().to_string(), (U256::from(tx.tx().gas_limit) * U256::from(tx.tx().gas_price)).to_string(), ], ) .await .unwrap(); let (tx, sig, _) = tx.into_parts(); let mut bytes = vec![]; tx.encode_with_signature_fields(&sig, &mut bytes); let pending_tx = provider.send_raw_transaction(&bytes).await.unwrap(); provider.raw_request::<_, ()>("anvil_mine".into(), [96]).await.unwrap(); //tokio::time::sleep(core::time::Duration::from_secs(15)).await; let receipt = pending_tx.get_receipt().await.unwrap(); assert!(receipt.status()); let _ = Deployer::new(provider.clone()) .await .expect("network error") .expect("deployer wasn't deployed"); } } move |db| { let url = url.clone(); Box::pin(async move { { let db = db.clone(); let url = url.clone(); // Spawn a task to deploy the proper Router when the 
time comes tokio::spawn(async move { let key = loop { let Some(key) = crate::key_gen::NetworkKeyDb::get(&db, Session(0)) else { tokio::time::sleep(core::time::Duration::from_secs(1)).await; continue; }; break ethereum_serai::crypto::PublicKey::new( Secp256k1::read_G(&mut key.as_slice()).unwrap(), ) .unwrap(); }; let provider = Arc::new(RootProvider::new( ClientBuilder::default().transport(SimpleRequest::new(url.clone()), true), )); let deployer = Deployer::new(provider.clone()).await.unwrap().unwrap(); let mut tx = deployer.deploy_router(&key); tx.gas_limit = 1_000_000u64; tx.gas_price = 1_000_000_000u64.into(); let tx = ethereum_serai::crypto::deterministically_sign(&tx); provider .raw_request::<_, ()>( "anvil_setBalance".into(), [ tx.recover_signer().unwrap().to_string(), (U256::from(tx.tx().gas_limit) * U256::from(tx.tx().gas_price)).to_string(), ], ) .await .unwrap(); let (tx, sig, _) = tx.into_parts(); let mut bytes = vec![]; tx.encode_with_signature_fields(&sig, &mut bytes); let pending_tx = provider.send_raw_transaction(&bytes).await.unwrap(); provider.raw_request::<_, ()>("anvil_mine".into(), [96]).await.unwrap(); let receipt = pending_tx.get_receipt().await.unwrap(); assert!(receipt.status()); let _router = deployer.find_router(provider.clone(), &key).await.unwrap().unwrap(); }); } Ethereum::new(db, url.clone(), String::new()).await }) } } test_network!( Ethereum, spawn_ethereum, ethereum, ethereum_key_gen, ethereum_scanner, ethereum_no_deadlock_in_multisig_completed, ethereum_signer, ethereum_wallet, ); } ================================================ FILE: processor/src/tests/mod.rs ================================================ use std::sync::OnceLock; mod key_gen; mod scanner; mod signer; pub(crate) use signer::sign; mod cosigner; mod batch_signer; mod wallet; mod addresses; // Effective Once static INIT_LOGGER_CELL: OnceLock<()> = OnceLock::new(); fn init_logger() { *INIT_LOGGER_CELL.get_or_init(env_logger::init) } #[macro_export] macro_rules! 
test_network {
  (
    $N: ty,
    $docker: ident,
    $network: ident,
    $key_gen: ident,
    $scanner: ident,
    $no_deadlock_in_multisig_completed: ident,
    $signer: ident,
    $wallet: ident,
  ) => {
    use core::{pin::Pin, future::Future};

    use $crate::tests::{
      init_logger,
      key_gen::test_key_gen,
      scanner::{test_scanner, test_no_deadlock_in_multisig_completed},
      signer::test_signer,
      wallet::test_wallet,
    };

    // This doesn't interact with a node and accordingly doesn't need to spawn one
    #[tokio::test]
    async fn $key_gen() {
      init_logger();
      test_key_gen::<$N>();
    }

    // Each remaining test spins up the network's Docker node, builds the network handle via
    // `$network`, and runs the shared test body against it.
    #[test]
    fn $scanner() {
      init_logger();
      let docker = $docker();
      docker.run(|ops| async move {
        let new_network = $network(&ops).await;
        test_scanner(new_network).await;
      });
    }

    #[test]
    fn $no_deadlock_in_multisig_completed() {
      init_logger();
      let docker = $docker();
      docker.run(|ops| async move {
        let new_network = $network(&ops).await;
        test_no_deadlock_in_multisig_completed(new_network).await;
      });
    }

    #[test]
    fn $signer() {
      init_logger();
      let docker = $docker();
      docker.run(|ops| async move {
        let new_network = $network(&ops).await;
        test_signer(new_network).await;
      });
    }

    #[test]
    fn $wallet() {
      init_logger();
      let docker = $docker();
      docker.run(|ops| async move {
        let new_network = $network(&ops).await;
        test_wallet(new_network).await;
      });
    }
  };
}

#[macro_export]
macro_rules!
test_utxo_network {
  (
    $N: ty,
    $docker: ident,
    $network: ident,
    $key_gen: ident,
    $scanner: ident,
    $no_deadlock_in_multisig_completed: ident,
    $signer: ident,
    $wallet: ident,
    $addresses: ident,
  ) => {
    use $crate::tests::addresses::test_addresses;

    // UTXO networks run every standard network test...
    test_network!(
      $N,
      $docker,
      $network,
      $key_gen,
      $scanner,
      $no_deadlock_in_multisig_completed,
      $signer,
      $wallet,
    );

    // ... plus a check that the distinct address kinds (external/branch/change/forwarded)
    // are correctly identified by the scanner.
    #[test]
    fn $addresses() {
      init_logger();
      let docker = $docker();
      docker.run(|ops| async move {
        let new_network = $network(&ops).await;
        test_addresses(new_network).await;
      });
    }
  };
}

mod literal;

================================================
FILE: processor/src/tests/scanner.rs
================================================

use core::{pin::Pin, time::Duration, future::Future};
use std::sync::Arc;

use rand_core::OsRng;

use ciphersuite::{group::GroupEncoding, Ciphersuite};
use frost::{Participant, tests::key_gen};

use tokio::{sync::Mutex, time::timeout};

use serai_db::{DbTxn, Db, MemDb};

use serai_client::validator_sets::primitives::Session;

use crate::{
  networks::{OutputType, Output, Block, Network},
  key_gen::NetworkKeyDb,
  multisigs::scanner::{ScannerEvent, Scanner, ScannerHandle},
};

// Constructs a Scanner over the given network/DB. On first use (tracked by the shared `first`
// flag) it registers the group key at the current block and mines confirmations; on later uses
// it asserts the previously-registered key was reloaded.
// NOTE(review): the generic parameters of this signature (`::G`, `Arc>`, `ScannerHandle`)
// were stripped by extraction; tokens reproduced as-is — restore from the repository.
pub async fn new_scanner(
  network: &N,
  db: &D,
  group_key: ::G,
  first: &Arc>,
) -> ScannerHandle {
  let activation_number = network.get_latest_block_number().await.unwrap();
  let mut db = db.clone();
  let (mut scanner, current_keys) = Scanner::new(network.clone(), db.clone());
  let mut first = first.lock().await;
  if *first {
    assert!(current_keys.is_empty());
    let mut txn = db.txn();
    scanner.register_key(&mut txn, activation_number, group_key).await;
    txn.commit();
    for _ in 0 ..
N::CONFIRMATIONS {
      network.mine_block().await;
    }
    *first = false;
  } else {
    assert_eq!(current_keys.len(), 1);
  }
  scanner
}

// Verifies the Scanner picks up an incoming external output, that a fresh Scanner over the same
// DB re-emits unacknowledged events, and that once the block is acknowledged no further events
// are produced.
// NOTE(review): generic parameters in this signature were stripped by extraction; reproduced
// as-is.
pub async fn test_scanner(
  new_network: impl Fn(MemDb) -> Pin>>,
) {
  let mut keys =
    frost::tests::key_gen::<_, N::Curve>(&mut OsRng).remove(&Participant::new(1).unwrap()).unwrap();
  N::tweak_keys(&mut keys);
  let group_key = keys.group_key();

  // Persist the network key before constructing the network (read on startup).
  let mut db = MemDb::new();
  {
    let mut txn = db.txn();
    NetworkKeyDb::set(&mut txn, Session(0), &group_key.to_bytes().as_ref().to_vec());
    txn.commit();
  }
  let network = new_network(db.clone()).await;

  // Mine blocks so there's a confirmed block
  for _ in 0 .. N::CONFIRMATIONS {
    network.mine_block().await;
  }

  let first = Arc::new(Mutex::new(true));
  let scanner = new_scanner(&network, &db, group_key, &first).await;

  // Receive funds
  let block = network.test_send(N::external_address(&network, keys.group_key()).await).await;
  let block_id = block.id();

  // Verify the Scanner picked them up
  let verify_event = |mut scanner: ScannerHandle| async {
    let outputs =
      match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() {
        ScannerEvent::Block { is_retirement_block, block, outputs } => {
          scanner.multisig_completed.send(false).unwrap();
          assert!(!is_retirement_block);
          assert_eq!(block, block_id);
          assert_eq!(outputs.len(), 1);
          assert_eq!(outputs[0].kind(), OutputType::External);
          outputs
        }
        ScannerEvent::Completed(_, _, _, _, _) => {
          panic!("unexpectedly got eventuality completion");
        }
      };
    (scanner, outputs)
  };
  let (mut scanner, outputs) = verify_event(scanner).await;

  // Create a new scanner off the current DB and verify it re-emits the above events
  verify_event(new_scanner(&network, &db, group_key, &first).await).await;

  // Acknowledge the block
  let mut cloned_db = db.clone();
  let mut txn = cloned_db.txn();
  assert_eq!(scanner.ack_block(&mut txn, block_id).await.1, outputs);
  scanner.release_lock().await;
  txn.commit();

  // There should be no more events
  assert!(timeout(Duration::from_secs(30),
scanner.events.recv()).await.is_err());

  // Create a new scanner off the current DB and make sure it also does nothing
  assert!(timeout(
    Duration::from_secs(30),
    new_scanner(&network, &db, group_key, &first).await.events.recv()
  )
  .await
  .is_err());
}

// Regression test: after emitting a Block event, the Scanner must not keep holding its internal
// lock while waiting on the multisig_completed channel, or ack_block would deadlock.
// NOTE(review): generic parameters in this signature were stripped by extraction; reproduced
// as-is.
pub async fn test_no_deadlock_in_multisig_completed(
  new_network: impl Fn(MemDb) -> Pin>>,
) {
  // This test scans two blocks then acknowledges one, yet a network with one confirm won't scan
  // two blocks before the first is acknowledged (due to the look-ahead limit)
  if N::CONFIRMATIONS <= 1 {
    return;
  }

  let mut db = MemDb::new();
  let network = new_network(db.clone()).await;

  // Mine blocks so there's a confirmed block
  for _ in 0 .. N::CONFIRMATIONS {
    network.mine_block().await;
  }

  let (mut scanner, current_keys) = Scanner::new(network.clone(), db.clone());
  assert!(current_keys.is_empty());

  // Register keys to cause Block events at CONFIRMATIONS (dropped since first keys),
  // CONFIRMATIONS + 1, and CONFIRMATIONS + 2
  for i in 0 .. 3 {
    let key = {
      let mut keys = key_gen(&mut OsRng);
      for keys in keys.values_mut() {
        N::tweak_keys(keys);
      }
      let key = keys[&Participant::new(1).unwrap()].group_key();
      if i == 0 {
        let mut txn = db.txn();
        NetworkKeyDb::set(&mut txn, Session(0), &key.to_bytes().as_ref().to_vec());
        txn.commit();
        // Sleep for 5 seconds as setting the Network key value will trigger an async task for
        // Ethereum
        tokio::time::sleep(Duration::from_secs(5)).await;
      }
      key
    };
    let mut txn = db.txn();
    scanner
      .register_key(
        &mut txn,
        network.get_latest_block_number().await.unwrap() + N::CONFIRMATIONS + i,
        key,
      )
      .await;
    txn.commit();
  }

  for _ in 0 ..
(3 * N::CONFIRMATIONS) { network.mine_block().await; } // Block for the second set of keys registered let block_id = match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() { ScannerEvent::Block { is_retirement_block, block, outputs: _ } => { scanner.multisig_completed.send(false).unwrap(); assert!(!is_retirement_block); block } ScannerEvent::Completed(_, _, _, _, _) => { panic!("unexpectedly got eventuality completion"); } }; // Block for the third set of keys registered match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() { ScannerEvent::Block { .. } => {} ScannerEvent::Completed(_, _, _, _, _) => { panic!("unexpectedly got eventuality completion"); } }; // The ack_block acquisition shows the Scanner isn't maintaining the lock on its own thread after // emitting the Block event // TODO: This is incomplete. Also test after emitting Completed let mut txn = db.txn(); assert_eq!(scanner.ack_block(&mut txn, block_id).await.1, vec![]); scanner.release_lock().await; txn.commit(); scanner.multisig_completed.send(false).unwrap(); } ================================================ FILE: processor/src/tests/signer.rs ================================================ use core::{pin::Pin, future::Future}; use std::collections::HashMap; use rand_core::{RngCore, OsRng}; use ciphersuite::group::GroupEncoding; use frost::{ Participant, ThresholdKeys, tests::{key_gen, clone_without}, }; use serai_db::{DbTxn, Db, MemDb}; use serai_client::{ primitives::{ExternalNetworkId, ExternalCoin, Amount, ExternalBalance}, validator_sets::primitives::Session, }; use messages::sign::*; use crate::{ Payment, networks::{Output, Transaction, Eventuality, Network}, key_gen::NetworkKeyDb, multisigs::scheduler::Scheduler, signer::Signer, }; #[allow(clippy::type_complexity)] pub async fn sign( network: N, session: Session, mut keys_txs: HashMap< Participant, (ThresholdKeys, (N::SignableTransaction, N::Eventuality)), >, ) -> ::Claim { let 
actual_id = SignId { session, id: [0xaa; 32], attempt: 0 }; let mut keys = HashMap::new(); let mut txs = HashMap::new(); for (i, (these_keys, this_tx)) in keys_txs.drain() { keys.insert(i, these_keys); txs.insert(i, this_tx); } let mut signers = HashMap::new(); let mut dbs = HashMap::new(); let mut t = 0; for i in 1 ..= keys.len() { let i = Participant::new(u16::try_from(i).unwrap()).unwrap(); let keys = keys.remove(&i).unwrap(); t = keys.params().t(); signers.insert(i, Signer::<_, MemDb>::new(network.clone(), Session(0), vec![keys])); dbs.insert(i, MemDb::new()); } drop(keys); let mut signing_set = vec![]; while signing_set.len() < usize::from(t) { let candidate = Participant::new( u16::try_from((OsRng.next_u64() % u64::try_from(signers.len()).unwrap()) + 1).unwrap(), ) .unwrap(); if signing_set.contains(&candidate) { continue; } signing_set.push(candidate); } let mut preprocesses = HashMap::new(); let mut eventuality = None; for i in 1 ..= signers.len() { let i = Participant::new(u16::try_from(i).unwrap()).unwrap(); let (tx, this_eventuality) = txs.remove(&i).unwrap(); let mut txn = dbs.get_mut(&i).unwrap().txn(); match signers .get_mut(&i) .unwrap() .sign_transaction(&mut txn, actual_id.id, tx, &this_eventuality) .await { // All participants should emit a preprocess Some(ProcessorMessage::Preprocess { id, preprocesses: mut these_preprocesses }) => { assert_eq!(id, actual_id); assert_eq!(these_preprocesses.len(), 1); if signing_set.contains(&i) { preprocesses.insert(i, these_preprocesses.swap_remove(0)); } } _ => panic!("didn't get preprocess back"), } txn.commit(); if eventuality.is_none() { eventuality = Some(this_eventuality.clone()); } assert_eq!(eventuality, Some(this_eventuality)); } let mut shares = HashMap::new(); for i in &signing_set { let mut txn = dbs.get_mut(i).unwrap().txn(); match signers .get_mut(i) .unwrap() .handle( &mut txn, CoordinatorMessage::Preprocesses { id: actual_id.clone(), preprocesses: clone_without(&preprocesses, i), }, ) .await 
.unwrap() { ProcessorMessage::Share { id, shares: mut these_shares } => { assert_eq!(id, actual_id); assert_eq!(these_shares.len(), 1); shares.insert(*i, these_shares.swap_remove(0)); } _ => panic!("didn't get share back"), } txn.commit(); } let mut tx_id = None; for i in &signing_set { let mut txn = dbs.get_mut(i).unwrap().txn(); match signers .get_mut(i) .unwrap() .handle( &mut txn, CoordinatorMessage::Shares { id: actual_id.clone(), shares: clone_without(&shares, i) }, ) .await .unwrap() { ProcessorMessage::Completed { session, id, tx } => { assert_eq!(session, Session(0)); assert_eq!(id, actual_id.id); if tx_id.is_none() { tx_id = Some(tx.clone()); } assert_eq!(tx_id, Some(tx)); } _ => panic!("didn't get TX back"), } txn.commit(); } let mut typed_claim = ::Claim::default(); typed_claim.as_mut().copy_from_slice(tx_id.unwrap().as_ref()); assert!(network.check_eventuality_by_claim(&eventuality.unwrap(), &typed_claim).await); typed_claim } pub async fn test_signer( new_network: impl Fn(MemDb) -> Pin>>, ) { let mut keys = key_gen(&mut OsRng); for keys in keys.values_mut() { N::tweak_keys(keys); } let key = keys[&Participant::new(1).unwrap()].group_key(); let mut db = MemDb::new(); { let mut txn = db.txn(); NetworkKeyDb::set(&mut txn, Session(0), &key.to_bytes().as_ref().to_vec()); txn.commit(); } let network = new_network(db.clone()).await; let outputs = network .get_outputs(&network.test_send(N::external_address(&network, key).await).await, key) .await; let sync_block = network.get_latest_block_number().await.unwrap() - N::CONFIRMATIONS; let amount = (2 * N::DUST) + 1000; let plan = { let mut txn = db.txn(); let mut scheduler = N::Scheduler::new::(&mut txn, key, N::NETWORK); let payments = vec![Payment { address: N::external_address(&network, key).await, data: None, balance: ExternalBalance { coin: match N::NETWORK { ExternalNetworkId::Bitcoin => ExternalCoin::Bitcoin, ExternalNetworkId::Ethereum => ExternalCoin::Ether, ExternalNetworkId::Monero => 
ExternalCoin::Monero, }, amount: Amount(amount), }, }]; let mut plans = scheduler.schedule::(&mut txn, outputs.clone(), payments, key, false); assert_eq!(plans.len(), 1); plans.swap_remove(0) }; let mut keys_txs = HashMap::new(); let mut eventualities = vec![]; for (i, keys) in keys.drain() { let (signable, eventuality) = network.prepare_send(sync_block, plan.clone(), 0).await.unwrap().tx.unwrap(); eventualities.push(eventuality.clone()); keys_txs.insert(i, (keys, (signable, eventuality))); } let claim = sign(network.clone(), Session(0), keys_txs).await; // Mine a block, and scan it, to ensure that the TX actually made it on chain network.mine_block().await; let block_number = network.get_latest_block_number().await.unwrap(); let tx = network.get_transaction_by_eventuality(block_number, &eventualities[0]).await; let outputs = network .get_outputs( &network.get_block(network.get_latest_block_number().await.unwrap()).await.unwrap(), key, ) .await; // Don't run if Ethereum as the received output will revert by the contract // (and therefore not actually exist) if N::NETWORK != ExternalNetworkId::Ethereum { assert_eq!(outputs.len(), 1 + usize::from(u8::from(plan.change.is_some()))); // Adjust the amount for the fees let amount = amount - tx.fee(&network).await; if plan.change.is_some() { // Check either output since Monero will randomize its output order assert!( (outputs[0].balance().amount.0 == amount) || (outputs[1].balance().amount.0 == amount) ); } else { assert!(outputs[0].balance().amount.0 == amount); } } // Check the eventualities pass for eventuality in eventualities { let completion = network.confirm_completion(&eventuality, &claim).await.unwrap().unwrap(); assert_eq!(N::Eventuality::claim(&completion), claim); } } ================================================ FILE: processor/src/tests/wallet.rs ================================================ use core::{time::Duration, pin::Pin, future::Future}; use std::collections::HashMap; use rand_core::OsRng; use 
ciphersuite::group::GroupEncoding; use frost::{Participant, tests::key_gen}; use tokio::time::timeout; use serai_db::{DbTxn, Db, MemDb}; use serai_client::{ primitives::{ExternalNetworkId, ExternalCoin, Amount, ExternalBalance}, validator_sets::primitives::Session, }; use crate::{ Payment, Plan, networks::{Output, Transaction, Eventuality, Block, Network}, key_gen::NetworkKeyDb, multisigs::{ scanner::{ScannerEvent, Scanner}, scheduler::{self, Scheduler}, }, tests::sign, }; // Tests the Scanner, Scheduler, and Signer together pub async fn test_wallet( new_network: impl Fn(MemDb) -> Pin>>, ) { let mut keys = key_gen(&mut OsRng); for keys in keys.values_mut() { N::tweak_keys(keys); } let key = keys[&Participant::new(1).unwrap()].group_key(); let mut db = MemDb::new(); { let mut txn = db.txn(); NetworkKeyDb::set(&mut txn, Session(0), &key.to_bytes().as_ref().to_vec()); txn.commit(); } let network = new_network(db.clone()).await; // Mine blocks so there's a confirmed block for _ in 0 .. N::CONFIRMATIONS { network.mine_block().await; } let (mut scanner, current_keys) = Scanner::new(network.clone(), db.clone()); assert!(current_keys.is_empty()); let (block_id, outputs) = { let mut txn = db.txn(); scanner.register_key(&mut txn, network.get_latest_block_number().await.unwrap(), key).await; txn.commit(); for _ in 0 .. 
N::CONFIRMATIONS { network.mine_block().await; } let block = network.test_send(N::external_address(&network, key).await).await; let block_id = block.id(); match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() { ScannerEvent::Block { is_retirement_block, block, outputs } => { scanner.multisig_completed.send(false).unwrap(); assert!(!is_retirement_block); assert_eq!(block, block_id); assert_eq!(outputs.len(), 1); (block_id, outputs) } ScannerEvent::Completed(_, _, _, _, _) => { panic!("unexpectedly got eventuality completion"); } } }; let mut txn = db.txn(); assert_eq!(scanner.ack_block(&mut txn, block_id.clone()).await.1, outputs); scanner.release_lock().await; txn.commit(); let mut txn = db.txn(); let mut scheduler = N::Scheduler::new::(&mut txn, key, N::NETWORK); let amount = 2 * N::DUST; let plans = scheduler.schedule::( &mut txn, outputs.clone(), vec![Payment { address: N::external_address(&network, key).await, data: None, balance: ExternalBalance { coin: match N::NETWORK { ExternalNetworkId::Bitcoin => ExternalCoin::Bitcoin, ExternalNetworkId::Ethereum => ExternalCoin::Ether, ExternalNetworkId::Monero => ExternalCoin::Monero, }, amount: Amount(amount), }, }], key, false, ); txn.commit(); assert_eq!(plans.len(), 1); assert_eq!(plans[0].key, key); if std::any::TypeId::of::() == std::any::TypeId::of::>() { assert_eq!(plans[0].inputs, vec![]); } else { assert_eq!(plans[0].inputs, outputs); } assert_eq!( plans[0].payments, vec![Payment { address: N::external_address(&network, key).await, data: None, balance: ExternalBalance { coin: match N::NETWORK { ExternalNetworkId::Bitcoin => ExternalCoin::Bitcoin, ExternalNetworkId::Ethereum => ExternalCoin::Ether, ExternalNetworkId::Monero => ExternalCoin::Monero, }, amount: Amount(amount), } }] ); assert_eq!(plans[0].change, N::change_address(key)); { let mut buf = vec![]; plans[0].write(&mut buf).unwrap(); assert_eq!(plans[0], Plan::::read::<&[u8]>(&mut buf.as_ref()).unwrap()); } // Execute 
the plan let mut keys_txs = HashMap::new(); let mut eventualities = vec![]; for (i, keys) in keys.drain() { let (signable, eventuality) = network .prepare_send(network.get_block_number(&block_id).await, plans[0].clone(), 0) .await .unwrap() .tx .unwrap(); eventualities.push(eventuality.clone()); keys_txs.insert(i, (keys, (signable, eventuality))); } let claim = sign(network.clone(), Session(0), keys_txs).await; network.mine_block().await; let block_number = network.get_latest_block_number().await.unwrap(); let tx = network.get_transaction_by_eventuality(block_number, &eventualities[0]).await; let block = network.get_block(block_number).await.unwrap(); let outputs = network.get_outputs(&block, key).await; // Don't run if Ethereum as the received output will revert by the contract // (and therefore not actually exist) if N::NETWORK != ExternalNetworkId::Ethereum { assert_eq!(outputs.len(), 1 + usize::from(u8::from(plans[0].change.is_some()))); // Adjust the amount for the fees let amount = amount - tx.fee(&network).await; if plans[0].change.is_some() { // Check either output since Monero will randomize its output order assert!( (outputs[0].balance().amount.0 == amount) || (outputs[1].balance().amount.0 == amount) ); } else { assert!(outputs[0].balance().amount.0 == amount); } } for eventuality in eventualities { let completion = network.confirm_completion(&eventuality, &claim).await.unwrap().unwrap(); assert_eq!(N::Eventuality::claim(&completion), claim); } for _ in 1 .. 
N::CONFIRMATIONS { network.mine_block().await; } if N::NETWORK != ExternalNetworkId::Ethereum { match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() { ScannerEvent::Block { is_retirement_block, block: block_id, outputs: these_outputs } => { scanner.multisig_completed.send(false).unwrap(); assert!(!is_retirement_block); assert_eq!(block_id, block.id()); assert_eq!(these_outputs, outputs); } ScannerEvent::Completed(_, _, _, _, _) => { panic!("unexpectedly got eventuality completion"); } } // Check the Scanner DB can reload the outputs let mut txn = db.txn(); assert_eq!(scanner.ack_block(&mut txn, block.id()).await.1, outputs); scanner.release_lock().await; txn.commit(); } } ================================================ FILE: rust-toolchain.toml ================================================ [toolchain] channel = "1.89" targets = ["wasm32v1-none"] profile = "minimal" components = ["rust-src", "rustfmt", "clippy"] ================================================ FILE: spec/DKG Exclusions.md ================================================ Upon an issue with the DKG, the honest validators must remove the malicious validators. Ideally, a threshold signature would be used, yet that would require a threshold key (which would require authentication by a MuSig signature). A MuSig signature which specifies the signing set (or rather, the excluded signers) achieves the most efficiency. While that resolves the on-chain behavior, the Tributary also has to perform exclusion. This has the following forms: 1) Rejecting further transactions (required) 2) Rejecting further participation in Tendermint With regards to rejecting further participation in Tendermint, it's *ideal* to remove the validator from the list of validators. Each validator removed from participation, yet not from the list of validators, increases the likelihood of the network failing to form consensus. 
With regards to the economic security, an honest 67% may remove a faulty (explicitly or simply offline) 33%, letting 67% of the remaining 67% (4/9ths) take control of the associated private keys. In such a case, the malicious parties are defined as the 4/9ths of validators with access to the private key and the 33% removed (who together form >67% of the originally intended validator set and have presumably provided enough stake to cover losses). ================================================ FILE: spec/Getting Started.md ================================================ # Getting Started ### Dependencies ##### Ubuntu ``` sudo apt-get install -y build-essential clang-11 pkg-config cmake git curl protobuf-compiler ``` ### Install rustup ##### Linux ``` curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh ``` ##### macOS ``` brew install rustup ``` ### Install Rust ``` rustup update rustup toolchain install stable rustup target add wasm32v1-none rustup toolchain install nightly rustup target add wasm32v1-none --toolchain nightly ``` ### Install Solidity with `svm` ``` cargo install svm-rs svm install 0.8.26 svm use 0.8.26 ``` ### Install foundry (for tests) ``` curl -L https://foundry.paradigm.xyz | bash foundryup ``` ### Clone and Build Serai ``` git clone https://github.com/serai-dex/serai cd serai cargo build --release --all-features ``` ### Run Tests Running tests requires: - [A rootless Docker setup](https://docs.docker.com/engine/security/rootless/) - A properly configured Bitcoin regtest node (available via Docker) - A properly configured Monero regtest node (available via Docker) - A properly configured monero-wallet-rpc instance (available via Docker) To start the required daemons, one may run: ``` cargo run -p serai-orchestrator -- key_gen dev cargo run -p serai-orchestrator -- setup dev ``` and then: ``` cargo run -p serai-orchestrator -- start dev bitcoin-daemon monero-daemon monero-wallet-rpc ``` Finally, to run the tests: ``` cargo test 
--all-features ``` ================================================ FILE: spec/Serai.md ================================================ # Serai Serai is a decentralized execution layer whose validators form multisig wallets for various connected networks, offering secure decentralized control of foreign coins to applications built on it. Serai is exemplified by Serai DEX, an automated-market-maker (AMM) decentralized exchange, allowing swapping Bitcoin, Ether, DAI, and Monero. It is the premier application of Serai. ### Substrate Serai is based on [Substrate](https://docs.substrate.io), a blockchain framework offering a robust infrastructure. ================================================ FILE: spec/coordinator/Coordinator.md ================================================ # Coordinator The coordinator is a service which communicates with all of the processors, all of the other coordinators over a secondary P2P network, and with the Serai node. This document primarily details its flow with regards to the Serai node and processor. ### New Set Event On `validator_sets::pallet::Event::NewSet`, the coordinator spawns a tributary for the new set. It additionally sends the processor `key_gen::CoordinatorMessage::GenerateKey`. ### Key Generation Event On `validator_sets::pallet::Event::KeyGen`, the coordinator sends `substrate::CoordinatorMessage::ConfirmKeyPair` to the processor. ### Batch On `substrate::ProcessorMessage::Batch`, the coordinator notes what the on-chain `Batch` should be, for verification once published. ### SignedBatch On `substrate::ProcessorMessage::SignedBatch`, the coordinator publishes an unsigned transaction containing the signed batch to the Serai blockchain. ### Sign Completed On `sign::ProcessorMessage::Completed`, the coordinator makes a tributary transaction containing the transaction hash the signing process was supposedly completed with. Due to rushing adversaries, the actual transaction completing the plan may be distinct on-chain. 
These messages solely exist to coordinate the signing process, not to determine chain state. ================================================ FILE: spec/coordinator/Tributary.md ================================================ # Tributary A tributary is a side-chain, created for a specific multisig instance, used as a verifiable broadcast layer. ## Transactions ### Key Gen Commitments `DkgCommitments` is created when a processor sends the coordinator `key_gen::ProcessorMessage::Commitments`. When all validators participating in a multisig publish `DkgCommitments`, the coordinator sends the processor `key_gen::CoordinatorMessage::Commitments`, excluding the processor's own commitments. ### Key Gen Shares `DkgShares` is created when a processor sends the coordinator `key_gen::ProcessorMessage::Shares`. The coordinator additionally includes its own pair of MuSig nonces, used in a signing protocol to inform Substrate of the key's successful creation. When all validators participating in a multisig publish `DkgShares`, the coordinator sends the processor `key_gen::CoordinatorMessage::Shares`, excluding the processor's own shares and the MuSig nonces. ### Key Gen Confirmation `DkgConfirmed` is created when a processor sends the coordinator `key_gen::ProcessorMessage::GeneratedKeyPair`. The coordinator takes the MuSig nonces they prior associated with this DKG attempt and publishes their signature share. When all validators participating in the multisig publish `DkgConfirmed`, an extrinsic calling `validator_sets::pallet::set_keys` is made to confirm the keys. Setting the keys on the Serai blockchain as such lets it receive `Batch`s, provides a BFT consensus guarantee, and enables accessibility by users. While the tributary itself could offer both the BFT consensus guarantee, and verifiable accessibility to users, they'd both require users access the tributary. 
Since Substrate must already know the resulting key, there's no value to usage of the tributary as-such, as all desired properties are already offered by Substrate. Note that the keys are confirmed when Substrate emits a `KeyGen` event, regardless of if the Tributary has the expected `DkgConfirmed` transactions. ### Batch When *TODO*, a `Batch` transaction is provided. This is used to have the group acknowledge and synchronize around a batch, without the overhead of voting in its acknowledgment. When a `Batch` transaction is included, participants are allowed to publish transactions to produce a threshold signature for the batch synchronized over. ### Substrate Block `SubstrateBlock` is provided when the processor sends the coordinator `substrate::ProcessorMessage::SubstrateBlockAck`. When a `SubstrateBlock` transaction is included, participants are allowed to publish transactions for the signing protocols it causes. ### Batch Preprocess `BatchPreprocess` is created when a processor sends the coordinator `coordinator::ProcessorMessage::BatchPreprocess` and a `Batch` transaction allowing the batch to be signed has already been included on chain. When `t` validators have published `BatchPreprocess` transactions, if the coordinator represents one of the first `t` validators to do so, a `coordinator::ProcessorMessage::BatchPreprocesses` is sent to the processor, excluding the processor's own preprocess. ### Batch Share `BatchShare` is created when a processor sends the coordinator `coordinator::ProcessorMessage::BatchShare`. The relevant `Batch` transaction having already been included on chain follows from `coordinator::ProcessorMessage::BatchShare` being a response to a message which also has that precondition.
When the `t` validators who first published `BatchPreprocess` transactions have published `BatchShare` transactions, if the coordinator represents one of the first `t` validators to do so, a `coordinator::ProcessorMessage::BatchShares` with the relevant shares (excluding the processor's own) is sent to the processor. ### Sign Preprocess `SignPreprocess` is created when a processor sends the coordinator `sign::ProcessorMessage::Preprocess` and a `SubstrateBlock` transaction allowing the transaction to be signed has already been included on chain. When `t` validators have published `SignPreprocess` transactions, if the coordinator represents one of the first `t` validators to do so, a `sign::ProcessorMessage::Preprocesses` is sent to the processor, excluding the processor's own preprocess. ### Sign Share `SignShare` is created when a processor sends the coordinator `sign::ProcessorMessage::Share`. The relevant `SubstrateBlock` transaction having already been included on chain follows from `sign::ProcessorMessage::Share` being a response to a message which also has that precondition. When the `t` validators who first published `SignPreprocess` transactions have published `SignShare` transactions, if the coordinator represents one of the first `t` validators to do so, a `sign::ProcessorMessage::Shares` with the relevant shares (excluding the processor's own) is sent to the processor. ### Sign Completed `SignCompleted` is created when a processor sends the coordinator `sign::ProcessorMessage::Completed`. As soon as 34% of validators send `Completed`, the signing protocol is no longer further attempted. ## Re-attempts Key generation protocols may fail if a validator is malicious. Signing protocols, whether batch or transaction, may fail if a validator goes offline or takes too long to respond. Accordingly, the tributary will schedule re-attempts. 
These are communicated with `key_gen::CoordinatorMessage::GenerateKey`, `coordinator::CoordinatorMessage::BatchReattempt`, and `sign::CoordinatorMessage::Reattempt`. TODO: Document the re-attempt scheduling logic. ================================================ FILE: spec/cryptography/Distributed Key Generation.md ================================================ # Distributed Key Generation Serai uses a modification of Pedersen's Distributed Key Generation, which is actually Feldman's Verifiable Secret Sharing Scheme run by every participant, as described in the FROST paper. The modification included in FROST was to include a Schnorr Proof of Knowledge for coefficient zero, preventing rogue key attacks. This results in a two-round protocol. ### Encryption In order to protect the secret shares during communication, the `dkg` library establishes a public key for encryption at the start of a given protocol. Every encrypted message (such as the secret shares) then includes a per-message encryption key. These two keys are used in an Elliptic-curve Diffie-Hellman handshake to derive a shared key. This shared key is then hashed to obtain a key and IV for use in a ChaCha20 stream cipher instance, which is xor'd against a message to encrypt it. ### Blame Since each message has a distinct key attached, and accordingly a distinct shared key, it's possible to reveal the shared key for a specific message without revealing any other message's decryption keys. This is utilized when a participant misbehaves. A participant who receives an invalid encrypted message publishes its key, which it is able to do without concern for side effects. With the key published, all participants can decrypt the message in order to decide blame. While key reuse by a participant is considered as them revealing the messages themselves, and therefore out of scope, there is an attack where a malicious adversary claims another participant's encryption key.
They'll fail to encrypt their message, and the recipient will issue a blame statement. This blame statement, intended to reveal the malicious adversary, also reveals the message by the participant whose keys were co-opted. To resolve this, a proof-of-possession is also included with encrypted messages, ensuring only those actually with per-message keys can claim to use them. ================================================ FILE: spec/cryptography/FROST.md ================================================ # FROST Serai implements [FROST](https://eprint.iacr.org/2020/852), as specified in [draft-irtf-cfrg-frost-11](https://datatracker.ietf.org/doc/draft-irtf-cfrg-frost/). ### Modularity In order to support other algorithms which decompose to Schnorr, our FROST implementation is generic, able to run any algorithm satisfying its `Algorithm` trait. With these algorithms, there's frequently a requirement for further transcripting than what FROST expects. Accordingly, the transcript format is also modular so formats which aren't naive like the IETF's can be used. ### Extensions In order to support algorithms which require their nonces be represented across multiple generators, FROST supports providing a nonce's commitments across multiple generators. In order to ensure their correctness, an extended [CP93's Discrete Log Equality Proof](https://chaum.com/wp-content/uploads/2021/12/Wallet_Databases.pdf) is used. The extension is simply to transcript `n` generators, instead of just two, enabling proving for all of them at once. Since FROST nonces are binomial, every nonce would require two DLEq proofs. To make this more efficient, we hash their commitments to obtain a binding factor, before doing a single DLEq proof for `d + be`, similar to how FROST calculates its nonces (as well as MuSig's key aggregation). As some algorithms require multiple nonces, effectively including multiple Schnorr signatures within one signature, the library also supports providing multiple nonces. 
The second component of a FROST nonce is intended to be multiplied by a per-participant binding factor to ensure the security of FROST. When additional nonces are used, this is actually a per-nonce per-participant binding factor. When multiple nonces are used, with multiple generators, we use a single DLEq proof for all nonces, merging their challenges. This provides a proof of `1 + n` elements instead of `2n`. Finally, to support additive offset signing schemes (accounts, stealth addresses, randomization), it's possible to specify a scalar offset for keys. The public key signed for is also offset by this value. During the signing process, the offset is explicitly transcripted. Then, the offset is added to the participant with the lowest ID. # Caching modular-frost supports caching a preprocess. This is done by having all preprocesses use a seeded RNG. Accordingly, the entire preprocess can be derived from the RNG seed, making the cache just the seed. Reusing preprocesses would enable a third-party to recover your private key share. Accordingly, you MUST not reuse preprocesses. Third-party knowledge of your preprocess would also enable their recovery of your private key share. Accordingly, you MUST treat cached preprocesses with the same security as your private key share. Since a reused seed will lead to a reused preprocess, seeded RNGs are generally frowned upon when doing multisignature operations. This isn't an issue as each new preprocess obtains a fresh seed from the specified RNG. Assuming the provided RNG isn't generating the same seed multiple times, the only way for this seeded RNG to fail is if a preprocess is loaded multiple times, which was already a failure point. ================================================ FILE: spec/integrations/Bitcoin.md ================================================ # Bitcoin ### Addresses Bitcoin addresses are an enum, defined as follows: - `p2pkh`: 20-byte hash. - `p2sh`: 20-byte hash. - `p2wpkh`: 20-byte hash. 
- `p2wsh`: 32-byte hash. - `p2tr`: 32-byte key. ### In Instructions Bitcoin In Instructions are present via the transaction's last output in the form of `OP_RETURN`, and accordingly limited to 80 bytes. `origin` is automatically set to the transaction's first input's address, if recognized. If it's not recognized, an address of the multisig's current Bitcoin address is used, causing any failure to become a donation. ### Out Instructions Out Instructions ignore `data`. ================================================ FILE: spec/integrations/Ethereum.md ================================================ # Ethereum ### Addresses Ethereum addresses are 20-byte hashes, identical to Ethereum proper. ### In Instructions In Instructions may be created in one of two ways. 1) Have an EOA call `transfer` or `transferFrom` on an ERC20, appending the encoded InInstruction directly after the calldata. `origin` defaults to the party transferred from. 2) Call `inInstruction` on the Router. `origin` defaults to `msg.sender`. ### Out Instructions `data` is limited to 512 bytes. If `data` isn't provided or is malformed, ETH transfers will execute with 5,000 gas and token transfers with 100,000 gas. If `data` is provided and well-formed, `destination` is ignored and the Ethereum Router will construct and call a new contract to proxy the contained calls. The transfer executes to the constructed contract as above, before the constructed contract is called with the calls inside `data`. The sandboxed execution has a gas limit of 350,000. ================================================ FILE: spec/integrations/Instructions.md ================================================ # Instructions Instructions are used to communicate with networks connected to Serai, and they come in two forms: - In Instructions are programmable specifications paired with incoming coins, encoded into transactions on connected networks. 
Serai will parse included instructions when it receives coins, executing the included specs. - Out Instructions detail how to transfer coins, either to a Serai address or an address native to the network of the coins in question. A transaction containing an In Instruction and an Out Instruction (to a native address) will receive coins to Serai and send coins from Serai, without requiring directly performing any transactions on Serai itself. All instructions are encoded under [Shorthand](#shorthand). Shorthand provides frequent use cases to create minimal data representations on connected networks. Instructions are interpreted according to their non-Serai network. Addresses have no validation performed unless otherwise noted. If the processor is instructed to act on invalid data, it will drop the entire instruction. ### Serialization Instructions are [SCALE](https://docs.substrate.io/reference/scale-codec/) encoded. ### In Instruction InInstruction is an enum of: - `Transfer` - `Dex(Data)` The specified target will be minted an appropriate amount of the respective Serai token. If `Dex`, the encoded call will be executed. ### Refundable In Instruction - `origin` (Option\): Address, from the network of origin, which sent coins in. - `instruction` (InInstruction): The action to perform with the incoming coins. Networks may automatically provide `origin`. If they do, the instruction may still provide `origin`, overriding the automatically provided value. If the instruction fails, coins are scheduled to be returned to `origin`, if provided. ### Out Instruction - `address` (ExternalAddress): Address to transfer the coins included with this instruction to. - `data` (Option): Data to include when transferring coins. No validation of external addresses/data is performed on-chain. If data is specified for a chain not supporting data, it is silently dropped. ### Destination Destination is an enum of SeraiAddress and OutInstruction. 
### Shorthand Shorthand is an enum which expands to a Refundable In Instruction. ##### Raw Raw Shorthand contains a Refundable In Instruction directly. This is a verbose fallback option for infrequent use cases not covered by Shorthand. ##### Swap - `origin` (Option\): Refundable In Instruction's `origin`. - `coin` (Coin): Coin to swap funds for. - `minimum` (Amount): Minimum amount of `coin` to receive. - `out` (Destination): Final destination for funds. which expands to: ``` RefundableInInstruction { origin, instruction: InInstruction::Dex(swap(Incoming Asset, coin, minimum, out)), } ``` where `swap` is a function which: 1) Swaps the incoming funds for SRI. 2) Swaps the SRI for `coin`. 3) Checks the amount of `coin` received is greater than `minimum`. 4) Executes `out` with the amount of `coin` received. ##### Add Liquidity - `origin` (Option\): Refundable In Instruction's `origin`. - `minimum` (Amount): Minimum amount of SRI tokens to swap half for. - `gas` (Amount): Amount of SRI to send to `address` to cover gas in the future. - `address` (Address): Account to send the created liquidity tokens to. which expands to: ``` RefundableInInstruction { origin, instruction: InInstruction::Dex( swap_and_add_liquidity(Incoming Asset, minimum, gas, address) ), } ``` where `swap_and_add_liquidity` is a function which: 1) Swaps half of the incoming funds for SRI. 2) Checks the amount of SRI received is greater than `minimum`. 3) Calls `swap_and_add_liquidity` with the amount of SRI received - `gas`, and a matching amount of the incoming coin. 4) Transfers any leftover funds to `address`.
================================================ FILE: spec/integrations/Monero.md ================================================ # Monero ### Addresses Monero addresses are structs, defined as follows: - `kind`: Enum { Standard, Subaddress, Featured { flags: u8 } } - `spend`: [u8; 32] - `view`: [u8; 32] Integrated addresses are not supported due to only being able to send to one per Monero transaction. Supporting them would add a level of complexity to Serai which isn't worth it. This definition of Featured Addresses is non-standard since the flags are represented by a u8, not a VarInt. Currently, only half of the bits are used, with no further planned features. Accordingly, it should be fine to fix its size. If needed, another enum entry for a 2-byte flags Featured Address could be added. This definition is also non-standard by not having a Payment ID field. This is per not supporting integrated addresses. ### In Instructions Monero In Instructions are present via `tx.extra`, specifically via inclusion in a `TX_EXTRA_NONCE` tag. The tag is followed by the VarInt length of its contents, and then additionally marked by a byte `127`. The following data is limited to 254 bytes. ### Out Instructions Out Instructions ignore `data`. ================================================ FILE: spec/policy/Canonical Chain.md ================================================ # Canonical Chain As Serai is a network connected to many external networks, at some point we will likely have to ask ourselves what the canonical chain for a network is. This document intends to establish soft, non-binding policy, in the hopes it'll guide most discussions on the matter. The canonical chain is the chain Serai follows and honors transactions on. Serai does not guarantee operations availability nor integrity on any chains other than the canonical chain. Which chain is considered canonical is dependent on several factors. 
### Finalization Serai finalizes blocks from external networks onto itself. Once a block is finalized, it is considered irreversible. Accordingly, the primary tenet regarding what chain Serai will honor is the chain Serai has finalized. We can only assume the integrity of our coins on that chain. ### Node Software Only node software which passes a quality threshold and actively identifies as belonging to an external network's protocol should be run. Never should a transformative node (a node trying to create a new network from an existing one) be run in place of a node actually for the external network. Beyond active identification, it must have community recognition as belonging. If the majority of a community actively identifying as the network stands behind a hard fork, it should not be considered as a new network yet the next step of the existing one. If a hard fork breaks Serai's integrity, it should not be supported. Multiple independent nodes should be run in order to reduce the likelihood of vulnerabilities to any specific node's faults. ### Rollbacks Over time, various networks have rolled back in response to exploits. A rollback should undergo the same scrutiny as a hard fork. If the rollback breaks Serai's integrity, yet someone identifying as from the project offers to restore integrity out-of-band, integrity is considered kept so long as the offer is followed through on. Since a rollback would break Serai's finalization policy, a technical note on how it could be implemented is provided. Assume a blockchain from `0 .. 100` exists, with `100a ..= 500a` being rolled back blocks. The new chain extends from `99` with `100b ..= 200b`. Serai would define the canonical chain as `0 .. 100`, `100a ..= 500a`, `100b ..= 200b`, with `100b` building off `500a`. 
Serai would have to perform data-availability for `100a ..= 500a` (such as via a JSON file in-tree), and would have to modify the processor to edit its `Eventuality`s/UTXOs at `500a` back to the state at `99`. Any `Burn`s handled after `99` should be handled once again, if the transactions from `100a ..= 500a` cannot simply be carried over. ### On Fault If the canonical chain does put Serai's coins into an invalid state, irreversibly and without amends, then the discrepancy should be amortized to all users as feasible, yet affected operations should otherwise halt if under permanent duress. For example, if Serai lists a token which has a by-governance blacklist function, and is blacklisted without appeal, Serai should destroy all associated sriXYZ and cease operations. If a bug, either in the chain or in Serai's own code, causes a loss of 10% of coins (without amends), operations should halt until all outputs in system can have their virtual amount reduced by a total amount of the loss, proportionalized to each output. Alternatively, Serai could decrease all token balances by 10%. All liquidity/swap operations should be halted until users are given proper time to withdraw, if they so choose, before operations resume. ================================================ FILE: spec/processor/Multisig Rotation.md ================================================ # Multisig Rotation Substrate is expected to determine when a new validator set instance will be created, and with it, a new multisig. Upon the successful creation of a new multisig, as determined by the new multisig setting their key pair on Substrate, rotation begins. ### Timeline The following timeline is established: 1) The new multisig is created, and has its keys set on Serai. Once the next `Batch` with a new external network block is published, its block becomes the "queue block". The new multisig is set to activate at the "queue block", plus `CONFIRMATIONS` blocks (the "activation block"). 
We don't use the last `Batch`'s external network block, as that `Batch` may be older than `CONFIRMATIONS` blocks. Any yet-to-be-included-and-finalized `Batch` will be within `CONFIRMATIONS` blocks of what any processor has scanned however, as it'll wait for inclusion and finalization before continuing scanning. 2) Once the "activation block" itself has been finalized on Serai, UIs should start exclusively using the new multisig. If the "activation block" isn't finalized within `2 * CONFIRMATIONS` blocks, UIs should stop making transactions to any multisig on that network. Waiting for Serai's finalization prevents a UI from using an unfinalized "activation block" before a re-organization to a shorter chain. If a transaction to Serai was carried from the unfinalized "activation block" to the shorter chain, it'd no longer be after the "activation block" and accordingly would be ignored. We could not wait for Serai to finalize the block, yet instead wait for the block to have `CONFIRMATIONS` confirmations. This would prevent needing to wait for an indeterminate amount of time for Serai to finalize the "activation block", with the knowledge it should be finalized. Doing so would open UIs to eclipse attacks, where they live on an alternate chain where a possible "activation block" is finalized, yet Serai finalizes a distinct "activation block". If the alternate chain was longer than the finalized chain, the above issue would be reopened. The reason for UIs stopping under abnormal behavior is as follows. Given a sufficiently delayed `Batch` for the "activation block", UIs will use the old multisig past the point it will be deprecated. Accordingly, UIs must realize when `Batch`s are so delayed and continued transactions are a risk. While `2 * CONFIRMATIONS` is presumably well within the 6 hour period (defined below), that period exists for low-fee transactions at time of congestion. 
It does not exist for UIs with old state, though it can be used to compensate for them (reducing the tolerance for inclusion delays). `2 * CONFIRMATIONS` is before the 6 hour period is enacted, preserving the tolerance for inclusion delays, yet still should only happen under highly abnormal circumstances. In order to minimize the time it takes for "activation block" to be finalized, a `Batch` will always be created for it, regardless of whether it would otherwise have a `Batch` created. 3) The prior multisig continues handling `Batch`s and `Burn`s for `CONFIRMATIONS` blocks, plus 10 minutes, after the "activation block". The first `CONFIRMATIONS` blocks is due to the fact the new multisig shouldn't actually be sent coins during this period, making it irrelevant. If coins are prematurely sent to the new multisig, they're artificially delayed until the end of the `CONFIRMATIONS` blocks plus 10 minutes period. This prevents an adversary from minting Serai tokens using coins in the new multisig, yet then burning them to drain the prior multisig, creating a lack of liquidity for several blocks. The reason for the 10 minutes is to provide grace to honest UIs. Since UIs will wait until Serai confirms the "activation block" for keys before sending to them, which will take `CONFIRMATIONS` blocks plus some latency, UIs would make transactions to the prior multisig past the end of this period if it was `CONFIRMATIONS` alone. Since the next period is `CONFIRMATIONS` blocks, which is how long transactions take to confirm, transactions made past the end of this period would only be received after the next period. After the next period, the prior multisig adds fees and a delay to all received funds (as it forwards the funds from itself to the new multisig). The 10 minutes provides grace for latency. The 10 minutes is a delay on anyone who immediately transitions to the new multisig, in a no latency environment, yet the delay is preferable to fees from forwarding. 
It also should be less than 10 minutes thanks to various latencies. 4) The prior multisig continues handling `Batch`s and `Burn`s for another `CONFIRMATIONS` blocks. This is for two reasons: 1) Coins sent to the new multisig still need time to gain sufficient confirmations. 2) All outputs belonging to the prior multisig should become available within `CONFIRMATIONS` blocks. All `Burn`s handled during this period should use the new multisig for the change address. This should effect a transfer of most outputs. With the expected transfer of most outputs, and the new multisig receiving new external transactions, the new multisig takes the responsibility of signing all unhandled and newly emitted `Burn`s. 5) For the next 6 hours, all non-`Branch` outputs received are immediately forwarded to the new multisig. Only external transactions to the new multisig are included in `Batch`s. The new multisig infers the `InInstruction`, and refund address, for forwarded `External` outputs via reading what they were for the original `External` output. Alternatively, the `InInstruction`, with refund address explicitly included, could be included in the forwarding transaction. This may fail if the `InInstruction` omitted the refund address and is too large to fit in a transaction with one explicitly included. On such failure, the refund would be immediately issued instead. 6) Once the 6 hour period has expired, the prior multisig stops handling outputs it didn't itself create. Any remaining `Eventuality`s are completed, and any available/freshly available outputs are forwarded (creating new `Eventuality`s which also need to successfully resolve). Once all the 6 hour period has expired, no `Eventuality`s remain, and all outputs are forwarded, the multisig publishes a final `Batch` of the first block, plus `CONFIRMATIONS`, which met these conditions, regardless of if it would've otherwise had a `Batch`. 
No further actions by it, nor its validators, are expected (unless, of course, those validators remain present in the new multisig). 7) The new multisig confirms all transactions from all prior multisigs were made as expected, including the reported `Batch`s. Unfortunately, we cannot solely check the immediately prior multisig due to the ability for two sequential malicious multisigs to steal. If multisig `n - 2` only transfers a fraction of its coins to multisig `n - 1`, multisig `n - 1` can 'honestly' operate on the dishonest state it was given, laundering it. This would let multisig `n - 1` forward the results of its as-expected operations from a dishonest starting point to the new multisig, and multisig `n` would attest to multisig `n - 1`'s expected (and therefore presumed honest) operations, assuming liability. This would cause an honest multisig to face full liability for the invalid state, causing it to be fully slashed (as needed to reacquire any lost coins). This would appear short-circuitable if multisig `n - 1` transfers coins exceeding the relevant Serai tokens' supply. Serai never expects to operate in an over-solvent state, yet balance should trend upwards due to a flat fee applied to each received output (preventing a griefing attack). Any balance greater than the tokens' supply may have had funds skimmed off the top, yet they'd still guarantee the solvency of Serai without any additional fees passed to users. Unfortunately, due to the requirement to verify the `Batch`s published (as else the Serai tokens' supply may be manipulated), this cannot actually be achieved (at least, not without a ZK proof the published `Batch`s were correct). 8) The new multisig publishes the next `Batch`, signifying the accepting of full responsibilities and a successful close of the prior multisig. ### Latency and Fees Slightly before the end of step 3, the new multisig should start receiving new external outputs. 
These won't be confirmed for another `CONFIRMATIONS` blocks, and the new multisig won't start handling `Burn`s for another `CONFIRMATIONS` blocks plus 10 minutes. Accordingly, the new multisig should only become responsible for `Burn`s shortly after it has taken ownership of the stream of newly received coins. Before it takes responsibility, it also should've been transferred all internal outputs under the standard scheduling flow. Any delayed outputs will be immediately forwarded, and external stragglers are only reported to Serai once sufficiently confirmed in the new multisig. Accordingly, liquidity should avoid fragmentation during rotation. The only latency should be on the 10 minutes present, and on delayed outputs, which should've been immediately usable, having to wait another `CONFIRMATIONS` blocks to be confirmed once forwarded. Immediate forwarding does unfortunately prevent batching inputs to reduce fees. Given immediate forwarding only applies to latent outputs, considered exceptional, and the protocol's fee handling ensures solvency, this is accepted. ================================================ FILE: spec/processor/Processor.md ================================================ # Processor The processor is a service which has an instance spawned per network. It is responsible for several tasks, from scanning an external network to signing transactions with payments. This document primarily discusses its flow with regards to the coordinator. ### Generate Key On `key_gen::CoordinatorMessage::GenerateKey`, the processor begins a pair of instances of the distributed key generation protocol specified in the FROST paper. The first instance is for a key to use on the external network. The second instance is for a Ristretto public key used to publish data to the Serai blockchain. This pair of FROST DKG instances is considered a single instance of Serai's overall key generation protocol. 
The commitments for both protocols are sent to the coordinator in a single `key_gen::ProcessorMessage::Commitments`. ### Key Gen Commitments On `key_gen::CoordinatorMessage::Commitments`, the processor continues the specified key generation instance. The secret shares for each fellow participant are sent to the coordinator in a `key_gen::ProcessorMessage::Shares`. #### Key Gen Shares On `key_gen::CoordinatorMessage::Shares`, the processor completes the specified key generation instance. The generated key pair is sent to the coordinator in a `key_gen::ProcessorMessage::GeneratedKeyPair`. ### Confirm Key Pair On `substrate::CoordinatorMessage::ConfirmKeyPair`, the processor starts using the newly confirmed key, scanning blocks on the external network for transfers to it. ### External Network Block When the external network has a new block, which is considered finalized (either due to being literally finalized or due to having a sufficient amount of confirmations), it's scanned. Outputs to the key of Serai's multisig are saved to the database. Outputs which newly transfer into Serai are used to build `Batch`s for the block. The processor then begins a threshold signature protocol with its key pair's Ristretto key to sign the `Batch`s. The `Batch`s are each sent to the coordinator in a `substrate::ProcessorMessage::Batch`, enabling the coordinator to know what `Batch`s *should* be published to Serai. After each `substrate::ProcessorMessage::Batch`, the preprocess for the first instance of its signing protocol is sent to the coordinator in a `coordinator::ProcessorMessage::BatchPreprocess`. As a design comment, we *may* be able to sign now possible, already scheduled, branch/leaf transactions at this point. Doing so would be giving a mutable borrow over the scheduler to both the external network and the Serai network, and would accordingly be unsafe. We may want to look at splitting the scheduler in two, in order to reduce latency (TODO). 
### Batch Preprocesses On `coordinator::CoordinatorMessage::BatchPreprocesses`, the processor continues the specified batch signing protocol, sending `coordinator::ProcessorMessage::BatchShare` to the coordinator. ### Batch Shares On `coordinator::CoordinatorMessage::BatchShares`, the processor completes the specified batch signing protocol. If successful, the processor stops signing for this batch and sends `substrate::ProcessorMessage::SignedBatch` to the coordinator. ### Batch Re-attempt On `coordinator::CoordinatorMessage::BatchReattempt`, the processor will create a new instance of the batch signing protocol. The new protocol's preprocess is sent to the coordinator in a `coordinator::ProcessorMessage::BatchPreprocess`. ### Substrate Block On `substrate::CoordinatorMessage::SubstrateBlock`, the processor: 1) Marks all blocks, up to the external block now considered finalized by Serai, as having had their batches signed. 2) Adds the new outputs from newly finalized blocks to the scheduler, along with the necessary payments from `Burn` events on Serai. 3) Sends a `substrate::ProcessorMessage::SubstrateBlockAck`, containing the IDs of all plans now being signed for, to the coordinator. 4) Sends `sign::ProcessorMessage::Preprocess` for each plan now being signed for. ### Sign Preprocesses On `sign::CoordinatorMessage::Preprocesses`, the processor continues the specified transaction signing protocol, sending `sign::ProcessorMessage::Share` to the coordinator. ### Sign Shares On `sign::CoordinatorMessage::Shares`, the processor completes the specified transaction signing protocol. If successful, the processor stops signing for this transaction and publishes the signed transaction. Then, `sign::ProcessorMessage::Completed` is sent to the coordinator, to be broadcasted to all validators so everyone can observe the attempt completed, producing a signed and published transaction. 
### Sign Re-attempt On `sign::CoordinatorMessage::Reattempt`, the processor will create a new instance of the transaction signing protocol if it hasn't already completed/observed completion of an instance of the signing protocol. The new protocol's preprocess is sent to the coordinator in a `sign::ProcessorMessage::Preprocess`. ### Sign Completed On `sign::CoordinatorMessage::Completed`, the processor verifies the included transaction hash actually refers to an accepted transaction which completes the plan it was supposed to. If so, the processor stops locally signing for the transaction, and emits `sign::ProcessorMessage::Completed` if it hasn't prior. ================================================ FILE: spec/processor/Scanning.md ================================================ # Scanning Only blocks with finality, either actual or sufficiently probabilistic, are operated upon. This is referred to as a block with `CONFIRMATIONS` confirmations, the block itself being the first confirmation. For chains which promise finality on a known schedule, `CONFIRMATIONS` is set to `1` and each group of finalized blocks is treated as a single block, with the tail block's hash representing the entire group. For chains which offer finality, on an unknown schedule, `CONFIRMATIONS` is still set to `1` yet blocks aren't aggregated into a group. They're handled individually, yet only once finalized. This allows networks which form finalization erratically to not have to agree on when finalizations were formed, solely that the blocks contained have a finalized descendant. ### Notability, causing a `Batch` `Batch`s are only created for blocks which it benefits to achieve ordering on. These are: - Blocks which contain transactions relevant to Serai - Blocks in which a new multisig activates - Blocks in which a prior multisig retires ### Waiting for `Batch` inclusion Once a `Batch` is created, it is expected to eventually be included on Serai. 
If the `Batch` isn't included within `CONFIRMATIONS` blocks of its creation, the scanner will wait until its inclusion before scanning `batch_block + CONFIRMATIONS`. ================================================ FILE: spec/processor/UTXO Management.md ================================================ # UTXO Management UTXO-based chains have practical requirements for efficient operation which can effectively be guaranteed to terminate with a safe end state. This document attempts to detail such requirements, and the implementations in Serai resolving them. ## Fees From Effecting Transactions Out When `sriXYZ` is burnt, Serai is expected to create an output for `XYZ` as instructed. The transaction containing this output will presumably have some fee necessitating payment. Serai linearly amortizes this fee over all outputs this transaction intends to create in response to burns. While Serai could charge a fee in advance, either static or dynamic to views of the fee market, it'd risk the fee being inaccurate. If it's too high, users have paid fees they shouldn't have. If it's too low, Serai is insolvent. This is why the actual fee is amortized, rather than an estimation being prepaid. Serai could report a view, and when burning occurred, that view could be locked in as the basis for transaction fees as used to fulfill the output in question. This would require burns specify the most recent fee market view they're aware of, signifying their agreeance, with Serai erroring if a new view is published before the burn is included on-chain. Not only would this require more data be published to Serai (widening data pipeline requirements), it'd prevent any RBF-based solutions to dynamic fee markets causing transactions to get stuck. ## Output Frequency Outputs can be created on an external network at rate `max_outputs_per_tx / external_tick_rate`, where `external_tick_rate` is the external network's limitations on spending outputs. 
While `external_tick_rate` is generally writable as zero, due to mempool chaining, some external networks may not allow spending outputs from transactions which have yet to be ordered. Monero only allows spending outputs from transactions which have 10 confirmations, for its own security. Serai defines its own tick rate per external network, such that `serai_tick_rate >= external_tick_rate`. This ensures that Serai never assumes availability before actual availability. `serai_tick_rate` is also `> 0`. This is since a zero `external_tick_rate` generally does not truly allow an infinite output creation rate due to limitations on the amount of transactions allowed in the mempool. Define `output_creation_rate` as `max_outputs_per_tx / serai_tick_rate`. Under a naive system which greedily accumulates inputs and linearly processes outputs, this is the highest speed at which outputs may be processed. If the Serai blockchain enables burning sriXYZ at a rate exceeding `output_creation_rate`, a backlog would form. This backlog could linearly grow at a rate larger than the outputs could linearly shrink, creating an ever-growing backlog, performing a DoS against Serai. One solution would be to increase the fee associated with burning sriXYZ when approaching `output_creation_rate`, making such a DoS unsustainable. This would require the Serai blockchain be aware of each external network's `output_creation_rate` and implement such a sliding fee. This 'solution' isn't preferred as it still temporarily has a growing queue, and normal users would also be affected by the increased fees. The solution implemented into Serai is to consume all burns from the start of a global queue which can be satisfied under currently available inputs. While the consumed queue may have 256 items, which can't be processed within a single tick by an external network whose `output_creation_rate` is 16, Serai can immediately set a finite bound on execution duration. 
For the above example parameters, Serai would create 16 outputs within its tick, ignoring the necessity of a change output. These 16 outputs would _not_ create any outputs Serai is expected to create in response to burns, yet instead create 16 "branch" outputs. One tick later, when the branch outputs are available to spend, each would fund the creation of 16 expected outputs. For `e` expected outputs, the execution duration is just `log e` ticks _with the base of the logarithm being `output_creation_rate`_. Since these `e` expected outputs are consumed from the linearly-implemented global queue into their own tree structure, execution duration cannot be extended. We can also re-consume the entire global queue (barring input availability, see next section) after just one tick, when the change output becomes available again. Due to the logarithmic complexity of fulfilling burns, attacks require exponential growth (which is infeasible to scale). This solution does not require a sliding fee on Serai's side due to not needing to limit the on-chain rate of burns, which means it doesn't so adversely affect normal users. While an increased tree depth will increase the amount of transactions needed to fulfill an output, increasing the fee amortized over the output and its siblings, this fee scales linearly with the logarithmically scaling tree depth. This is considered acceptable. ## Input Availability The following section refers to spending an output, and then spending it again. Spending it again, which is impossible under the UTXO model, refers to spending the change output of the transaction it was spent in. The following section also assumes any published transaction is immediately ordered on-chain, ignoring the potential for latency from mempool to blockchain (as it is assumed to have a negligible effect in practice). When a burn for amount `a` is issued, the sum amount of immediately available inputs may be `< a`. 
This is because despite each output being considered usable on a tick basis, there is no global tick. Each output may or may not be spendable at some moment, and spending it will prevent its availability for one tick of a clock newly started. This means all outputs will become available by simply waiting a single tick, without spending any outputs during the waited tick. Any outputs unlocked at the start of the tick will carry, and within the tick the rest of the outputs will become unlocked. This means that within a tick of operations, the full balance of Serai can be considered unlocked and used to consume the entire global queue. While Serai could wait for all its outputs to be available before popping from the front of the global queue, eager execution as enough inputs become available provides lower latency. Considering the tick may be an hour (as in the case of Bitcoin), this is very appreciated. If a full tick is waited for, due to the front of the global queue having a notably large burn, then the entire global queue will be consumed as full input availability means the ability to satisfy all potential burns in a solvent system. ## Fees Incurred During Operations While fees incurred when satisfying burns were covered above, with documentation on how solvency is maintained, two other operating costs exist. 1) Input accumulation 2) Multisig rotations Input accumulation refers to transactions which exist to merge inputs. Just as there is a `max_outputs_per_tx`, there is a `max_inputs_per_tx`. When the amount of inputs belonging to Serai exceeds `max_inputs_per_tx`, a TX merging them is created. This TX incurs fees yet has no outputs mapping to burns to amortize them over, accumulating operating costs. Please note that this merging occurs in parallel to create a logarithmic execution, similar to how outputs are also processed in parallel. 
As for multisig rotation, multisig rotation occurs when a new multisig for an external network is created and the old multisig must transfer its inputs in order for Serai to continue its operations. This operation also incurs fees without having outputs immediately available to amortize over. Serai could charge fees on received outputs, deducting from the amount of `sriXYZ` minted in order to cover these operating fees. An overt amount would be deducted to practically ensure solvency, forming a buffer. Once the buffer is filled, fees would be reduced. As the buffer drains, fees would go back up. This would keep charged fees in line with actual fees, once the buffer is initially filled, yet requires: 1) Creating and tracking a buffer 2) Overcharging some users on fees while still risking insolvency, if the actual fees keep increasing in a way preventing successful estimation. The solution Serai implements is to accrue operating costs, tracking with each created transaction the running operating costs. When a created transaction has payments out, all of the operating costs incurred so far, which have yet to be amortized, are immediately and fully amortized. ## Attacks by a Malicious Miner There is the concern that a significant amount of outputs could be created, which when merged as inputs, create a significant amount of operating costs. This would then be forced onto random users who burn `sriXYZ` soon after, while the party who caused the operating costs would then be able to burn their own `sriXYZ` without notable fees. To describe this attack in its optimal form, assume a sole malicious block producer for an external network. The malicious miner adds an output to Serai, not paying any fees as the block producer. This single output alone may trigger an aggregation transaction. Serai would pay for the transaction fee, the fee going to the malicious miner. 
When Serai users burn `sriXYZ`, they are hit with the aggregation transaction's fee plus the normally amortized fee. Then, the malicious miner burns their `sriXYZ`, having the fee they capture be amortized over their output. In this process, they remain net except for the increased transaction fees they gain from other users, which they profit. To limit this attack vector, a flat fee of `2 * (the estimation of a 2-input-merging transaction fee)` is applied to each input. This means, assuming an inability to manipulate Serai's fee estimations, creating an output to force a merge transaction (and the associated fee) costs the attacker twice as much as the associated fee. A 2-input TX's fee is used as aggregating multiple inputs at once actually yields in Serai's favor so long as the per-input fee exceeds the cost of the per-input addition to the TX. Since the per-input fee is the cost of an entire TX, this property is true. ### Profitability Without the Flat Fee With a Minority of Hash Power Ignoring the above flat fee, a malicious miner could use aggregating multiple inputs to achieve profit with a minority of hash power. The following is how a miner with 7% of the external network's hash power could execute this attack profitably over a network with a `max_inputs_per_tx` value of 16: 1) Mint `sriXYZ` with 256 outputs during their own blocks. This incurs no fees and would force 16 aggregation transactions to be created. 2) _A miner_, which has a 7% chance of being the malicious miner, collects the 16 transaction fees. 3) The malicious miner burns their sriXYZ, with a 7% chance of collecting their own fee or a 93% chance of losing a single transaction fee. 16 attempts would cost 16 transaction fees if they always lose their single transaction fee. Gaining the 16 transaction fees once, offsetting costs, is expected to happen with just 6.25% of the hash power. 
Since the malicious miner has 7%, they're statistically likely to recoup their costs and eventually turn a profit. With a flat fee of at least the cost to aggregate a single input in a full aggregation transaction, this attack falls apart. Serai's flat fee is the higher cost of the fee to aggregate two inputs in an aggregation transaction. ### Solvency Without the Flat Fee Even without the above flat fee, Serai remains solvent. With the above flat fee, malicious miners on external networks can only steal from other users if they can manipulate Serai's fee estimations so that the merge transaction fee used is twice as high as the fees charged for causing a merge transaction. This is assumed infeasible to perform at scale, yet even if demonstrated feasible, it would not be a critical vulnerability against Serai. Solely a low/medium/high vulnerability against the users (though one it would still be our responsibility to rectify). ================================================ FILE: spec/protocol/Constants.md ================================================ # Constants ### Types These are the list of types used to represent various properties within the protocol. | Alias | Type | |-----------------|----------------------------------------------| | SeraiAddress | sr25519::Public (unchecked [u8; 32] wrapper) | | Amount | u64 | | NetworkId | NetworkId (Rust enum, SCALE-encoded) | | Coin | Coin (Rust enum, SCALE-encoded) | | Session | u32 | | Validator Set | (NetworkId, Session) | | Key | BoundedVec\ | | KeyPair | (SeraiAddress, Key) | | ExternalAddress | BoundedVec\ | | Data | BoundedVec\ | ### Networks Every network connected to Serai operates over a specific curve. The processor generates a distinct set of keys per network. Beyond the key-generation itself being isolated, the generated keys are further bound to their respective networks via an additive offset created by hashing the network's name (among other properties). 
The network's key is used for all coins on that network. | Network | Curve | ID | |----------|-----------|----| | Serai | Ristretto | 0 | | Bitcoin | Secp256k1 | 1 | | Ethereum | Secp256k1 | 2 | | Monero | Ed25519 | 3 | ### Coins Coins exist over a network and have a distinct integer ID. | Coin | Network | ID | |----------|----------|----| | Serai | Serai | 0 | | Bitcoin | Bitcoin | 1 | | Ether | Ethereum | 2 | | DAI | Ethereum | 3 | | Monero | Monero | 4 | ================================================ FILE: spec/protocol/In Instructions.md ================================================ # In Instructions In Instructions are included onto the Serai blockchain via unsigned transactions. In order to ensure the integrity of the included instructions, the validator set responsible for the network in question produces a threshold signature of their authenticity. This lets all other validators verify the instructions with an O(1) operation. ================================================ FILE: spec/protocol/Validator Sets.md ================================================ # Validator Sets Validator Sets are defined at the protocol level, with the following parameters: - `network` (NetworkId): The network this validator set operates over. - `allocation_per_key_share` (Amount): Amount of stake needing allocation in order to receive a key share. ### Participation in Consensus The validator set for `NetworkId::Serai` participates in Serai's own consensus, producing and finalizing blocks. ### Multisig Every Validator Set is expected to form a `t`-of-`n` multisig, where `n` is the amount of key shares in the Validator Set and `t` is `n * 2 / 3 + 1`, for each of its networks. This multisig is secure to hold coins valued at up to 33% of the Validator Set's allocated stake. If the coins exceed that threshold, there's more value in the multisig and associated liquidity pool than in the supermajority of allocated stake securing them both. 
Accordingly, it'd no longer be financially secure, and it MUST reject newly added coins. ### Multisig Creation Multisigs are created by Processors, communicating via their Coordinators. They're then confirmed on chain via the `validator-sets` pallet. This is done by having 100% of participants agree on the resulting group key. While this isn't fault tolerant regarding liveness, a malicious actor who forces a `t`-of-`n` multisig to be `t`-of-`n-1` reduces the fault tolerance of the created multisig which is a greater issue. If a node does prevent multisig creation, other validators should issue slashes for it/remove it from the Validator Set entirely. Placing the creation on chain also solves the question of whether the multisig was successfully created or not. Processors cannot simply ask each other if they succeeded without creating an instance of the Byzantine Generals Problem. Placing results within a Byzantine Fault Tolerant system resolves this. ### Multisig Rotation Please see `processor/Multisig Rotation.md` for details on the timing. Once the new multisig publishes its first `Batch`, the old multisig's keys are cleared and the set is considered retired. After a one-session cooldown period, they may deallocate their stake. ### Set Keys (message) - `network` (Network): Network whose key is being set. - `key_pair` (KeyPair): Key pair being set for this `Session`. - `signature` (Signature): A MuSig-style signature of all validators, confirming this key. 
================================================ FILE: substrate/abi/Cargo.toml ================================================ [package] name = "serai-abi" version = "0.1.0" description = "ABI for the Serai runtime" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/abi" authors = ["Luke Parker "] edition = "2021" rust-version = "1.74" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] } borsh = { version = "1", default-features = false, features = ["derive", "de_strict_order"], optional = true } serde = { version = "1", default-features = false, features = ["derive", "alloc"], optional = true } sp-core = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-runtime = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-consensus-babe = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-consensus-grandpa = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } frame-system = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } frame-support = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } serai-primitives = { path = "../primitives", version = "0.1", default-features = false } serai-coins-primitives = { path = "../coins/primitives", version = "0.1", default-features = false } serai-validator-sets-primitives = { path = "../validator-sets/primitives", 
version = "0.1", default-features = false } serai-genesis-liquidity-primitives = { path = "../genesis-liquidity/primitives", version = "0.1", default-features = false } serai-emissions-primitives = { path = "../emissions/primitives", version = "0.1", default-features = false } serai-in-instructions-primitives = { path = "../in-instructions/primitives", version = "0.1", default-features = false } serai-signals-primitives = { path = "../signals/primitives", version = "0.1", default-features = false } [features] std = [ "scale/std", "borsh?/std", "serde?/std", "sp-core/std", "sp-runtime/std", "sp-consensus-babe/std", "sp-consensus-grandpa/std", "frame-system/std", "frame-support/std", "serai-primitives/std", "serai-coins-primitives/std", "serai-validator-sets-primitives/std", "serai-genesis-liquidity-primitives/std", "serai-emissions-primitives/std", "serai-in-instructions-primitives/std", "serai-signals-primitives/std", ] borsh = [ "dep:borsh", "serai-primitives/borsh", "serai-coins-primitives/borsh", "serai-validator-sets-primitives/borsh", "serai-genesis-liquidity-primitives/borsh", "serai-in-instructions-primitives/borsh", "serai-signals-primitives/borsh", ] serde = [ "dep:serde", "serai-primitives/serde", "serai-coins-primitives/serde", "serai-validator-sets-primitives/serde", "serai-genesis-liquidity-primitives/serde", "serai-in-instructions-primitives/serde", "serai-signals-primitives/serde", ] default = ["std"] ================================================ FILE: substrate/abi/LICENSE ================================================ MIT License Copyright (c) 2023 Luke Parker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to 
do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: substrate/abi/src/babe.rs ================================================ use sp_consensus_babe::EquivocationProof; use serai_primitives::{Header, SeraiAddress}; #[derive( Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking, )] pub struct ReportEquivocation { pub equivocation_proof: alloc::boxed::Box>, pub key_owner_proof: SeraiAddress, } // We could define a Babe Config here and use the literal pallet_babe::Call // The disadvantage to this would be the complexity and presence of junk fields such as `__Ignore` #[derive( Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking, )] pub enum Call { report_equivocation(ReportEquivocation), report_equivocation_unsigned(ReportEquivocation), } ================================================ FILE: substrate/abi/src/coins.rs ================================================ use serai_primitives::{Balance, SeraiAddress}; pub use serai_coins_primitives as primitives; use primitives::OutInstructionWithBalance; #[derive( Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking, )] #[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize, borsh::BorshDeserialize))] #[cfg_attr(feature = "serde", derive(serde::Serialize))] #[cfg_attr(all(feature = "std", 
feature = "serde"), derive(serde::Deserialize))] pub enum Call { transfer { to: SeraiAddress, balance: Balance }, burn { balance: Balance }, burn_with_instruction { instruction: OutInstructionWithBalance }, } #[derive( Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking, )] #[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize, borsh::BorshDeserialize))] #[cfg_attr(feature = "serde", derive(serde::Serialize))] #[cfg_attr(all(feature = "std", feature = "serde"), derive(serde::Deserialize))] pub enum Event { Mint { to: SeraiAddress, balance: Balance }, Burn { from: SeraiAddress, balance: Balance }, BurnWithInstruction { from: SeraiAddress, instruction: OutInstructionWithBalance }, Transfer { from: SeraiAddress, to: SeraiAddress, balance: Balance }, } ================================================ FILE: substrate/abi/src/dex.rs ================================================ use sp_runtime::BoundedVec; use serai_primitives::*; type PoolId = ExternalCoin; type MaxSwapPathLength = sp_core::ConstU32<3>; #[derive( Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking, )] #[cfg_attr(feature = "serde", derive(serde::Serialize))] #[cfg_attr(all(feature = "std", feature = "serde"), derive(serde::Deserialize))] pub enum Call { add_liquidity { coin: ExternalCoin, coin_desired: SubstrateAmount, sri_desired: SubstrateAmount, coin_min: SubstrateAmount, sri_min: SubstrateAmount, mint_to: SeraiAddress, }, remove_liquidity { coin: ExternalCoin, lp_token_burn: SubstrateAmount, coin_min_receive: SubstrateAmount, sri_min_receive: SubstrateAmount, withdraw_to: SeraiAddress, }, swap_exact_tokens_for_tokens { path: BoundedVec, amount_in: SubstrateAmount, amount_out_min: SubstrateAmount, send_to: SeraiAddress, }, swap_tokens_for_exact_tokens { path: BoundedVec, amount_out: SubstrateAmount, amount_in_max: SubstrateAmount, send_to: SeraiAddress, }, } #[derive( Clone, PartialEq, Eq, Debug, scale::Encode, 
scale::Decode, scale::DecodeWithMemTracking, )] #[cfg_attr(feature = "serde", derive(serde::Serialize))] #[cfg_attr(all(feature = "std", feature = "serde"), derive(serde::Deserialize))] pub enum Event { PoolCreated { pool_id: PoolId, pool_account: SeraiAddress, }, LiquidityAdded { who: SeraiAddress, mint_to: SeraiAddress, pool_id: PoolId, coin_amount: SubstrateAmount, sri_amount: SubstrateAmount, lp_token_minted: SubstrateAmount, }, LiquidityRemoved { who: SeraiAddress, withdraw_to: SeraiAddress, pool_id: PoolId, coin_amount: SubstrateAmount, sri_amount: SubstrateAmount, lp_token_burned: SubstrateAmount, }, SwapExecuted { who: SeraiAddress, send_to: SeraiAddress, path: BoundedVec, amount_in: SubstrateAmount, amount_out: SubstrateAmount, }, } ================================================ FILE: substrate/abi/src/economic_security.rs ================================================ use serai_primitives::ExternalNetworkId; #[derive( Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking, )] #[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize, borsh::BorshDeserialize))] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum Event { EconomicSecurityReached { network: ExternalNetworkId }, } ================================================ FILE: substrate/abi/src/emissions.rs ================================================ pub use serai_emissions_primitives as primitives; ================================================ FILE: substrate/abi/src/genesis_liquidity.rs ================================================ pub use serai_genesis_liquidity_primitives as primitives; use serai_primitives::*; use primitives::*; #[derive( Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking, )] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum Call { remove_coin_liquidity { balance: ExternalBalance }, oraclize_values { values: Values, signature: 
Signature }, } #[derive( Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking, )] #[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize, borsh::BorshDeserialize))] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum Event { GenesisLiquidityAdded { by: SeraiAddress, balance: ExternalBalance }, GenesisLiquidityRemoved { by: SeraiAddress, balance: ExternalBalance }, GenesisLiquidityAddedToPool { coin: ExternalBalance, sri: Amount }, } ================================================ FILE: substrate/abi/src/grandpa.rs ================================================ use sp_consensus_grandpa::EquivocationProof; use serai_primitives::{BlockNumber, SeraiAddress}; #[derive( Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking, )] pub struct ReportEquivocation { pub equivocation_proof: alloc::boxed::Box>, pub key_owner_proof: SeraiAddress, } #[derive( Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking, )] pub enum Call { report_equivocation(ReportEquivocation), report_equivocation_unsigned(ReportEquivocation), } #[derive( Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking, )] #[cfg_attr(feature = "serde", derive(serde::Serialize))] #[cfg_attr(all(feature = "std", feature = "serde"), derive(serde::Deserialize))] pub enum Event { NewAuthorities { authority_set: alloc::vec::Vec<(SeraiAddress, u64)> }, // TODO: Remove these Paused, Resumed, } ================================================ FILE: substrate/abi/src/in_instructions.rs ================================================ use serai_primitives::*; pub use serai_in_instructions_primitives as primitives; use primitives::SignedBatch; #[derive( Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking, )] #[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize, borsh::BorshDeserialize))] #[cfg_attr(feature = 
"serde", derive(serde::Serialize))] #[cfg_attr(all(feature = "std", feature = "serde"), derive(serde::Deserialize))] pub enum Call { execute_batch { batch: SignedBatch }, } #[derive( Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking, )] #[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize, borsh::BorshDeserialize))] #[cfg_attr(feature = "serde", derive(serde::Serialize))] #[cfg_attr(all(feature = "std", feature = "serde"), derive(serde::Deserialize))] pub enum Event { Batch { network: ExternalNetworkId, id: u32, block: BlockHash, instructions_hash: [u8; 32] }, InstructionFailure { network: ExternalNetworkId, id: u32, index: u32 }, Halt { network: ExternalNetworkId }, } ================================================ FILE: substrate/abi/src/lib.rs ================================================ #![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(not(feature = "std"), no_std)] #![allow(non_camel_case_types)] #![expect(clippy::cast_possible_truncation)] extern crate alloc; pub use serai_primitives as primitives; pub mod system; pub mod timestamp; pub mod coins; pub mod liquidity_tokens; pub mod dex; pub mod validator_sets; pub mod genesis_liquidity; pub mod emissions; pub mod economic_security; pub mod in_instructions; pub mod signals; pub mod babe; pub mod grandpa; pub mod tx; #[derive( Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking, )] pub enum Call { #[codec(index = 1)] Timestamp(timestamp::Call), #[codec(index = 3)] Coins(coins::Call), #[codec(index = 4)] LiquidityTokens(liquidity_tokens::Call), #[codec(index = 5)] Dex(dex::Call), #[codec(index = 6)] ValidatorSets(validator_sets::Call), #[codec(index = 7)] GenesisLiquidity(genesis_liquidity::Call), #[codec(index = 10)] InInstructions(in_instructions::Call), #[codec(index = 11)] Signals(signals::Call), #[codec(index = 12)] Babe(babe::Call), #[codec(index = 13)] Grandpa(grandpa::Call), } // 
TODO: Remove this #[derive( Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking, )] pub enum TransactionPaymentEvent { TransactionFeePaid { who: serai_primitives::SeraiAddress, actual_fee: u64, tip: u64 }, } #[derive( Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking, )] pub enum Event { #[codec(index = 0)] System(system::Event), #[codec(index = 2)] TransactionPayment(TransactionPaymentEvent), #[codec(index = 3)] Coins(coins::Event), #[codec(index = 4)] LiquidityTokens(liquidity_tokens::Event), #[codec(index = 5)] Dex(dex::Event), #[codec(index = 6)] ValidatorSets(validator_sets::Event), #[codec(index = 7)] GenesisLiquidity(genesis_liquidity::Event), #[codec(index = 9)] EconomicSecurity(economic_security::Event), #[codec(index = 10)] InInstructions(in_instructions::Event), #[codec(index = 11)] Signals(signals::Event), #[codec(index = 13)] Grandpa(grandpa::Event), } #[derive( Clone, Copy, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking, )] #[cfg_attr(feature = "serde", derive(serde::Serialize))] #[cfg_attr(all(feature = "std", feature = "serde"), derive(serde::Deserialize))] pub struct Extra { pub era: sp_runtime::generic::Era, #[codec(compact)] pub nonce: u32, #[codec(compact)] pub tip: u64, } #[derive( Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking, )] #[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize, borsh::BorshDeserialize))] #[cfg_attr(feature = "serde", derive(serde::Serialize))] #[cfg_attr(all(feature = "std", feature = "serde"), derive(serde::Deserialize))] pub struct SignedPayloadExtra { pub spec_version: u32, pub tx_version: u32, pub genesis: [u8; 32], pub mortality_checkpoint: [u8; 32], } pub type Transaction = tx::Transaction; ================================================ FILE: substrate/abi/src/liquidity_tokens.rs ================================================ use serai_primitives::{Balance, 
SeraiAddress}; #[derive( Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking, )] #[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize, borsh::BorshDeserialize))] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum Call { burn { balance: Balance }, transfer { to: SeraiAddress, balance: Balance }, } #[derive( Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking, )] #[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize, borsh::BorshDeserialize))] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum Event { Mint { to: SeraiAddress, balance: Balance }, Burn { from: SeraiAddress, balance: Balance }, Transfer { from: SeraiAddress, to: SeraiAddress, balance: Balance }, } ================================================ FILE: substrate/abi/src/signals.rs ================================================ use serai_primitives::{NetworkId, SeraiAddress}; use serai_validator_sets_primitives::ValidatorSet; pub use serai_signals_primitives as primitives; use primitives::SignalId; #[derive( Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking, )] #[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize, borsh::BorshDeserialize))] #[cfg_attr(feature = "serde", derive(serde::Serialize))] #[cfg_attr(all(feature = "std", feature = "serde"), derive(serde::Deserialize))] pub enum Call { register_retirement_signal { in_favor_of: [u8; 32] }, revoke_retirement_signal { retirement_signal_id: [u8; 32] }, favor { signal_id: SignalId, for_network: NetworkId }, revoke_favor { signal_id: SignalId, for_network: NetworkId }, stand_against { signal_id: SignalId, for_network: NetworkId }, } #[derive( Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking, )] #[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize, borsh::BorshDeserialize))] #[cfg_attr(feature = "serde", 
derive(serde::Serialize))] #[cfg_attr(all(feature = "std", feature = "serde"), derive(serde::Deserialize))] pub enum Event { RetirementSignalRegistered { signal_id: [u8; 32], in_favor_of: [u8; 32], registrant: SeraiAddress, }, RetirementSignalRevoked { signal_id: [u8; 32], }, SignalFavored { signal_id: SignalId, by: SeraiAddress, for_network: NetworkId, }, SetInFavor { signal_id: SignalId, set: ValidatorSet, }, RetirementSignalLockedIn { signal_id: [u8; 32], }, SetNoLongerInFavor { signal_id: SignalId, set: ValidatorSet, }, FavorRevoked { signal_id: SignalId, by: SeraiAddress, for_network: NetworkId, }, AgainstSignal { signal_id: SignalId, who: SeraiAddress, for_network: NetworkId, }, } ================================================ FILE: substrate/abi/src/system.rs ================================================ use frame_system::DispatchEventInfo; use frame_support::sp_runtime::DispatchError; use serai_primitives::SeraiAddress; #[derive( Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking, )] pub enum Event { ExtrinsicSuccess { dispatch_info: DispatchEventInfo }, ExtrinsicFailed { dispatch_error: DispatchError, dispatch_info: DispatchEventInfo }, CodeUpdated, NewAccount { account: SeraiAddress }, KilledAccount { account: SeraiAddress }, } ================================================ FILE: substrate/abi/src/timestamp.rs ================================================ #[derive( Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking, )] #[cfg_attr(feature = "serde", derive(serde::Serialize))] #[cfg_attr(all(feature = "std", feature = "serde"), derive(serde::Deserialize))] pub enum Call { set { #[codec(compact)] now: u64, }, } ================================================ FILE: substrate/abi/src/tx.rs ================================================ #![allow(deprecated)] use scale::Encode; use sp_core::sr25519::{Public, Signature}; use sp_runtime::traits::Verify; use 
serai_primitives::SeraiAddress; use frame_support::dispatch::GetDispatchInfo; pub trait TransactionMember: Clone + PartialEq + Eq + core::fmt::Debug + scale::Encode + scale::Decode { } impl TransactionMember for T { } type TransactionEncodeAs<'a, Extra> = (&'a crate::Call, &'a Option<(SeraiAddress, Signature, Extra)>); type TransactionDecodeAs = (crate::Call, Option<(SeraiAddress, Signature, Extra)>); // We use our own Transaction struct, over UncheckedExtrinsic, for more control, a bit more // simplicity, and in order to be immune to https://github.com/paritytech/polkadot-sdk/issues/2947 #[allow(clippy::multiple_bound_locations)] #[derive(Clone, PartialEq, Eq, Debug, scale::DecodeWithMemTracking)] pub struct Transaction< Call: 'static + TransactionMember + From, Extra: 'static + TransactionMember, > { call: crate::Call, mapped_call: Call, signature: Option<(SeraiAddress, Signature, Extra)>, } impl, Extra: 'static + TransactionMember> Transaction { pub fn new(call: crate::Call, signature: Option<(SeraiAddress, Signature, Extra)>) -> Self { Self { call: call.clone(), mapped_call: call.into(), signature } } pub fn call(&self) -> &crate::Call { &self.call } pub fn signer(&self) -> Option { self.signature.as_ref().map(|(address, _sig, _extra)| *address) } } impl, Extra: 'static + TransactionMember> scale::Encode for Transaction { fn using_encoded R>(&self, f: F) -> R { let tx: TransactionEncodeAs = (&self.call, &self.signature); tx.using_encoded(f) } } impl, Extra: 'static + TransactionMember> scale::Decode for Transaction { fn decode(input: &mut I) -> Result { let (call, signature) = TransactionDecodeAs::decode(input)?; let mapped_call = Call::from(call.clone()); Ok(Self { call, mapped_call, signature }) } } #[cfg(feature = "serde")] mod _serde { use scale::Encode; use serde::ser::*; use super::*; impl, Extra: 'static + TransactionMember> Serialize for Transaction { fn serialize(&self, serializer: S) -> Result { let encoded = self.encode(); 
serializer.serialize_bytes(&encoded) } } #[cfg(feature = "std")] use serde::de::*; #[cfg(feature = "std")] impl< 'a, Call: 'static + TransactionMember + From, Extra: 'static + TransactionMember, > Deserialize<'a> for Transaction { fn deserialize>(de: D) -> Result { let bytes = sp_core::bytes::deserialize(de)?; ::decode(&mut &bytes[..]) .map_err(|e| serde::de::Error::custom(format!("invalid transaction: {e}"))) } } } impl< Call: 'static + TransactionMember + From + TryInto, Extra: 'static + TransactionMember, > sp_runtime::traits::Extrinsic for Transaction { type Call = Call; type SignaturePayload = (SeraiAddress, Signature, Extra); fn is_signed(&self) -> Option { Some(self.signature.is_some()) } fn new(call: Call, signature: Option) -> Option { Some(Self { call: call.clone().try_into().ok()?, mapped_call: call, signature }) } } impl< Call: 'static + TransactionMember + From + TryInto, Extra: 'static + TransactionMember, > frame_support::sp_runtime::traits::ExtrinsicCall for Transaction { type Call = Call; fn call(&self) -> &Call { &self.mapped_call } } impl< Call: 'static + TransactionMember + From + GetDispatchInfo, Extra: 'static + TransactionMember, > GetDispatchInfo for Transaction { fn get_dispatch_info(&self) -> frame_support::dispatch::DispatchInfo { self.mapped_call.get_dispatch_info() } } use sp_runtime::generic::ExtrinsicFormat; impl< Call: 'static + TransactionMember + From + sp_runtime::traits::Dispatchable, Extra: 'static + TransactionMember + sp_runtime::traits::TransactionExtension, > sp_runtime::traits::BlindCheckable for Transaction { type Checked = sp_runtime::generic::CheckedExtrinsic; fn check( self, ) -> Result { Ok(match self.signature { Some((signer, signature, extra)) => { if !signature .verify((&self.call, &extra, extra.implicit()?).encode().as_slice(), &signer.into()) { Err(sp_runtime::transaction_validity::InvalidTransaction::BadProof)? 
} sp_runtime::generic::CheckedExtrinsic { format: ExtrinsicFormat::Signed(signer.into(), extra), function: self.mapped_call, } } None => sp_runtime::generic::CheckedExtrinsic { format: ExtrinsicFormat::Bare, function: self.mapped_call, }, }) } } impl< Call: 'static + TransactionMember + From + TryInto, Extra: 'static + TransactionMember, > frame_support::traits::InherentBuilder for Transaction { /// Panics if the inherent isn't supported. // TODO: Don't panic here fn new_inherent(call: Self::Call) -> Self { sp_runtime::traits::Extrinsic::new(call, None).expect("trying to build an unsupported inherent") } } ================================================ FILE: substrate/abi/src/validator_sets.rs ================================================ use sp_core::{ConstU32, bounded::BoundedVec}; pub use serai_validator_sets_primitives as primitives; use serai_primitives::*; use serai_validator_sets_primitives::*; #[derive( Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking, )] #[cfg_attr(feature = "serde", derive(serde::Serialize))] #[cfg_attr(all(feature = "std", feature = "serde"), derive(serde::Deserialize))] pub enum Call { set_keys { network: ExternalNetworkId, removed_participants: BoundedVec>, key_pair: KeyPair, signature: Signature, }, report_slashes { network: ExternalNetworkId, slashes: BoundedVec<(SeraiAddress, u32), ConstU32<{ MAX_KEY_SHARES_PER_SET / 3 }>>, signature: Signature, }, allocate { network: NetworkId, amount: Amount, }, deallocate { network: NetworkId, amount: Amount, }, claim_deallocation { network: NetworkId, session: Session, }, } #[derive( Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale::DecodeWithMemTracking, )] #[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize, borsh::BorshDeserialize))] #[cfg_attr(feature = "serde", derive(serde::Serialize))] #[cfg_attr(all(feature = "std", feature = "serde"), derive(serde::Deserialize))] pub enum Event { NewSet { set: ValidatorSet, }, 
ParticipantRemoved { set: ValidatorSet, removed: SeraiAddress, }, KeyGen { set: ExternalValidatorSet, key_pair: KeyPair, }, AcceptedHandover { set: ValidatorSet, }, SetRetired { set: ValidatorSet, }, AllocationIncreased { validator: SeraiAddress, network: NetworkId, amount: Amount, }, AllocationDecreased { validator: SeraiAddress, network: NetworkId, amount: Amount, delayed_until: Option, }, DeallocationClaimed { validator: SeraiAddress, network: NetworkId, session: Session, }, } ================================================ FILE: substrate/client/Cargo.toml ================================================ [package] name = "serai-client" version = "0.1.0" description = "Client library for the Serai network" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/client" authors = ["Luke Parker "] keywords = ["serai"] edition = "2021" rust-version = "1.82" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] zeroize = "^1.5" thiserror = { version = "1", optional = true } hex = "0.4" scale = { package = "parity-scale-codec", version = "3" } serde = { version = "1", features = ["derive"], optional = true } serde_json = { version = "1", optional = true } serai-abi = { path = "../abi", version = "0.1" } multiaddr = { version = "0.18", optional = true } sp-core = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", optional = true } sp-runtime = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", optional = true } frame-system = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", optional = true } async-lock = "3" simple-request = { path = "../../common/request", version = "0.1", optional = true } bitcoin = { version = "0.32", optional = true } dalek-ff-group = { path = "../../crypto/dalek-ff-group", 
optional = true } ciphersuite = { path = "../../crypto/ciphersuite", version = "0.4", optional = true } monero-wallet = { git = "https://github.com/monero-oxide/monero-oxide", rev = "32e6b5fe5ba9e1ea3e68da882550005122a11d22", version = "0.1.0", default-features = false, features = ["std"], optional = true } [dev-dependencies] rand_core = "0.6" hex = "0.4" blake2 = "0.10" dalek-ff-group = { path = "../../crypto/dalek-ff-group" } ciphersuite = { path = "../../crypto/ciphersuite" } dkg-musig = { path = "../../crypto/dkg/musig" } frost = { package = "modular-frost", path = "../../crypto/frost", features = ["tests"] } schnorrkel = { path = "../../crypto/schnorrkel", package = "frost-schnorrkel" } tokio = "1" dockertest = "0.5" serai-docker-tests = { path = "../../tests/docker" } [features] serai = ["thiserror", "serde", "serde_json", "serai-abi/serde", "multiaddr", "sp-core", "sp-runtime", "frame-system", "simple-request"] borsh = ["serai-abi/borsh"] networks = [] bitcoin = ["networks", "dep:bitcoin"] monero = ["networks", "dalek-ff-group", "ciphersuite", "monero-wallet"] # Assumes the default usage is to use Serai as a DEX, which doesn't actually # require connecting to a Serai node default = ["bitcoin", "monero"] ================================================ FILE: substrate/client/LICENSE ================================================ MIT License Copyright (c) 2022-2023 Luke Parker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: substrate/client/src/lib.rs ================================================ #![expect(clippy::cast_possible_truncation)] #[cfg(feature = "networks")] pub mod networks; #[cfg(feature = "serai")] mod serai; #[cfg(feature = "serai")] pub use serai::*; #[cfg(not(feature = "serai"))] pub use serai_abi::primitives; #[cfg(not(feature = "serai"))] mod other_primitives { pub mod coins { pub use serai_abi::coins::primitives; } pub mod validator_sets { pub use serai_abi::validator_sets::primitives; } pub mod in_instructions { pub use serai_abi::in_instructions::primitives; } } #[cfg(not(feature = "serai"))] pub use other_primitives::*; #[cfg(test)] mod tests; ================================================ FILE: substrate/client/src/networks/bitcoin.rs ================================================ use core::{str::FromStr, fmt}; use scale::{Encode, Decode}; use bitcoin::{ hashes::{Hash as HashTrait, hash160::Hash}, PubkeyHash, ScriptHash, network::Network, WitnessVersion, WitnessProgram, ScriptBuf, address::{AddressType, NetworkChecked, Address as BAddress}, }; #[derive(Clone, Eq, Debug)] pub struct Address(ScriptBuf); impl PartialEq for Address { fn eq(&self, other: &Self) -> bool { // Since Serai defines the Bitcoin-address specification as a variant of the script alone, // define equivalency as the script alone self.0 == other.0 } } impl From
for ScriptBuf { fn from(addr: Address) -> ScriptBuf { addr.0 } } impl FromStr for Address { type Err = (); fn from_str(str: &str) -> Result { Address::new( BAddress::from_str(str) .map_err(|_| ())? .require_network(Network::Bitcoin) .map_err(|_| ())? .script_pubkey(), ) .ok_or(()) } } impl fmt::Display for Address { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { BAddress::::from_script(&self.0, Network::Bitcoin) .map_err(|_| fmt::Error)? .fmt(f) } } // SCALE-encoded variant of Monero addresses. #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] enum EncodedAddress { P2PKH([u8; 20]), P2SH([u8; 20]), P2WPKH([u8; 20]), P2WSH([u8; 32]), P2TR([u8; 32]), } impl TryFrom> for Address { type Error = (); fn try_from(data: Vec) -> Result { Ok(Address(match EncodedAddress::decode(&mut data.as_ref()).map_err(|_| ())? { EncodedAddress::P2PKH(hash) => { ScriptBuf::new_p2pkh(&PubkeyHash::from_raw_hash(Hash::from_byte_array(hash))) } EncodedAddress::P2SH(hash) => { ScriptBuf::new_p2sh(&ScriptHash::from_raw_hash(Hash::from_byte_array(hash))) } EncodedAddress::P2WPKH(hash) => { ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V0, &hash).unwrap()) } EncodedAddress::P2WSH(hash) => { ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V0, &hash).unwrap()) } EncodedAddress::P2TR(key) => { ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V1, &key).unwrap()) } })) } } fn try_to_vec(addr: &Address) -> Result, ()> { let parsed_addr = BAddress::::from_script(&addr.0, Network::Bitcoin).map_err(|_| ())?; Ok( (match parsed_addr.address_type() { Some(AddressType::P2pkh) => { EncodedAddress::P2PKH(*parsed_addr.pubkey_hash().unwrap().as_raw_hash().as_byte_array()) } Some(AddressType::P2sh) => { EncodedAddress::P2SH(*parsed_addr.script_hash().unwrap().as_raw_hash().as_byte_array()) } Some(AddressType::P2wpkh) => { let program = parsed_addr.witness_program().ok_or(())?; let program = program.program().as_bytes(); 
EncodedAddress::P2WPKH(program.try_into().map_err(|_| ())?) } Some(AddressType::P2wsh) => { let program = parsed_addr.witness_program().ok_or(())?; let program = program.program().as_bytes(); EncodedAddress::P2WSH(program.try_into().map_err(|_| ())?) } Some(AddressType::P2tr) => { let program = parsed_addr.witness_program().ok_or(())?; let program = program.program().as_bytes(); EncodedAddress::P2TR(program.try_into().map_err(|_| ())?) } _ => Err(())?, }) .encode(), ) } impl From
for Vec { fn from(addr: Address) -> Vec { // Safe since only encodable addresses can be created try_to_vec(&addr).unwrap() } } impl Address { pub fn new(address: ScriptBuf) -> Option { let res = Self(address); if try_to_vec(&res).is_ok() { return Some(res); } None } } ================================================ FILE: substrate/client/src/networks/mod.rs ================================================ #[cfg(feature = "bitcoin")] pub mod bitcoin; #[cfg(feature = "monero")] pub mod monero; ================================================ FILE: substrate/client/src/networks/monero.rs ================================================ use core::{str::FromStr, fmt}; use scale::{Encode, Decode}; use dalek_ff_group::Ed25519; use ciphersuite::Ciphersuite; use monero_wallet::address::{AddressError, Network, AddressType, MoneroAddress}; #[derive(Clone, PartialEq, Eq, Debug)] pub struct Address(MoneroAddress); impl Address { pub fn new(address: MoneroAddress) -> Option
{ if address.payment_id().is_some() { return None; } Some(Address(address)) } } impl FromStr for Address { type Err = AddressError; fn from_str(str: &str) -> Result { MoneroAddress::from_str(Network::Mainnet, str).map(Address) } } impl fmt::Display for Address { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.0.fmt(f) } } // SCALE-encoded variant of Monero addresses. #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] enum EncodedAddressType { Legacy, Subaddress, Featured(u8), } #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] struct EncodedAddress { kind: EncodedAddressType, spend: [u8; 32], view: [u8; 32], } impl TryFrom> for Address { type Error = (); fn try_from(data: Vec) -> Result { // Decode as SCALE let addr = EncodedAddress::decode(&mut data.as_ref()).map_err(|_| ())?; // Convert over Ok(Address(MoneroAddress::new( Network::Mainnet, match addr.kind { EncodedAddressType::Legacy => AddressType::Legacy, EncodedAddressType::Subaddress => AddressType::Subaddress, EncodedAddressType::Featured(flags) => { let subaddress = (flags & 1) != 0; let integrated = (flags & (1 << 1)) != 0; let guaranteed = (flags & (1 << 2)) != 0; if integrated { Err(())?; } AddressType::Featured { subaddress, payment_id: None, guaranteed } } }, Ed25519::read_G::<&[u8]>(&mut addr.spend.as_ref()).map_err(|_| ())?.0, Ed25519::read_G::<&[u8]>(&mut addr.view.as_ref()).map_err(|_| ())?.0, ))) } } #[allow(clippy::from_over_into)] impl Into for Address { fn into(self) -> MoneroAddress { self.0 } } #[allow(clippy::from_over_into)] impl Into> for Address { fn into(self) -> Vec { EncodedAddress { kind: match self.0.kind() { AddressType::Legacy => EncodedAddressType::Legacy, AddressType::LegacyIntegrated(_) => { panic!("integrated address became Serai Monero address") } AddressType::Subaddress => EncodedAddressType::Subaddress, AddressType::Featured { subaddress, payment_id, guaranteed } => { debug_assert!(payment_id.is_none()); 
EncodedAddressType::Featured(u8::from(*subaddress) + (u8::from(*guaranteed) << 2)) } }, spend: self.0.spend().compress().0, view: self.0.view().compress().0, } .encode() } } ================================================ FILE: substrate/client/src/serai/coins.rs ================================================ use scale::Encode; use serai_abi::primitives::{SeraiAddress, Amount, Coin, Balance}; pub use serai_abi::coins::primitives; use primitives::OutInstructionWithBalance; use crate::{TemporalSerai, SeraiError}; const PALLET: &str = "Coins"; pub type CoinsEvent = serai_abi::coins::Event; #[derive(Clone, Copy)] pub struct SeraiCoins<'a>(pub(crate) &'a TemporalSerai<'a>); impl<'a> SeraiCoins<'a> { pub async fn mint_events(&self) -> Result, SeraiError> { self .0 .events(|event| { if let serai_abi::Event::Coins(event) = event { if matches!(event, CoinsEvent::Mint { .. }) { Some(event.clone()) } else { None } } else { None } }) .await } pub async fn burn_with_instruction_events(&self) -> Result, SeraiError> { self .0 .events(|event| { if let serai_abi::Event::Coins(event) = event { if matches!(event, CoinsEvent::BurnWithInstruction { .. }) { Some(event.clone()) } else { None } } else { None } }) .await } pub async fn coin_supply(&self, coin: Coin) -> Result { Ok(self.0.storage(PALLET, "Supply", coin).await?.unwrap_or(Amount(0))) } pub async fn coin_balance( &self, coin: Coin, address: SeraiAddress, ) -> Result { Ok( self .0 .storage( PALLET, "Balances", (sp_core::hashing::blake2_128(&address.encode()), &address.0, coin), ) .await? 
.unwrap_or(Amount(0)), ) } pub fn transfer(to: SeraiAddress, balance: Balance) -> serai_abi::Call { serai_abi::Call::Coins(serai_abi::coins::Call::transfer { to, balance }) } pub fn burn(balance: Balance) -> serai_abi::Call { serai_abi::Call::Coins(serai_abi::coins::Call::burn { balance }) } pub fn burn_with_instruction(instruction: OutInstructionWithBalance) -> serai_abi::Call { serai_abi::Call::Coins(serai_abi::coins::Call::burn_with_instruction { instruction }) } } ================================================ FILE: substrate/client/src/serai/dex.rs ================================================ use sp_core::bounded_vec::BoundedVec; use serai_abi::primitives::{Amount, Coin, ExternalCoin, SeraiAddress}; use crate::{SeraiError, TemporalSerai}; pub type DexEvent = serai_abi::dex::Event; const PALLET: &str = "Dex"; #[derive(Clone, Copy)] pub struct SeraiDex<'a>(pub(crate) &'a TemporalSerai<'a>); impl<'a> SeraiDex<'a> { pub async fn events(&self) -> Result, SeraiError> { self .0 .events( |event| if let serai_abi::Event::Dex(event) = event { Some(event.clone()) } else { None }, ) .await } pub fn add_liquidity( coin: ExternalCoin, coin_amount: Amount, sri_amount: Amount, min_coin_amount: Amount, min_sri_amount: Amount, address: SeraiAddress, ) -> serai_abi::Call { serai_abi::Call::Dex(serai_abi::dex::Call::add_liquidity { coin, coin_desired: coin_amount.0, sri_desired: sri_amount.0, coin_min: min_coin_amount.0, sri_min: min_sri_amount.0, mint_to: address, }) } pub fn swap( from_coin: Coin, to_coin: Coin, amount_in: Amount, amount_out_min: Amount, address: SeraiAddress, ) -> serai_abi::Call { let path = if to_coin.is_native() { BoundedVec::try_from(vec![from_coin, Coin::Serai]).unwrap() } else if from_coin.is_native() { BoundedVec::try_from(vec![Coin::Serai, to_coin]).unwrap() } else { BoundedVec::try_from(vec![from_coin, Coin::Serai, to_coin]).unwrap() }; serai_abi::Call::Dex(serai_abi::dex::Call::swap_exact_tokens_for_tokens { path, amount_in: amount_in.0, 
amount_out_min: amount_out_min.0, send_to: address, }) } /// Returns the reserves of `coin:SRI` pool. pub async fn get_reserves( &self, coin: ExternalCoin, ) -> Result, SeraiError> { self.0.runtime_api("DexApi_get_reserves", (Coin::from(coin), Coin::Serai)).await } pub async fn oracle_value(&self, coin: ExternalCoin) -> Result, SeraiError> { self.0.storage(PALLET, "SecurityOracleValue", coin).await } } ================================================ FILE: substrate/client/src/serai/genesis_liquidity.rs ================================================ pub use serai_abi::genesis_liquidity::primitives; use primitives::{Values, LiquidityAmount}; use serai_abi::primitives::*; use sp_core::sr25519::Signature; use scale::Encode; use crate::{Serai, SeraiError, TemporalSerai, Transaction}; pub type GenesisLiquidityEvent = serai_abi::genesis_liquidity::Event; const PALLET: &str = "GenesisLiquidity"; #[derive(Clone, Copy)] pub struct SeraiGenesisLiquidity<'a>(pub(crate) &'a TemporalSerai<'a>); impl<'a> SeraiGenesisLiquidity<'a> { pub async fn events(&self) -> Result, SeraiError> { self .0 .events(|event| { if let serai_abi::Event::GenesisLiquidity(event) = event { Some(event.clone()) } else { None } }) .await } pub fn oraclize_values(values: Values, signature: Signature) -> Transaction { Serai::unsigned(serai_abi::Call::GenesisLiquidity( serai_abi::genesis_liquidity::Call::oraclize_values { values, signature }, )) } pub fn remove_coin_liquidity(balance: ExternalBalance) -> serai_abi::Call { serai_abi::Call::GenesisLiquidity(serai_abi::genesis_liquidity::Call::remove_coin_liquidity { balance, }) } pub async fn liquidity( &self, address: &SeraiAddress, coin: ExternalCoin, ) -> Result { Ok( self .0 .storage( PALLET, "Liquidity", (coin, sp_core::hashing::blake2_128(&address.encode()), &address.0), ) .await? 
.unwrap_or(LiquidityAmount::zero()), ) } pub async fn supply(&self, coin: ExternalCoin) -> Result { Ok(self.0.storage(PALLET, "Supply", coin).await?.unwrap_or(LiquidityAmount::zero())) } pub async fn genesis_complete_block(&self) -> Result, SeraiError> { self.0.storage(PALLET, "GenesisCompleteBlock", ()).await } } ================================================ FILE: substrate/client/src/serai/in_instructions.rs ================================================ pub use serai_abi::in_instructions::primitives; use primitives::SignedBatch; use crate::{ primitives::{BlockHash, ExternalNetworkId}, Transaction, SeraiError, Serai, TemporalSerai, }; pub type InInstructionsEvent = serai_abi::in_instructions::Event; const PALLET: &str = "InInstructions"; #[derive(Clone, Copy)] pub struct SeraiInInstructions<'a>(pub(crate) &'a TemporalSerai<'a>); impl<'a> SeraiInInstructions<'a> { pub async fn latest_block_for_network( &self, network: ExternalNetworkId, ) -> Result, SeraiError> { self.0.storage(PALLET, "LatestNetworkBlock", network).await } pub async fn last_batch_for_network( &self, network: ExternalNetworkId, ) -> Result, SeraiError> { self.0.storage(PALLET, "LastBatch", network).await } pub async fn batch_events(&self) -> Result, SeraiError> { self .0 .events(|event| { if let serai_abi::Event::InInstructions(event) = event { if matches!(event, InInstructionsEvent::Batch { .. 
}) { Some(event.clone()) } else { None } } else { None } }) .await } pub fn execute_batch(batch: SignedBatch) -> Transaction { Serai::unsigned(serai_abi::Call::InInstructions( serai_abi::in_instructions::Call::execute_batch { batch }, )) } } ================================================ FILE: substrate/client/src/serai/liquidity_tokens.rs ================================================ use scale::Encode; use serai_abi::primitives::{Amount, ExternalBalance, ExternalCoin, SeraiAddress}; use crate::{TemporalSerai, SeraiError}; const PALLET: &str = "LiquidityTokens"; #[derive(Clone, Copy)] pub struct SeraiLiquidityTokens<'a>(pub(crate) &'a TemporalSerai<'a>); impl<'a> SeraiLiquidityTokens<'a> { pub async fn token_supply(&self, coin: ExternalCoin) -> Result { Ok(self.0.storage(PALLET, "Supply", coin).await?.unwrap_or(Amount(0))) } pub async fn token_balance( &self, coin: ExternalCoin, address: SeraiAddress, ) -> Result { Ok( self .0 .storage( PALLET, "Balances", (sp_core::hashing::blake2_128(&address.encode()), &address.0, coin), ) .await? 
.unwrap_or(Amount(0)), ) } pub fn transfer(to: SeraiAddress, balance: ExternalBalance) -> serai_abi::Call { serai_abi::Call::LiquidityTokens(serai_abi::liquidity_tokens::Call::transfer { to, balance: balance.into(), }) } pub fn burn(balance: ExternalBalance) -> serai_abi::Call { serai_abi::Call::LiquidityTokens(serai_abi::liquidity_tokens::Call::burn { balance: balance.into(), }) } } ================================================ FILE: substrate/client/src/serai/mod.rs ================================================ use thiserror::Error; use async_lock::RwLock; use simple_request::{hyper, Request, Client}; use scale::{Decode, Encode}; use serde::{Serialize, Deserialize, de::DeserializeOwned}; pub use sp_core::{ Pair as PairTrait, sr25519::{Public, Pair}, }; pub use serai_abi as abi; pub use abi::{primitives, Transaction}; use abi::*; pub use primitives::{SeraiAddress, Signature, Amount}; use primitives::{Header, NetworkId}; pub mod coins; pub use coins::SeraiCoins; pub mod dex; pub use dex::SeraiDex; pub mod in_instructions; pub use in_instructions::SeraiInInstructions; pub mod validator_sets; pub use validator_sets::SeraiValidatorSets; pub mod genesis_liquidity; pub use genesis_liquidity::SeraiGenesisLiquidity; pub mod liquidity_tokens; pub use liquidity_tokens::SeraiLiquidityTokens; #[derive(Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode)] pub struct Block { pub header: Header, pub transactions: Vec, } impl Block { pub fn hash(&self) -> [u8; 32] { self.header.hash().into() } pub fn number(&self) -> u64 { self.header.number } /// Returns the time of this block, set by its producer, in milliseconds since the epoch. 
pub fn time(&self) -> Result { for transaction in &self.transactions { if let Call::Timestamp(timestamp::Call::set { now }) = transaction.call() { return Ok(*now); } } Err(SeraiError::InvalidNode("no time was present in block".to_string())) } } #[derive(Error, Debug)] pub enum SeraiError { #[error("failed to communicate with serai")] ConnectionError, #[error("node is faulty: {0}")] InvalidNode(String), #[error("error in response: {0}")] ErrorInResponse(String), #[error("serai-client library was intended for a different runtime version: {0}")] InvalidRuntime(String), } #[derive(Clone)] pub struct Serai { url: String, client: Client, genesis: [u8; 32], } type EventsInBlock = Vec>; pub struct TemporalSerai<'a> { serai: &'a Serai, block: [u8; 32], events: RwLock>, } impl<'a> Clone for TemporalSerai<'a> { fn clone(&self) -> Self { Self { serai: self.serai, block: self.block, events: RwLock::new(None) } } } impl Serai { pub async fn call( &self, method: &str, params: Req, ) -> Result { let request = Request::from( hyper::Request::post(&self.url) .header("Content-Type", "application/json") .body( serde_json::to_vec( &serde_json::json!({ "jsonrpc": "2.0", "id": 1, "method": method, "params": params }), ) .unwrap() .into(), ) .unwrap(), ); #[derive(Deserialize)] pub struct Error { message: String, } #[derive(Deserialize)] #[serde(untagged)] enum RpcResponse { Ok { result: T }, Err { error: Error }, } let mut res = self .client .request(request) .await .map_err(|_| SeraiError::ConnectionError)? 
.body() .await .map_err(|_| SeraiError::ConnectionError)?; let res: RpcResponse = serde_json::from_reader(&mut res).map_err(|e| { SeraiError::InvalidRuntime(format!( "response was a different type than expected: {:?}", e.classify() )) })?; match res { RpcResponse::Ok { result } => Ok(result), RpcResponse::Err { error } => Err(SeraiError::ErrorInResponse(error.message)), } } fn hex_decode(str: String) -> Result, SeraiError> { (if let Some(stripped) = str.strip_prefix("0x") { hex::decode(stripped) } else { hex::decode(str) }) .map_err(|_| SeraiError::InvalidNode("expected hex from node wasn't hex".to_string())) } pub async fn block_hash(&self, number: u64) -> Result, SeraiError> { let hash: Option = self.call("chain_getBlockHash", [number]).await?; let Some(hash) = hash else { return Ok(None) }; Self::hex_decode(hash)? .try_into() .map_err(|_| SeraiError::InvalidNode("didn't respond to getBlockHash with hash".to_string())) .map(Some) } pub async fn new(url: String) -> Result { let client = Client::with_connection_pool(); let mut res = Serai { url, client, genesis: [0xfe; 32] }; res.genesis = res.block_hash(0).await?.ok_or_else(|| { SeraiError::InvalidNode("node didn't have the first block's hash".to_string()) })?; Ok(res) } fn unsigned(call: Call) -> Transaction { Transaction::new(call, None) } pub fn sign(&self, signer: &Pair, call: Call, nonce: u32, tip: u64) -> Transaction { const SPEC_VERSION: u32 = 1; const TX_VERSION: u32 = 1; let extra = Extra { era: sp_runtime::generic::Era::Immortal, nonce, tip }; let signature_payload = ( &call, &extra, SignedPayloadExtra { spec_version: SPEC_VERSION, tx_version: TX_VERSION, genesis: self.genesis, mortality_checkpoint: self.genesis, }, ) .encode(); let signature = signer.sign(&signature_payload); Transaction::new(call, Some((signer.public().into(), signature, extra))) } pub async fn publish(&self, tx: &Transaction) -> Result<(), SeraiError> { // Drop the returned hash, which is the hash of the raw extrinsic, as extrinsics 
are allowed // to share hashes and this hash is accordingly useless/unsafe // If we are to return something, it should be block included in and position within block let _: String = self.call("author_submitExtrinsic", [hex::encode(tx.encode())]).await?; Ok(()) } pub async fn latest_finalized_block_hash(&self) -> Result<[u8; 32], SeraiError> { let hash: String = self.call("chain_getFinalizedHead", ()).await?; Self::hex_decode(hash)?.try_into().map_err(|_| { SeraiError::InvalidNode("didn't respond to getFinalizedHead with hash".to_string()) }) } pub async fn header(&self, hash: [u8; 32]) -> Result, SeraiError> { self.call("chain_getHeader", [hex::encode(hash)]).await } pub async fn block(&self, hash: [u8; 32]) -> Result, SeraiError> { let block: Option = self.call("chain_getBlockBin", [hex::encode(hash)]).await?; let Some(block) = block else { return Ok(None) }; let Ok(bytes) = Self::hex_decode(block) else { Err(SeraiError::InvalidNode("didn't return a hex-encoded block".to_string()))? }; let Ok(block) = Block::decode(&mut bytes.as_slice()) else { Err(SeraiError::InvalidNode("didn't return a block".to_string()))? }; Ok(Some(block)) } pub async fn latest_finalized_block(&self) -> Result { let latest = self.latest_finalized_block_hash().await?; let Some(block) = self.block(latest).await? else { Err(SeraiError::InvalidNode("node didn't have a latest block".to_string()))? }; Ok(block) } // There is no provided method for this // TODO: Add one to Serai pub async fn is_finalized(&self, header: &Header) -> Result { // Get the latest finalized block let finalized = self.latest_finalized_block_hash().await?; // If the latest finalized block is this block, return true if finalized == header.hash().as_ref() { return Ok(true); } let Some(finalized) = self.header(finalized).await? else { Err(SeraiError::InvalidNode("couldn't get finalized header".to_string()))? 
}; // If the finalized block has a lower number, this block can't be finalized if finalized.number < header.number { return Ok(false); } // This block, if finalized, comes before the finalized block // If we request the hash of this block's number, Substrate will return the hash on the main // chain // If that hash is this hash, this block is finalized let Some(hash) = self.block_hash(header.number).await? else { // This is an error since there is a finalized block at this index Err(SeraiError::InvalidNode( "couldn't get block hash for a block number below the finalized block".to_string(), ))? }; Ok(header.hash().as_ref() == hash) } pub async fn finalized_block_by_number(&self, number: u64) -> Result, SeraiError> { let hash = self.block_hash(number).await?; let Some(hash) = hash else { return Ok(None) }; let Some(block) = self.block(hash).await? else { return Ok(None) }; if !self.is_finalized(&block.header).await? { return Ok(None); } Ok(Some(block)) } /* /// A stream which yields whenever new block(s) have been finalized. pub async fn newly_finalized_block( &self, ) -> Result>, SeraiError> { Ok(self.0.rpc().subscribe_finalized_block_headers().await .map_err(|_| SeraiError::ConnectionError)?.map( |next| { next.map_err(|_| SeraiError::ConnectionError)?; Ok(()) }, )) } pub async fn nonce(&self, address: &SeraiAddress) -> Result { self .0 .rpc() .system_account_next_index(&sp_core::sr25519::Public::from(address.0).to_string()) .await .map_err(|_| SeraiError::ConnectionError) } */ /// Create a TemporalSerai bound to whatever is currently the latest finalized block. /// /// The binding occurs at time of call. This does not track the latest finalized block and update /// itself. pub async fn as_of_latest_finalized_block(&self) -> Result { let latest = self.latest_finalized_block_hash().await?; Ok(TemporalSerai { serai: self, block: latest, events: RwLock::new(None) }) } /// Returns a TemporalSerai able to retrieve state as of the specified block. 
pub fn as_of(&self, block: [u8; 32]) -> TemporalSerai { TemporalSerai { serai: self, block, events: RwLock::new(None) } } /// Return the P2P Multiaddrs for the validators of the specified network. pub async fn p2p_validators( &self, network: NetworkId, ) -> Result, SeraiError> { self.call("p2p_validators", [network]).await } } impl<'a> TemporalSerai<'a> { async fn events( &self, filter_map: impl Fn(&Event) -> Option, ) -> Result, SeraiError> { let mut events = self.events.read().await; if events.is_none() { drop(events); let mut events_write = self.events.write().await; if events_write.is_none() { *events_write = Some(self.storage("System", "Events", ()).await?.unwrap_or(vec![])); } drop(events_write); events = self.events.read().await; } let mut res = vec![]; for event in events.as_ref().unwrap() { if let Some(event) = filter_map(&event.event) { res.push(event); } } Ok(res) } async fn storage( &self, pallet: &'static str, name: &'static str, key: K, ) -> Result, SeraiError> { // TODO: Make this const? 
let mut full_key = sp_core::hashing::twox_128(pallet.as_bytes()).to_vec(); full_key.extend(sp_core::hashing::twox_128(name.as_bytes())); full_key.extend(key.encode()); let res: Option = self.serai.call("state_getStorage", [hex::encode(full_key), hex::encode(self.block)]).await?; let Some(res) = res else { return Ok(None) }; let res = Serai::hex_decode(res)?; Ok(Some(R::decode(&mut res.as_slice()).map_err(|_| { SeraiError::InvalidRuntime(format!( "different type present at storage location, raw value: {}", hex::encode(res) )) })?)) } async fn runtime_api( &self, method: &'static str, params: P, ) -> Result { let result: String = self .serai .call( "state_call", [method.to_string(), hex::encode(params.encode()), hex::encode(self.block)], ) .await?; let bytes = Serai::hex_decode(result.clone())?; R::decode(&mut bytes.as_slice()).map_err(|_| { SeraiError::InvalidRuntime(format!( "different type than what is expected to be returned, raw value: {}", hex::encode(result) )) }) } pub fn coins(&'a self) -> SeraiCoins<'a> { SeraiCoins(self) } pub fn dex(&'a self) -> SeraiDex<'a> { SeraiDex(self) } pub fn in_instructions(&'a self) -> SeraiInInstructions<'a> { SeraiInInstructions(self) } pub fn validator_sets(&'a self) -> SeraiValidatorSets<'a> { SeraiValidatorSets(self) } pub fn genesis_liquidity(&'a self) -> SeraiGenesisLiquidity { SeraiGenesisLiquidity(self) } pub fn liquidity_tokens(&'a self) -> SeraiLiquidityTokens { SeraiLiquidityTokens(self) } } ================================================ FILE: substrate/client/src/serai/validator_sets.rs ================================================ use scale::Encode; use sp_core::sr25519::{Public, Signature}; use serai_abi::{primitives::Amount, validator_sets::primitives::ExternalValidatorSet}; pub use serai_abi::validator_sets::primitives; use primitives::{Session, KeyPair}; use crate::{ primitives::{NetworkId, ExternalNetworkId, SeraiAddress}, Transaction, Serai, TemporalSerai, SeraiError, }; const PALLET: &str = 
"ValidatorSets";

pub type ValidatorSetsEvent = serai_abi::validator_sets::Event;

/// A view of the ValidatorSets pallet as of a specific block.
#[derive(Clone, Copy)]
pub struct SeraiValidatorSets<'a>(pub(crate) &'a TemporalSerai<'a>);
impl<'a> SeraiValidatorSets<'a> {
  /// Fetch this block's NewSet events.
  pub async fn new_set_events(&self) -> Result<Vec<ValidatorSetsEvent>, SeraiError> {
    self
      .0
      .events(|event| {
        if let serai_abi::Event::ValidatorSets(event) = event {
          if matches!(event, ValidatorSetsEvent::NewSet { .. }) {
            Some(event.clone())
          } else {
            None
          }
        } else {
          None
        }
      })
      .await
  }

  /// Fetch this block's ParticipantRemoved events.
  pub async fn participant_removed_events(&self) -> Result<Vec<ValidatorSetsEvent>, SeraiError> {
    self
      .0
      .events(|event| {
        if let serai_abi::Event::ValidatorSets(event) = event {
          if matches!(event, ValidatorSetsEvent::ParticipantRemoved { .. }) {
            Some(event.clone())
          } else {
            None
          }
        } else {
          None
        }
      })
      .await
  }

  /// Fetch this block's KeyGen events.
  pub async fn key_gen_events(&self) -> Result<Vec<ValidatorSetsEvent>, SeraiError> {
    self
      .0
      .events(|event| {
        if let serai_abi::Event::ValidatorSets(event) = event {
          if matches!(event, ValidatorSetsEvent::KeyGen { .. }) {
            Some(event.clone())
          } else {
            None
          }
        } else {
          None
        }
      })
      .await
  }

  /// Fetch this block's AcceptedHandover events.
  pub async fn accepted_handover_events(&self) -> Result<Vec<ValidatorSetsEvent>, SeraiError> {
    self
      .0
      .events(|event| {
        if let serai_abi::Event::ValidatorSets(event) = event {
          if matches!(event, ValidatorSetsEvent::AcceptedHandover { .. }) {
            Some(event.clone())
          } else {
            None
          }
        } else {
          None
        }
      })
      .await
  }

  /// Fetch this block's SetRetired events.
  pub async fn set_retired_events(&self) -> Result<Vec<ValidatorSetsEvent>, SeraiError> {
    self
      .0
      .events(|event| {
        if let serai_abi::Event::ValidatorSets(event) = event {
          if matches!(event, ValidatorSetsEvent::SetRetired { .. }) {
            Some(event.clone())
          } else {
            None
          }
        } else {
          None
        }
      })
      .await
  }

  /// Fetch the current session for the specified network.
  pub async fn session(&self, network: NetworkId) -> Result<Option<Session>, SeraiError> {
    self.0.storage(PALLET, "CurrentSession", network).await
  }

  /// Fetch the participants (and their key-share counts) for the specified network.
  pub async fn participants(
    &self,
    network: NetworkId,
  ) -> Result<Option<Vec<(Public, u64)>>, SeraiError> {
    self.0.storage(PALLET, "Participants", network).await
  }

  /// Fetch the allocation required per key share for the specified network.
  pub async fn allocation_per_key_share(
    &self,
    network: NetworkId,
  ) -> Result<Option<Amount>, SeraiError> {
    self.0.storage(PALLET, "AllocationPerKeyShare", network).await
  }

  /// Fetch the total stake allocated to the specified network.
  pub async fn total_allocated_stake(
    &self,
    network: NetworkId,
  ) -> Result<Option<Amount>, SeraiError> {
    self.0.storage(PALLET, "TotalAllocatedStake", network).await
  }

  /// Fetch the stake allocated by this key to the specified network.
  pub async fn allocation(
    &self,
    network: NetworkId,
    key: Public,
  ) -> Result<Option<Amount>, SeraiError> {
    self
      .0
      .storage(
        PALLET,
        "Allocations",
        // Storage key is blake2_128-concat hashed over (network, key)
        (sp_core::hashing::blake2_128(&(network, key).encode()), (network, key)),
      )
      .await
  }

  /// Fetch the pending deallocation for this account, claimable once the session ends.
  pub async fn pending_deallocations(
    &self,
    network: NetworkId,
    account: Public,
    session: Session,
  ) -> Result<Option<Amount>, SeraiError> {
    self
      .0
      .storage(
        PALLET,
        "PendingDeallocations",
        (sp_core::hashing::blake2_128(&(network, account).encode()), (network, account, session)),
      )
      .await
  }

  /// Fetch the validators currently active for the specified network.
  pub async fn active_network_validators(
    &self,
    network: NetworkId,
  ) -> Result<Vec<Public>, SeraiError> {
    self.0.runtime_api("SeraiRuntimeApi_validators", network).await
  }

  // TODO: Store these separately since we almost never need both at once?
pub async fn keys(&self, set: ExternalValidatorSet) -> Result, SeraiError> { self.0.storage(PALLET, "Keys", (sp_core::hashing::twox_64(&set.encode()), set)).await } pub async fn key_pending_slash_report( &self, network: ExternalNetworkId, ) -> Result, SeraiError> { self.0.storage(PALLET, "PendingSlashReport", network).await } pub async fn session_begin_block( &self, network: NetworkId, session: Session, ) -> Result, SeraiError> { self.0.storage(PALLET, "SessionBeginBlock", (network, session)).await } pub fn set_keys( network: ExternalNetworkId, removed_participants: sp_runtime::BoundedVec< SeraiAddress, sp_core::ConstU32<{ primitives::MAX_KEY_SHARES_PER_SET / 3 }>, >, key_pair: KeyPair, signature: Signature, ) -> Transaction { Serai::unsigned(serai_abi::Call::ValidatorSets(serai_abi::validator_sets::Call::set_keys { network, removed_participants, key_pair, signature, })) } pub fn allocate(network: NetworkId, amount: Amount) -> serai_abi::Call { serai_abi::Call::ValidatorSets(serai_abi::validator_sets::Call::allocate { network, amount }) } pub fn deallocate(network: NetworkId, amount: Amount) -> serai_abi::Call { serai_abi::Call::ValidatorSets(serai_abi::validator_sets::Call::deallocate { network, amount }) } pub fn report_slashes( network: ExternalNetworkId, slashes: sp_runtime::BoundedVec< (SeraiAddress, u32), sp_core::ConstU32<{ primitives::MAX_KEY_SHARES_PER_SET / 3 }>, >, signature: Signature, ) -> Transaction { Serai::unsigned(serai_abi::Call::ValidatorSets( serai_abi::validator_sets::Call::report_slashes { network, slashes, signature }, )) } } ================================================ FILE: substrate/client/src/tests/mod.rs ================================================ #[cfg(feature = "networks")] mod networks; ================================================ FILE: substrate/client/src/tests/networks/bitcoin.rs ================================================ // TODO: Test the address back and forth ================================================ 
// NOTE(review): this span of the extract interleaves several test files whose newlines
// were lost in extraction; every internal line boundary cuts a definition mid-token, so
// the code is left byte-identical (flattened) and only comments are touched here.
FILE: substrate/client/src/tests/networks/mod.rs ================================================ #[cfg(feature = "bitcoin")] mod bitcoin; #[cfg(feature = "monero")] mod monero; ================================================ FILE: substrate/client/src/tests/networks/monero.rs ================================================ // TODO: Test the address back and forth ================================================ FILE: substrate/client/tests/batch.rs ================================================ use rand_core::{RngCore, OsRng}; use blake2::{ digest::{consts::U32, Digest}, Blake2b, }; use scale::Encode; use serai_client::{ primitives::{Amount, BlockHash, ExternalBalance, ExternalCoin, SeraiAddress}, in_instructions::{ primitives::{InInstruction, InInstructionWithBalance, Batch}, InInstructionsEvent, }, coins::CoinsEvent, Serai, }; mod common; use common::in_instructions::provide_batch; serai_test!( publish_batch: (|serai: Serai| async move { let id = 0; let mut block_hash = BlockHash([0; 32]); OsRng.fill_bytes(&mut block_hash.0); let mut address = SeraiAddress::new([0; 32]); OsRng.fill_bytes(&mut address.0); let coin = ExternalCoin::Bitcoin; let network = coin.network(); let amount = Amount(OsRng.next_u64().saturating_add(1)); let balance = ExternalBalance { coin, amount }; let batch = Batch { network, id, block: block_hash, instructions: vec![InInstructionWithBalance { instruction: InInstruction::Transfer(address), balance, }], }; let block = provide_batch(&serai, batch.clone()).await; let serai = serai.as_of(block); { let serai = serai.in_instructions(); let latest_finalized = serai.latest_block_for_network(network).await.unwrap(); assert_eq!(latest_finalized, Some(block_hash)); let batches = serai.batch_events().await.unwrap(); assert_eq!( batches, vec![InInstructionsEvent::Batch { network, id, block: block_hash, instructions_hash: Blake2b::::digest(batch.instructions.encode()).into(), }] ); } let serai = serai.coins(); assert_eq!(
// (continuation: mint assertions for batch.rs, then the burn.rs test file)
serai.mint_events().await.unwrap(), vec![CoinsEvent::Mint { to: address, balance: balance.into() }] ); assert_eq!(serai.coin_supply(coin.into()).await.unwrap(), amount); assert_eq!(serai.coin_balance(coin.into(), address).await.unwrap(), amount); }) ); ================================================ FILE: substrate/client/tests/burn.rs ================================================ use rand_core::{RngCore, OsRng}; use blake2::{ digest::{consts::U32, Digest}, Blake2b, }; use scale::Encode; use serai_abi::coins::primitives::OutInstructionWithBalance; use sp_core::Pair; use serai_client::{ primitives::{ Amount, ExternalCoin, ExternalBalance, BlockHash, SeraiAddress, Data, ExternalAddress, insecure_pair_from_name, }, in_instructions::{ InInstructionsEvent, primitives::{InInstruction, InInstructionWithBalance, Batch}, }, coins::{primitives::OutInstruction, CoinsEvent}, Serai, SeraiCoins, }; mod common; use common::{tx::publish_tx, in_instructions::provide_batch}; serai_test!( burn: (|serai: Serai| async move { let id = 0; let mut block_hash = BlockHash([0; 32]); OsRng.fill_bytes(&mut block_hash.0); let pair = insecure_pair_from_name("Dave"); let public = pair.public(); let address = SeraiAddress::from(public); let coin = ExternalCoin::Bitcoin; let network = coin.network(); let amount = Amount(OsRng.next_u64().saturating_add(1)); let balance = ExternalBalance { coin, amount }; let batch = Batch { network, id, block: block_hash, instructions: vec![InInstructionWithBalance { instruction: InInstruction::Transfer(address), balance, }], }; let block = provide_batch(&serai, batch.clone()).await; let instruction = { let serai = serai.as_of(block); let batches = serai.in_instructions().batch_events().await.unwrap(); assert_eq!( batches, vec![InInstructionsEvent::Batch { network, id, block: block_hash, instructions_hash: Blake2b::::digest(batch.instructions.encode()).into(), }] ); assert_eq!( serai.coins().mint_events().await.unwrap(), vec![CoinsEvent::Mint { to: address,
// (continuation: burn.rs burns the minted balance with an OutInstruction, then common/dex.rs helpers)
balance: balance.into() }] ); assert_eq!(serai.coins().coin_supply(coin.into()).await.unwrap(), amount); assert_eq!(serai.coins().coin_balance(coin.into(), address).await.unwrap(), amount); // Now burn it let mut rand_bytes = vec![0; 32]; OsRng.fill_bytes(&mut rand_bytes); let external_address = ExternalAddress::new(rand_bytes).unwrap(); let mut rand_bytes = vec![0; 32]; OsRng.fill_bytes(&mut rand_bytes); let data = Data::new(rand_bytes).unwrap(); OutInstructionWithBalance { balance, instruction: OutInstruction { address: external_address, data: Some(data) }, } }; let block = publish_tx( &serai, &serai.sign(&pair, SeraiCoins::burn_with_instruction(instruction.clone()), 0, 0), ) .await; let serai = serai.as_of(block); let serai = serai.coins(); let events = serai.burn_with_instruction_events().await.unwrap(); assert_eq!(events, vec![CoinsEvent::BurnWithInstruction { from: address, instruction }]); assert_eq!(serai.coin_supply(coin.into()).await.unwrap(), Amount(0)); assert_eq!(serai.coin_balance(coin.into(), address).await.unwrap(), Amount(0)); }) ); ================================================ FILE: substrate/client/tests/common/dex.rs ================================================ use serai_abi::primitives::{Amount, Coin, ExternalCoin}; use serai_client::{Serai, SeraiDex}; use sp_core::{sr25519::Pair, Pair as PairTrait}; use crate::common::tx::publish_tx; #[allow(dead_code)] pub async fn add_liquidity( serai: &Serai, coin: ExternalCoin, coin_amount: Amount, sri_amount: Amount, nonce: u32, pair: Pair, ) -> [u8; 32] { let address = pair.public(); let tx = serai.sign( &pair, SeraiDex::add_liquidity(coin, coin_amount, sri_amount, Amount(1), Amount(1), address.into()), nonce, 0, ); publish_tx(serai, &tx).await } #[allow(dead_code)] pub async fn swap( serai: &Serai, from_coin: Coin, to_coin: Coin, amount_in: Amount, amount_out_min: Amount, nonce: u32, pair: Pair, ) -> [u8; 32] { let address = pair.public(); let tx = serai.sign( &pair, SeraiDex::swap(from_coin,
// (continuation: common/dex.rs swap tail, then common/genesis_liquidity.rs)
to_coin, amount_in, amount_out_min, address.into()), nonce, Default::default(), ); publish_tx(serai, &tx).await } ================================================ FILE: substrate/client/tests/common/genesis_liquidity.rs ================================================ use std::collections::HashMap; use rand_core::{RngCore, OsRng}; use zeroize::Zeroizing; use dalek_ff_group::Ristretto; use ciphersuite::Ciphersuite; use dkg_musig::musig; use schnorrkel::Schnorrkel; use sp_core::Pair as PairTrait; use serai_abi::{ genesis_liquidity::primitives::{oraclize_values_message, Values}, in_instructions::primitives::{Batch, InInstruction, InInstructionWithBalance}, primitives::{ insecure_pair_from_name, Amount, ExternalBalance, BlockHash, ExternalCoin, ExternalNetworkId, NetworkId, SeraiAddress, EXTERNAL_COINS, }, validator_sets::primitives::{musig_context, Session, ValidatorSet}, }; use serai_client::{Serai, SeraiGenesisLiquidity}; use crate::common::{in_instructions::provide_batch, tx::publish_tx}; #[allow(dead_code)] pub async fn set_up_genesis( serai: &Serai, values: &HashMap, ) -> (HashMap>, HashMap) { // make accounts with amounts let mut accounts = HashMap::new(); for coin in EXTERNAL_COINS { // make 5 accounts per coin let mut values = vec![]; for _ in 0 ..
5 { let mut address = SeraiAddress::new([0; 32]); OsRng.fill_bytes(&mut address.0); values.push((address, Amount(OsRng.next_u64() % 10u64.pow(coin.decimals())))); } accounts.insert(coin, values); } // send a batch per coin let mut batch_ids: HashMap = HashMap::new(); for coin in EXTERNAL_COINS { // set up instructions let instructions = accounts[&coin] .iter() .map(|(addr, amount)| InInstructionWithBalance { instruction: InInstruction::GenesisLiquidity(*addr), balance: ExternalBalance { coin, amount: *amount }, }) .collect::>(); // set up block hash let mut block = BlockHash([0; 32]); OsRng.fill_bytes(&mut block.0); // set up batch id batch_ids .entry(coin.network()) .and_modify(|v| { *v += 1; }) .or_insert(0); let batch = Batch { network: coin.network(), id: batch_ids[&coin.network()], block, instructions }; provide_batch(serai, batch).await; } // set values relative to each other. We can do that without checking for genesis period blocks // since we are running in test(fast-epoch) mode. // TODO: Random values here let values = Values { monero: values[&ExternalCoin::Monero], ether: values[&ExternalCoin::Ether], dai: values[&ExternalCoin::Dai], }; set_values(serai, &values).await; (accounts, batch_ids) } #[allow(dead_code)] async fn set_values(serai: &Serai, values: &Values) { // prepare a Musig tx to oraclize the relative values let pair = insecure_pair_from_name("Alice"); let public = pair.public(); // we publish the tx in set 1 let set = ValidatorSet { session: Session(1), network: NetworkId::Serai }; let public_key = ::read_G::<&[u8]>(&mut public.0.as_ref()).unwrap(); let secret_key = ::read_F::<&[u8]>( &mut pair.as_ref().secret.to_bytes()[..
// (continuation: set_values musig signing tail, then common/in_instructions.rs)
32].as_ref(), ) .unwrap(); assert_eq!(Ristretto::generator() * secret_key, public_key); let threshold_keys = musig::(musig_context(set), Zeroizing::new(secret_key), &[public_key]).unwrap(); let sig = frost::tests::sign_without_caching( &mut OsRng, frost::tests::algorithm_machines( &mut OsRng, &Schnorrkel::new(b"substrate"), &HashMap::from([(threshold_keys.params().i(), threshold_keys.into())]), ), &oraclize_values_message(&set, values), ); // oraclize values let _ = publish_tx(serai, &SeraiGenesisLiquidity::oraclize_values(*values, sig.to_bytes().into())) .await; } ================================================ FILE: substrate/client/tests/common/in_instructions.rs ================================================ use rand_core::{RngCore, OsRng}; use blake2::{ digest::{consts::U32, Digest}, Blake2b, }; use scale::Encode; use sp_core::Pair; use serai_client::{ primitives::{insecure_pair_from_name, BlockHash, ExternalBalance, SeraiAddress}, validator_sets::primitives::{ExternalValidatorSet, KeyPair}, in_instructions::{ primitives::{Batch, SignedBatch, batch_message, InInstruction, InInstructionWithBalance}, InInstructionsEvent, }, SeraiInInstructions, Serai, }; use crate::common::{tx::publish_tx, validator_sets::set_keys}; #[allow(dead_code)] pub async fn provide_batch(serai: &Serai, batch: Batch) -> [u8; 32] { let serai_latest = serai.as_of_latest_finalized_block().await.unwrap(); let session = serai_latest.validator_sets().session(batch.network.into()).await.unwrap().unwrap(); let set = ExternalValidatorSet { session, network: batch.network }; let pair = insecure_pair_from_name(&format!("ValidatorSet {set:?}")); let keys = if let Some(keys) = serai_latest.validator_sets().keys(set).await.unwrap() { keys } else { let keys = KeyPair(pair.public(), vec![].try_into().unwrap()); set_keys(serai, set, keys.clone(), &[insecure_pair_from_name("Alice")]).await; keys }; assert_eq!(keys.0, pair.public()); let block = publish_tx( serai,
// (continuation: provide_batch tail, mint_coin, then common/mod.rs)
&SeraiInInstructions::execute_batch(SignedBatch { batch: batch.clone(), signature: pair.sign(&batch_message(&batch)), }), ) .await; let batches = serai.as_of(block).in_instructions().batch_events().await.unwrap(); // TODO: impl From for BatchEvent? assert_eq!( batches, vec![InInstructionsEvent::Batch { network: batch.network, id: batch.id, block: batch.block, instructions_hash: Blake2b::::digest(batch.instructions.encode()).into(), }], ); // TODO: Check the tokens events block } #[allow(dead_code)] pub async fn mint_coin( serai: &Serai, balance: ExternalBalance, batch_id: u32, address: SeraiAddress, ) -> [u8; 32] { let mut block_hash = BlockHash([0; 32]); OsRng.fill_bytes(&mut block_hash.0); let batch = Batch { network: balance.coin.network(), id: batch_id, block: block_hash, instructions: vec![InInstructionWithBalance { instruction: InInstruction::Transfer(address), balance, }], }; provide_batch(serai, batch).await } ================================================ FILE: substrate/client/tests/common/mod.rs ================================================ pub mod tx; pub mod validator_sets; pub mod in_instructions; pub mod dex; pub mod genesis_liquidity; #[macro_export] macro_rules!
// `serai_test!` spins up a fresh Dockerized serai-node per test, polls its RPC until it
// responds (bounded at 60 seconds), and hands the test body a connected `Serai` client.
serai_test { ($($name: ident: $test: expr)*) => { $( #[tokio::test] async fn $name() { use std::collections::HashMap; use dockertest::{ PullPolicy, StartPolicy, LogOptions, LogAction, LogPolicy, LogSource, Image, TestBodySpecification, DockerTest, }; serai_docker_tests::build("serai".to_string()); let handle = concat!("serai_client-serai_node-", stringify!($name)); let composition = TestBodySpecification::with_image( Image::with_repository("serai-dev-serai").pull_policy(PullPolicy::Never), ) .replace_cmd(vec![ "serai-node".to_string(), "--dev".to_string(), "--unsafe-rpc-external".to_string(), "--rpc-cors".to_string(), "all".to_string(), ]) .replace_env( HashMap::from([ ("RUST_LOG".to_string(), "runtime=debug".to_string()), ("KEY".to_string(), " ".to_string()), ]) ) .set_publish_all_ports(true) .set_handle(handle) .set_start_policy(StartPolicy::Strict) .set_log_options(Some(LogOptions { action: LogAction::Forward, policy: LogPolicy::Always, source: LogSource::Both, })); let mut test = DockerTest::new().with_network(dockertest::Network::Isolated); test.provide_container(composition); test.run_async(|ops| async move { // Sleep until the Substrate RPC starts let mut ticks = 0; let serai_rpc = loop { // Bound execution to 60 seconds if ticks > 60 { panic!("Serai node didn't start within 60 seconds"); } tokio::time::sleep(core::time::Duration::from_secs(1)).await; ticks += 1; let Some(serai_rpc) = ops.handle(handle).host_port(9944) else { continue }; let serai_rpc = format!("http://{}:{}", serai_rpc.0, serai_rpc.1); let Ok(client) = Serai::new(serai_rpc.clone()).await else { continue }; if client.latest_finalized_block_hash().await.is_err() { continue; } break serai_rpc; }; #[allow(clippy::redundant_closure_call)] $test(Serai::new(serai_rpc).await.unwrap()).await; }).await; } )* } } #[macro_export] macro_rules!
// Same harness as `serai_test!` but uses the "serai-fast-epoch" image; it grabs the RPC
// port immediately and then polls the client for up to 60 seconds.
serai_test_fast_epoch { ($($name: ident: $test: expr)*) => { $( #[tokio::test] async fn $name() { use std::collections::HashMap; use dockertest::{ PullPolicy, StartPolicy, LogOptions, LogAction, LogPolicy, LogSource, Image, TestBodySpecification, DockerTest, }; serai_docker_tests::build("serai-fast-epoch".to_string()); let handle = concat!("serai_client-serai_node-", stringify!($name)); let composition = TestBodySpecification::with_image( Image::with_repository("serai-dev-serai-fast-epoch").pull_policy(PullPolicy::Never), ) .replace_cmd(vec![ "serai-node".to_string(), "--dev".to_string(), "--unsafe-rpc-external".to_string(), "--rpc-cors".to_string(), "all".to_string(), ]) .replace_env( HashMap::from([ ("RUST_LOG".to_string(), "runtime=debug".to_string()), ("KEY".to_string(), " ".to_string()), ]) ) .set_publish_all_ports(true) .set_handle(handle) .set_start_policy(StartPolicy::Strict) .set_log_options(Some(LogOptions { action: LogAction::Forward, policy: LogPolicy::Always, source: LogSource::Both, })); let mut test = DockerTest::new().with_network(dockertest::Network::Isolated); test.provide_container(composition); test.run_async(|ops| async move { // Sleep until the Substrate RPC starts let serai_rpc = ops.handle(handle).host_port(9944).unwrap(); let serai_rpc = format!("http://{}:{}", serai_rpc.0, serai_rpc.1); // Bound execution to 60 seconds for _ in 0 ..
60 { tokio::time::sleep(core::time::Duration::from_secs(1)).await; let Ok(client) = Serai::new(serai_rpc.clone()).await else { continue }; if client.latest_finalized_block_hash().await.is_err() { continue; } break; } #[allow(clippy::redundant_closure_call)] $test(Serai::new(serai_rpc).await.unwrap()).await; }).await; } )* } } ================================================ FILE: substrate/client/tests/common/tx.rs ================================================ use core::time::Duration; use tokio::time::sleep; use serai_client::{Transaction, Serai}; #[allow(dead_code)] pub async fn publish_tx(serai: &Serai, tx: &Transaction) -> [u8; 32] { let mut latest = serai .block(serai.latest_finalized_block_hash().await.unwrap()) .await .unwrap() .unwrap() .number(); serai.publish(tx).await.unwrap(); // Get the block it was included in // TODO: Add an RPC method for this/check the guarantee on the subscription let mut ticks = 0; loop { latest += 1; let block = { let mut block; while { block = serai.finalized_block_by_number(latest).await.unwrap(); block.is_none() } { sleep(Duration::from_secs(1)).await; ticks += 1; if ticks > 60 { panic!("60 seconds without inclusion in a finalized block"); } } block.unwrap() }; for transaction in &block.transactions { if transaction == tx { return block.hash(); } } } } ================================================ FILE: substrate/client/tests/common/validator_sets.rs ================================================ use std::collections::HashMap; use serai_abi::primitives::NetworkId; use zeroize::Zeroizing; use rand_core::OsRng; use sp_core::{sr25519::Pair, Pair as PairTrait}; use dalek_ff_group::Ristretto; use ciphersuite::Ciphersuite; use dkg_musig::musig; use schnorrkel::Schnorrkel; use serai_client::{ validator_sets::{ primitives::{ExternalValidatorSet, KeyPair, musig_context, set_keys_message}, ValidatorSetsEvent, }, Amount, Serai, SeraiValidatorSets, }; use crate::common::tx::publish_tx; #[allow(dead_code)] pub async fn set_keys(
// (continuation: set_keys body — musig over `pairs` signing `set_keys_message` — plus
// allocate_stake/deallocate_stake helpers, then the dex.rs file header)
serai: &Serai, set: ExternalValidatorSet, key_pair: KeyPair, pairs: &[Pair], ) -> [u8; 32] { let mut pub_keys = vec![]; for pair in pairs { let public_key = ::read_G::<&[u8]>(&mut pair.public().0.as_ref()).unwrap(); pub_keys.push(public_key); } let mut threshold_keys = vec![]; for i in 0 .. pairs.len() { let secret_key = ::read_F::<&[u8]>( &mut pairs[i].as_ref().secret.to_bytes()[.. 32].as_ref(), ) .unwrap(); assert_eq!(Ristretto::generator() * secret_key, pub_keys[i]); threshold_keys.push( musig::(musig_context(set.into()), Zeroizing::new(secret_key), &pub_keys).unwrap(), ); } let mut musig_keys = HashMap::new(); for tk in threshold_keys { musig_keys.insert(tk.params().i(), tk.into()); } let sig = frost::tests::sign_without_caching( &mut OsRng, frost::tests::algorithm_machines(&mut OsRng, &Schnorrkel::new(b"substrate"), &musig_keys), &set_keys_message(&set, &[], &key_pair), ); // Set the key pair let block = publish_tx( serai, &SeraiValidatorSets::set_keys( set.network, vec![].try_into().unwrap(), key_pair.clone(), sig.to_bytes().into(), ), ) .await; assert_eq!( serai.as_of(block).validator_sets().key_gen_events().await.unwrap(), vec![ValidatorSetsEvent::KeyGen { set, key_pair: key_pair.clone() }] ); assert_eq!(serai.as_of(block).validator_sets().keys(set).await.unwrap(), Some(key_pair)); block } #[allow(dead_code)] pub async fn allocate_stake( serai: &Serai, network: NetworkId, amount: Amount, pair: &Pair, nonce: u32, ) -> [u8; 32] { // get the call let tx = serai.sign(pair, SeraiValidatorSets::allocate(network, amount), nonce, 0); publish_tx(serai, &tx).await } #[allow(dead_code)] pub async fn deallocate_stake( serai: &Serai, network: NetworkId, amount: Amount, pair: &Pair, nonce: u32, ) -> [u8; 32] { // get the call let tx = serai.sign(pair, SeraiValidatorSets::deallocate(network, amount), nonce, 0); publish_tx(serai, &tx).await } ================================================ FILE: substrate/client/tests/dex.rs
// NOTE(review): this span of the extract holds tests/dex.rs and tests/dht.rs with their
// newlines lost in extraction; internal line boundaries cut expressions mid-token, so
// the code is left byte-identical (flattened) and only comments are touched. The two
// comment fixes below: the Ether swap test said "sriXMR" (copy-paste from the Monero
// test), and the "ether still has 8 / in our codebase" comment was split across lines.
================================================ use rand_core::{RngCore, OsRng}; use sp_core::{Pair as PairTrait, bounded_vec::BoundedVec}; use serai_abi::in_instructions::primitives::DexCall; use serai_client::{ primitives::{ Amount, Coin, Balance, BlockHash, insecure_pair_from_name, ExternalAddress, SeraiAddress, ExternalCoin, ExternalBalance, }, in_instructions::primitives::{ InInstruction, InInstructionWithBalance, Batch, IN_INSTRUCTION_EXECUTOR, OutAddress, }, dex::DexEvent, Serai, }; mod common; use common::{ in_instructions::{provide_batch, mint_coin}, dex::{add_liquidity as common_add_liquidity, swap as common_swap}, }; // TODO: Calculate all constants in the following tests // TODO: Check LP token, coin balances // TODO: Modularize common code // TODO: Check Transfer events serai_test!( add_liquidity: (|serai: Serai| async move { let coin = ExternalCoin::Monero; let pair = insecure_pair_from_name("Ferdie"); // mint sriXMR in the account so that we can add liq. // Ferdie account is already pre-funded with SRI. mint_coin( &serai, ExternalBalance { coin, amount: Amount(100_000_000_000_000) }, 0, pair.clone().public().into(), ) .await; // add liquidity let coin_amount = Amount(50_000_000_000_000); let sri_amount = Amount(50_000_000_000_000); let block = common_add_liquidity(&serai, coin, coin_amount, sri_amount, 0, pair.clone() ).await; // get only the add liq events let mut events = serai.as_of(block).dex().events().await.unwrap(); events.retain(|e| matches!(e, DexEvent::LiquidityAdded { .. })); assert_eq!( events, vec![DexEvent::LiquidityAdded { who: pair.public().into(), mint_to: pair.public().into(), pool_id: coin, coin_amount: coin_amount.0, sri_amount: sri_amount.0, lp_token_minted: 49_999999990000 }] ); }) // Tests coin -> SRI and SRI -> coin swaps. swap_coin_to_sri: (|serai: Serai| async move { let coin = ExternalCoin::Ether; let pair = insecure_pair_from_name("Ferdie"); // mint sriETH in the account so that we can add liq.
// Ferdie account is already pre-funded with SRI. mint_coin( &serai, ExternalBalance { coin, amount: Amount(100_000_000_000_000) }, 0, pair.clone().public().into(), ) .await; // add liquidity common_add_liquidity(&serai, coin, Amount(50_000_000_000_000), Amount(50_000_000_000_000), 0, pair.clone() ).await; // now that we have our liquid pool, swap some coin to SRI. let mut amount_in = Amount(25_000_000_000_000); let mut block = common_swap( &serai, coin.into(), Coin::Serai, amount_in, Amount(1), 1, pair.clone()) .await; // get only the swap events let mut events = serai.as_of(block).dex().events().await.unwrap(); events.retain(|e| matches!(e, DexEvent::SwapExecuted { .. })); let mut path = BoundedVec::try_from(vec![coin.into(), Coin::Serai]).unwrap(); assert_eq!( events, vec![DexEvent::SwapExecuted { who: pair.clone().public().into(), send_to: pair.public().into(), path, amount_in: amount_in.0, amount_out: 16633299966633 }] ); // now swap some SRI to coin amount_in = Amount(10_000_000_000_000); block = common_swap( &serai, Coin::Serai, coin.into(), amount_in, Amount(1), 2, pair.clone() ).await; // get only the swap events let mut events = serai.as_of(block).dex().events().await.unwrap(); events.retain(|e| matches!(e, DexEvent::SwapExecuted { ..
})); path = BoundedVec::try_from(vec![Coin::Serai, coin.into()]).unwrap(); assert_eq!( events, vec![DexEvent::SwapExecuted { who: pair.clone().public().into(), send_to: pair.public().into(), path, amount_in: amount_in.0, amount_out: 17254428681101 }] ); }) swap_coin_to_coin: (|serai: Serai| async move { let coin1 = ExternalCoin::Monero; let coin2 = ExternalCoin::Dai; let pair = insecure_pair_from_name("Ferdie"); // mint coins mint_coin( &serai, ExternalBalance { coin: coin1, amount: Amount(100_000_000_000_000) }, 0, pair.clone().public().into(), ) .await; mint_coin( &serai, ExternalBalance { coin: coin2, amount: Amount(100_000_000_000_000) }, 0, pair.clone().public().into(), ) .await; // add liquidity to pools common_add_liquidity(&serai, coin1, Amount(50_000_000_000_000), Amount(50_000_000_000_000), 0, pair.clone() ).await; common_add_liquidity(&serai, coin2, Amount(50_000_000_000_000), Amount(50_000_000_000_000), 1, pair.clone() ).await; // swap coin1 -> coin2 let amount_in = Amount(25_000_000_000_000); let block = common_swap( &serai, coin1.into(), coin2.into(), amount_in, Amount(1), 2, pair.clone() ).await; // get only the swap events let mut events = serai.as_of(block).dex().events().await.unwrap(); events.retain(|e| matches!(e, DexEvent::SwapExecuted { .. })); let path = BoundedVec::try_from(vec![coin1.into(), Coin::Serai, coin2.into()]).unwrap(); assert_eq!( events, vec![DexEvent::SwapExecuted { who: pair.clone().public().into(), send_to: pair.public().into(), path, amount_in: amount_in.0, amount_out: 12453103964435, }] ); }) add_liquidity_in_instructions: (|serai: Serai| async move { let coin = ExternalCoin::Bitcoin; let pair = insecure_pair_from_name("Ferdie"); let mut batch_id = 0; // mint sriBTC in the account so that we can add liq. // Ferdie account is already pre-funded with SRI.
mint_coin( &serai, ExternalBalance { coin, amount: Amount(100_000_000_000_000) }, batch_id, pair.clone().public().into(), ) .await; batch_id += 1; // add liquidity common_add_liquidity(&serai, coin, Amount(5_000_000_000_000), Amount(500_000_000_000), 0, pair.clone() ).await; // now that we have our liquid SRI/BTC pool, we can add more liquidity to it via an // InInstruction let mut block_hash = BlockHash([0; 32]); OsRng.fill_bytes(&mut block_hash.0); let batch = Batch { network: coin.network(), id: batch_id, block: block_hash, instructions: vec![InInstructionWithBalance { instruction: InInstruction::Dex(DexCall::SwapAndAddLiquidity(pair.public().into())), balance: ExternalBalance { coin, amount: Amount(20_000_000_000_000) }, }], }; let block = provide_batch(&serai, batch).await; let mut events = serai.as_of(block).dex().events().await.unwrap(); events.retain(|e| matches!(e, DexEvent::LiquidityAdded { .. })); assert_eq!( events, vec![DexEvent::LiquidityAdded { who: IN_INSTRUCTION_EXECUTOR, mint_to: pair.public().into(), pool_id: coin, coin_amount: 10_000_000_000_000, // half of sent amount sri_amount: 111_333_778_668, lp_token_minted: 1_054_092_553_383 }] ); }) swap_in_instructions: (|serai: Serai| async move { let coin1 = ExternalCoin::Monero; let coin2 = ExternalCoin::Ether; let pair = insecure_pair_from_name("Ferdie"); let mut coin1_batch_id = 0; let mut coin2_batch_id = 0; // mint coins mint_coin( &serai, ExternalBalance { coin: coin1, amount: Amount(10_000_000_000_000_000) }, coin1_batch_id, pair.clone().public().into(), ) .await; coin1_batch_id += 1; mint_coin( &serai, ExternalBalance { coin: coin2, amount: Amount(100_000_000_000_000) }, coin2_batch_id, pair.clone().public().into(), ) .await; coin2_batch_id += 1; // add liquidity to pools common_add_liquidity(&serai, coin1, Amount(5_000_000_000_000_000), // monero has 12 decimals Amount(50_000_000_000), 0, pair.clone() ).await; common_add_liquidity(&serai, coin2, Amount(5_000_000_000_000), // ether still has 8 in our codebase
Amount(500_000_000_000), 1, pair.clone() ).await; // rand address bytes let mut rand_bytes = vec![0; 32]; OsRng.fill_bytes(&mut rand_bytes); // XMR -> ETH { // make an out address let out_address = OutAddress::External(ExternalAddress::new(rand_bytes.clone()).unwrap()); // amount is the min out amount let out_balance = Balance { coin: coin2.into(), amount: Amount(1) }; // now that we have our pools, we can try to swap let mut block_hash = BlockHash([0; 32]); OsRng.fill_bytes(&mut block_hash.0); let batch = Batch { network: coin1.network(), id: coin1_batch_id, block: block_hash, instructions: vec![InInstructionWithBalance { instruction: InInstruction::Dex(DexCall::Swap(out_balance, out_address)), balance: ExternalBalance { coin: coin1, amount: Amount(200_000_000_000_000) }, }], }; let block = provide_batch(&serai, batch).await; coin1_batch_id += 1; let mut events = serai.as_of(block).dex().events().await.unwrap(); events.retain(|e| matches!(e, DexEvent::SwapExecuted { .. })); let path = BoundedVec::try_from(vec![coin1.into(), Coin::Serai, coin2.into()]).unwrap(); assert_eq!( events, vec![DexEvent::SwapExecuted { who: IN_INSTRUCTION_EXECUTOR, send_to: IN_INSTRUCTION_EXECUTOR, path, amount_in: 200_000_000_000_000, amount_out: 19_044_944_233 }] ); } // ETH -> sriXMR { // make an out address let out_address = OutAddress::Serai(SeraiAddress::new(rand_bytes.clone().try_into().unwrap())); // amount is the min out amount let out_balance = Balance { coin: coin1.into(), amount: Amount(1) }; // now that we have our pools, we can try to swap let mut block_hash = BlockHash([0; 32]); OsRng.fill_bytes(&mut block_hash.0); let batch = Batch { network: coin2.network(), id: coin2_batch_id, block: block_hash, instructions: vec![InInstructionWithBalance { instruction: InInstruction::Dex(DexCall::Swap(out_balance, out_address.clone())), balance: ExternalBalance { coin: coin2, amount: Amount(200_000_000_000) }, }], }; let block = provide_batch(&serai, batch).await; let mut
events = serai.as_of(block).dex().events().await.unwrap(); events.retain(|e| matches!(e, DexEvent::SwapExecuted { .. })); let path = BoundedVec::try_from(vec![coin2.into(), Coin::Serai, coin1.into()]).unwrap(); assert_eq!( events, vec![DexEvent::SwapExecuted { who: IN_INSTRUCTION_EXECUTOR, send_to: out_address.as_native().unwrap(), path, amount_in: 200_000_000_000, amount_out: 1487294253782353 }] ); } // XMR -> SRI { // make an out address let out_address = OutAddress::Serai(SeraiAddress::new(rand_bytes.try_into().unwrap())); // amount is the min out amount let out_balance = Balance { coin: Coin::Serai, amount: Amount(1) }; // now that we have our pools, we can try to swap let mut block_hash = BlockHash([0; 32]); OsRng.fill_bytes(&mut block_hash.0); let batch = Batch { network: coin1.network(), id: coin1_batch_id, block: block_hash, instructions: vec![InInstructionWithBalance { instruction: InInstruction::Dex(DexCall::Swap(out_balance, out_address.clone())), balance: ExternalBalance { coin: coin1, amount: Amount(100_000_000_000_000) }, }], }; let block = provide_batch(&serai, batch).await; let mut events = serai.as_of(block).dex().events().await.unwrap(); events.retain(|e| matches!(e, DexEvent::SwapExecuted { ..
})); let path = BoundedVec::try_from(vec![coin1.into(), Coin::Serai]).unwrap(); assert_eq!( events, vec![DexEvent::SwapExecuted { who: IN_INSTRUCTION_EXECUTOR, send_to: out_address.as_native().unwrap(), path, amount_in: 100_000_000_000_000, amount_out: 1_762_662_819 }] ); } }) ); ================================================ FILE: substrate/client/tests/dht.rs ================================================ use serai_client::{primitives::ExternalNetworkId, Serai}; #[tokio::test] async fn dht() { use dockertest::{ PullPolicy, StartPolicy, LogOptions, LogAction, LogPolicy, LogSource, Image, TestBodySpecification, DockerTest, }; serai_docker_tests::build("serai".to_string()); let handle = |name: &str| format!("serai_client-serai_node-{name}"); let composition = |name: &str| { TestBodySpecification::with_image( Image::with_repository("serai-dev-serai").pull_policy(PullPolicy::Never), ) .replace_env( [("SERAI_NAME".to_string(), name.to_string()), ("KEY".to_string(), " ".to_string())].into(), ) .set_publish_all_ports(true) .set_handle(handle(name)) .set_start_policy(StartPolicy::Strict) .set_log_options(Some(LogOptions { action: LogAction::Forward, policy: LogPolicy::Always, source: LogSource::Both, })) }; let mut test = DockerTest::new().with_network(dockertest::Network::Isolated); test.provide_container(composition("alice")); test.provide_container(composition("bob")); test.provide_container(composition("charlie")); test.provide_container(composition("dave")); test .run_async(|ops| async move { // Sleep until the Substrate RPC starts let alice = handle("alice"); let serai_rpc = ops.handle(&alice).host_port(9944).unwrap(); let serai_rpc = format!("http://{}:{}", serai_rpc.0, serai_rpc.1); // Sleep for a minute tokio::time::sleep(core::time::Duration::from_secs(60)).await; // Check the DHT has been populated assert!(!Serai::new(serai_rpc.clone()) .await .unwrap() .p2p_validators(ExternalNetworkId::Bitcoin.into()) .await .unwrap() .is_empty()); }) .await; }
================================================ FILE: substrate/client/tests/emissions.rs ================================================ use std::{time::Duration, collections::HashMap}; use rand_core::{RngCore, OsRng}; use serai_client::TemporalSerai; use serai_abi::{ emissions::primitives::{INITIAL_REWARD_PER_BLOCK, SECURE_BY}, in_instructions::primitives::Batch, primitives::{ BlockHash, ExternalBalance, ExternalCoin, ExternalNetworkId, EXTERNAL_NETWORKS, FAST_EPOCH_DURATION, FAST_EPOCH_INITIAL_PERIOD, NETWORKS, TARGET_BLOCK_TIME, Amount, NetworkId, }, validator_sets::primitives::Session, }; use serai_client::Serai; mod common; use common::{genesis_liquidity::set_up_genesis, in_instructions::provide_batch}; serai_test_fast_epoch!( emissions: (|serai: Serai| async move { test_emissions(serai).await; }) ); async fn send_batches(serai: &Serai, ids: &mut HashMap) { for network in EXTERNAL_NETWORKS { // set up batch id ids .entry(network) .and_modify(|v| { *v += 1; }) .or_insert(0); // set up block hash let mut block = BlockHash([0; 32]); OsRng.fill_bytes(&mut block.0); provide_batch(serai, Batch { network, id: ids[&network], block, instructions: vec![] }).await; } } async fn test_emissions(serai: Serai) { // set up the genesis let values = HashMap::from([ (ExternalCoin::Monero, 184100), (ExternalCoin::Ether, 4785000), (ExternalCoin::Dai, 1500), ]); let (_, mut batch_ids) = set_up_genesis(&serai, &values).await; // wait until genesis is complete let mut genesis_complete_block = None; while genesis_complete_block.is_none() { tokio::time::sleep(Duration::from_secs(1)).await; genesis_complete_block = serai .as_of_latest_finalized_block() .await .unwrap() .genesis_liquidity() .genesis_complete_block() .await .unwrap(); } for _ in 0 .. 3 { // get current stakes let mut current_stake = HashMap::new(); for n in NETWORKS { // TODO: investigate why serai network TAS isn't visible at session 0. 
let stake = serai .as_of_latest_finalized_block() .await .unwrap() .validator_sets() .total_allocated_stake(n) .await .unwrap() .unwrap_or(Amount(0)) .0; current_stake.insert(n, stake); } // wait for a session change let current_session = wait_for_session_change(&serai).await; // get last block let last_block = serai.latest_finalized_block().await.unwrap(); let serai_latest = serai.as_of(last_block.hash()); let change_block_number = last_block.number(); // get distances to ec security & block count of the previous session let (distances, total_distance) = get_distances(&serai_latest, ¤t_stake).await; let block_count = get_session_blocks(&serai_latest, current_session - 1).await; // calculate how much reward in this session let reward_this_epoch = if change_block_number < (genesis_complete_block.unwrap() + FAST_EPOCH_INITIAL_PERIOD) { block_count * INITIAL_REWARD_PER_BLOCK } else { let blocks_until = SECURE_BY - change_block_number; let block_reward = total_distance / blocks_until; block_count * block_reward }; let reward_per_network = distances .into_iter() .map(|(n, distance)| { let reward = u64::try_from( u128::from(reward_this_epoch).saturating_mul(u128::from(distance)) / u128::from(total_distance), ) .unwrap(); (n, reward) }) .collect::>(); // retire the prev-set so that TotalAllocatedStake updated. send_batches(&serai, &mut batch_ids).await; for (n, reward) in reward_per_network { let stake = serai .as_of_latest_finalized_block() .await .unwrap() .validator_sets() .total_allocated_stake(n) .await .unwrap() .unwrap_or(Amount(0)) .0; // all reward should automatically staked for the network since we are in initial period. assert_eq!(stake, *current_stake.get(&n).unwrap() + reward); } // TODO: check stake per address? // TODO: check post ec security era } } /// Returns the required stake in terms SRI for a given `Balance`. 
async fn required_stake(serai: &TemporalSerai<'_>, balance: ExternalBalance) -> u64 {
  // This is inclusive to an increase in accuracy
  let sri_per_coin = serai.dex().oracle_value(balance.coin).await.unwrap().unwrap_or(Amount(0));

  // See dex-pallet for the reasoning on these
  let coin_decimals = balance.coin.decimals().max(5);
  let accuracy_increase = u128::from(10u64.pow(coin_decimals));

  let total_coin_value =
    u64::try_from(u128::from(balance.amount.0) * u128::from(sri_per_coin.0) / accuracy_increase)
      .unwrap_or(u64::MAX);

  // required stake formula (COIN_VALUE * 1.5) + margin(20%)
  let required_stake = total_coin_value.saturating_mul(3).saturating_div(2);
  required_stake.saturating_add(total_coin_value.saturating_div(5))
}

/// Block until the Serai network's session advances past the current one, returning the new
/// session index. Panics (via timeout) if no change occurs within two epochs' worth of blocks.
async fn wait_for_session_change(serai: &Serai) -> u32 {
  let current_session = serai
    .as_of_latest_finalized_block()
    .await
    .unwrap()
    .validator_sets()
    .session(NetworkId::Serai)
    .await
    .unwrap()
    .unwrap()
    .0;
  let next_session = current_session + 1;

  // lets wait double the epoch time.
  tokio::time::timeout(
    tokio::time::Duration::from_secs(FAST_EPOCH_DURATION * TARGET_BLOCK_TIME * 2),
    async {
      while serai
        .as_of_latest_finalized_block()
        .await
        .unwrap()
        .validator_sets()
        .session(NetworkId::Serai)
        .await
        .unwrap()
        .unwrap()
        .0 <
        next_session
      {
        tokio::time::sleep(Duration::from_secs(6)).await;
      }
    },
  )
  .await
  .unwrap();

  next_session
}

/// Returns each network's distance to economic security (required stake minus current stake,
/// floored at zero), plus the total distance across all networks (including Serai's 20% share).
async fn get_distances(
  serai: &TemporalSerai<'_>,
  current_stake: &HashMap<NetworkId, u64>,
) -> (HashMap<NetworkId, u64>, u64) {
  // we should be in the initial period, so calculate how much each network supposedly get..
  // we can check the supply to see how much coin hence liability we have.
let mut distances: HashMap = HashMap::new(); let mut total_distance = 0; for n in EXTERNAL_NETWORKS { let mut required = 0; for c in n.coins() { let amount = serai.coins().coin_supply(c.into()).await.unwrap(); required += required_stake(serai, ExternalBalance { coin: c, amount }).await; } let mut current = *current_stake.get(&n.into()).unwrap(); if current > required { current = required; } let distance = required - current; total_distance += distance; distances.insert(n.into(), distance); } // add serai network portion(20%) let new_total_distance = total_distance.saturating_mul(10) / 8; distances.insert(NetworkId::Serai, new_total_distance - total_distance); total_distance = new_total_distance; (distances, total_distance) } async fn get_session_blocks(serai: &TemporalSerai<'_>, session: u32) -> u64 { let begin_block = serai .validator_sets() .session_begin_block(NetworkId::Serai, Session(session)) .await .unwrap() .unwrap(); let next_begin_block = serai .validator_sets() .session_begin_block(NetworkId::Serai, Session(session + 1)) .await .unwrap() .unwrap(); next_begin_block.saturating_sub(begin_block) } ================================================ FILE: substrate/client/tests/genesis_liquidity.rs ================================================ use std::{time::Duration, collections::HashMap}; use serai_client::Serai; use serai_abi::primitives::{Amount, Coin, ExternalCoin, COINS, EXTERNAL_COINS, GENESIS_SRI}; use serai_client::genesis_liquidity::primitives::{ GENESIS_LIQUIDITY_ACCOUNT, INITIAL_GENESIS_LP_SHARES, }; mod common; use common::genesis_liquidity::set_up_genesis; serai_test_fast_epoch!( genesis_liquidity: (|serai: Serai| async move { test_genesis_liquidity(serai).await; }) ); pub async fn test_genesis_liquidity(serai: Serai) { // set up the genesis let values = HashMap::from([ (ExternalCoin::Monero, 184100), (ExternalCoin::Ether, 4785000), (ExternalCoin::Dai, 1500), ]); let (accounts, _) = set_up_genesis(&serai, &values).await; // wait until genesis 
is complete while serai .as_of_latest_finalized_block() .await .unwrap() .genesis_liquidity() .genesis_complete_block() .await .unwrap() .is_none() { tokio::time::sleep(Duration::from_secs(1)).await; } // check total SRI supply is +100M // there are 6 endowed accounts in dev-net. Take this into consideration when checking // for the total sri minted at this time. let serai = serai.as_of_latest_finalized_block().await.unwrap(); let sri = serai.coins().coin_supply(Coin::Serai).await.unwrap(); let endowed_amount: u64 = 1 << 60; let total_sri = (6 * endowed_amount) + GENESIS_SRI; assert_eq!(sri, Amount(total_sri)); // check genesis account has no coins, all transferred to pools. for coin in COINS { let amount = serai.coins().coin_balance(coin, GENESIS_LIQUIDITY_ACCOUNT).await.unwrap(); assert_eq!(amount.0, 0); } // check pools has proper liquidity let mut pool_amounts = HashMap::new(); let mut total_value = 0u128; for coin in EXTERNAL_COINS { let total_coin = accounts[&coin].iter().fold(0u128, |acc, value| acc + u128::from(value.1 .0)); let value = if coin != ExternalCoin::Bitcoin { (total_coin * u128::from(values[&coin])) / 10u128.pow(coin.decimals()) } else { total_coin }; total_value += value; pool_amounts.insert(coin, (total_coin, value)); } // check distributed SRI per pool let mut total_sri_distributed = 0u128; for coin in EXTERNAL_COINS { let sri = if coin == *EXTERNAL_COINS.last().unwrap() { u128::from(GENESIS_SRI).checked_sub(total_sri_distributed).unwrap() } else { (pool_amounts[&coin].1 * u128::from(GENESIS_SRI)) / total_value }; total_sri_distributed += sri; let reserves = serai.dex().get_reserves(coin).await.unwrap().unwrap(); assert_eq!(u128::from(reserves.0 .0), pool_amounts[&coin].0); // coin side assert_eq!(u128::from(reserves.1 .0), sri); // SRI side } // check each liquidity provider got liquidity tokens proportional to their value for coin in EXTERNAL_COINS { let liq_supply = serai.genesis_liquidity().supply(coin).await.unwrap(); for (acc, amount) 
in &accounts[&coin] { let acc_liq_shares = serai.genesis_liquidity().liquidity(acc, coin).await.unwrap().shares; // since we can't test the ratios directly(due to integer division giving 0) // we test whether they give the same result when multiplied by another constant. // Following test ensures the account in fact has the right amount of shares. let mut shares_ratio = (INITIAL_GENESIS_LP_SHARES * acc_liq_shares) / liq_supply.shares; let amounts_ratio = (INITIAL_GENESIS_LP_SHARES * amount.0) / u64::try_from(pool_amounts[&coin].0).unwrap(); // we can tolerate 1 unit diff between them due to integer division. if shares_ratio.abs_diff(amounts_ratio) == 1 { shares_ratio = amounts_ratio; } assert_eq!(shares_ratio, amounts_ratio); } } // TODO: test remove the liq before/after genesis ended. } ================================================ FILE: substrate/client/tests/time.rs ================================================ use std::time::{Duration, SystemTime}; use tokio::time::sleep; use serai_client::Serai; mod common; serai_test!( time: (|serai: Serai| async move { let mut number = serai.latest_finalized_block().await.unwrap().number(); let mut done = 0; while done < 3 { // Wait for the next block let block = serai.latest_finalized_block().await.unwrap(); if block.number() == number { sleep(Duration::from_secs(1)).await; continue; } number = block.number(); // Make sure the time we extract from the block is within 5 seconds of now let now = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs(); assert!(now.saturating_sub(block.time().unwrap()) < 5); done += 1; } }) ); ================================================ FILE: substrate/client/tests/validator_sets.rs ================================================ use rand_core::{RngCore, OsRng}; use sp_core::{ sr25519::{Public, Pair}, Pair as PairTrait, }; use serai_client::{ primitives::{ NETWORKS, NetworkId, BlockHash, insecure_pair_from_name, FAST_EPOCH_DURATION, TARGET_BLOCK_TIME, 
ExternalNetworkId, Amount, }, validator_sets::{ primitives::{Session, ValidatorSet, ExternalValidatorSet, KeyPair}, ValidatorSetsEvent, }, in_instructions::{ primitives::{Batch, SignedBatch, batch_message}, SeraiInInstructions, }, Serai, }; mod common; use common::{ tx::publish_tx, validator_sets::{allocate_stake, deallocate_stake, set_keys}, }; fn get_random_key_pair() -> KeyPair { let mut ristretto_key = [0; 32]; OsRng.fill_bytes(&mut ristretto_key); let mut external_key = vec![0; 33]; OsRng.fill_bytes(&mut external_key); KeyPair(Public::from(ristretto_key), external_key.try_into().unwrap()) } async fn get_ordered_keys(serai: &Serai, network: NetworkId, accounts: &[Pair]) -> Vec { // retrieve the current session validators so that we know the order of the keys // that is necessary for the correct musig signature. let validators = serai .as_of_latest_finalized_block() .await .unwrap() .validator_sets() .active_network_validators(network) .await .unwrap(); // collect the pairs of the validators let mut pairs = vec![]; for v in validators { let p = accounts.iter().find(|pair| pair.public() == v).unwrap().clone(); pairs.push(p); } pairs } serai_test!( set_keys_test: (|serai: Serai| async move { let network = ExternalNetworkId::Bitcoin; let set = ExternalValidatorSet { session: Session(0), network }; let pair = insecure_pair_from_name("Alice"); let public = pair.public(); // Neither of these keys are validated // The external key is infeasible to validate on-chain, the Ristretto key is feasible // TODO: Should the Ristretto key be validated? 
let key_pair = get_random_key_pair(); // Make sure the genesis is as expected assert_eq!( serai .as_of(serai.finalized_block_by_number(0).await.unwrap().unwrap().hash()) .validator_sets() .new_set_events() .await .unwrap(), NETWORKS .iter() .copied() .map(|network| ValidatorSetsEvent::NewSet { set: ValidatorSet { session: Session(0), network } }) .collect::>(), ); { let vs_serai = serai.as_of_latest_finalized_block().await.unwrap(); let vs_serai = vs_serai.validator_sets(); let participants = vs_serai.participants(set.network.into()).await .unwrap() .unwrap() .into_iter() .map(|(k, _)| k) .collect::>(); let participants_ref: &[_] = participants.as_ref(); assert_eq!(participants_ref, [public].as_ref()); } let block = set_keys(&serai, set, key_pair.clone(), &[pair]).await; // While the set_keys function should handle this, it's beneficial to // independently test it let serai = serai.as_of(block); let serai = serai.validator_sets(); assert_eq!( serai.key_gen_events().await.unwrap(), vec![ValidatorSetsEvent::KeyGen { set, key_pair: key_pair.clone() }] ); assert_eq!(serai.keys(set).await.unwrap(), Some(key_pair)); }) ); #[tokio::test] async fn validator_set_rotation() { use dockertest::{ PullPolicy, StartPolicy, LogOptions, LogAction, LogPolicy, LogSource, Image, TestBodySpecification, DockerTest, }; use std::collections::HashMap; serai_docker_tests::build("serai-fast-epoch".to_string()); let handle = |name| format!("serai_client-serai_node-{name}"); let composition = |name| { TestBodySpecification::with_image( Image::with_repository("serai-dev-serai-fast-epoch").pull_policy(PullPolicy::Never), ) .replace_cmd(vec![ "serai-node".to_string(), "--unsafe-rpc-external".to_string(), "--rpc-cors".to_string(), "all".to_string(), "--chain".to_string(), "local".to_string(), format!("--{name}"), ]) .replace_env(HashMap::from([ ("RUST_LOG".to_string(), "runtime=debug".to_string()), ("KEY".to_string(), " ".to_string()), ])) .set_publish_all_ports(true) .set_handle(handle(name)) 
.set_start_policy(StartPolicy::Strict) .set_log_options(Some(LogOptions { action: LogAction::Forward, policy: LogPolicy::Always, source: LogSource::Both, })) }; let mut test = DockerTest::new().with_network(dockertest::Network::Isolated); test.provide_container(composition("alice")); test.provide_container(composition("bob")); test.provide_container(composition("charlie")); test.provide_container(composition("dave")); test.provide_container(composition("eve")); test .run_async(|ops| async move { // Sleep until the Substrate RPC starts let alice = handle("alice"); let alice_rpc = ops.handle(&alice).host_port(9944).unwrap(); let alice_rpc = format!("http://{}:{}", alice_rpc.0, alice_rpc.1); // Sleep for some time tokio::time::sleep(core::time::Duration::from_secs(20)).await; let serai = Serai::new(alice_rpc.clone()).await.unwrap(); // Make sure the genesis is as expected assert_eq!( serai .as_of(serai.finalized_block_by_number(0).await.unwrap().unwrap().hash()) .validator_sets() .new_set_events() .await .unwrap(), NETWORKS .iter() .copied() .map(|network| ValidatorSetsEvent::NewSet { set: ValidatorSet { session: Session(0), network } }) .collect::>(), ); // genesis accounts let accounts = vec![ insecure_pair_from_name("Alice"), insecure_pair_from_name("Bob"), insecure_pair_from_name("Charlie"), insecure_pair_from_name("Dave"), insecure_pair_from_name("Eve"), ]; // amounts for single key share per network let key_shares = HashMap::from([ (NetworkId::Serai, Amount(50_000 * 10_u64.pow(8))), (NetworkId::External(ExternalNetworkId::Bitcoin), Amount(1_000_000 * 10_u64.pow(8))), (NetworkId::External(ExternalNetworkId::Monero), Amount(100_000 * 10_u64.pow(8))), (NetworkId::External(ExternalNetworkId::Ethereum), Amount(1_000_000 * 10_u64.pow(8))), ]); // genesis participants per network #[allow(clippy::redundant_closure_for_method_calls)] let default_participants = accounts[.. 
4].to_vec().iter().map(|pair| pair.public()).collect::>(); let mut participants = HashMap::from([ (NetworkId::Serai, default_participants.clone()), (NetworkId::External(ExternalNetworkId::Bitcoin), default_participants.clone()), (NetworkId::External(ExternalNetworkId::Monero), default_participants.clone()), (NetworkId::External(ExternalNetworkId::Ethereum), default_participants), ]); // test the set rotation for (i, network) in NETWORKS.into_iter().enumerate() { let participants = participants.get_mut(&network).unwrap(); // we start the chain with 4 default participants that has a single key share each participants.sort(); verify_session_and_active_validators(&serai, network, 0, participants).await; // add 1 participant let last_participant = accounts[4].clone(); let hash = allocate_stake( &serai, network, key_shares[&network], &last_participant, i.try_into().unwrap(), ) .await; participants.push(last_participant.public()); // the session at which set changes becomes active let activation_session = get_session_at_which_changes_activate(&serai, network, hash).await; // set the keys if it is an external set if network != NetworkId::Serai { let set = ExternalValidatorSet { session: Session(0), network: network.try_into().unwrap() }; let key_pair = get_random_key_pair(); let pairs = get_ordered_keys(&serai, network, &accounts).await; set_keys(&serai, set, key_pair, &pairs).await; } // verify participants.sort(); verify_session_and_active_validators(&serai, network, activation_session, participants) .await; // remove 1 participant let participant_to_remove = accounts[1].clone(); let hash = deallocate_stake( &serai, network, key_shares[&network], &participant_to_remove, i.try_into().unwrap(), ) .await; participants.swap_remove( participants.iter().position(|k| *k == participant_to_remove.public()).unwrap(), ); let activation_session = get_session_at_which_changes_activate(&serai, network, hash).await; if network != NetworkId::Serai { // set the keys if it is an external 
set let set = ExternalValidatorSet { session: Session(1), network: network.try_into().unwrap() }; // we need the whole substrate key pair to sign the batch let (substrate_pair, key_pair) = { let pair = insecure_pair_from_name("session-1-key-pair"); let public = pair.public(); let mut external_key = vec![0; 33]; OsRng.fill_bytes(&mut external_key); (pair, KeyPair(public, external_key.try_into().unwrap())) }; let pairs = get_ordered_keys(&serai, network, &accounts).await; set_keys(&serai, set, key_pair, &pairs).await; // provide a batch to complete the handover and retire the previous set let mut block_hash = BlockHash([0; 32]); OsRng.fill_bytes(&mut block_hash.0); let batch = Batch { network: network.try_into().unwrap(), id: 0, block: block_hash, instructions: vec![], }; publish_tx( &serai, &SeraiInInstructions::execute_batch(SignedBatch { batch: batch.clone(), signature: substrate_pair.sign(&batch_message(&batch)), }), ) .await; } // verify participants.sort(); verify_session_and_active_validators(&serai, network, activation_session, participants) .await; // check pending deallocations let pending = serai .as_of_latest_finalized_block() .await .unwrap() .validator_sets() .pending_deallocations( network, participant_to_remove.public(), Session(activation_session + 1), ) .await .unwrap(); assert_eq!(pending, Some(key_shares[&network])); } }) .await; } async fn session_for_block(serai: &Serai, block: [u8; 32], network: NetworkId) -> u32 { serai.as_of(block).validator_sets().session(network).await.unwrap().unwrap().0 } async fn verify_session_and_active_validators( serai: &Serai, network: NetworkId, session: u32, participants: &[Public], ) { // wait until the active session. 
let block = tokio::time::timeout( core::time::Duration::from_secs(FAST_EPOCH_DURATION * TARGET_BLOCK_TIME * 2), async move { loop { let mut block = serai.latest_finalized_block_hash().await.unwrap(); if session_for_block(serai, block, network).await < session { // Sleep a block tokio::time::sleep(core::time::Duration::from_secs(TARGET_BLOCK_TIME)).await; continue; } while session_for_block(serai, block, network).await > session { block = serai.block(block).await.unwrap().unwrap().header.parent_hash.0; } assert_eq!(session_for_block(serai, block, network).await, session); break block; } }, ) .await .unwrap(); let serai_for_block = serai.as_of(block); // verify session let s = serai_for_block.validator_sets().session(network).await.unwrap().unwrap(); assert_eq!(s.0, session); // verify participants let mut validators = serai_for_block.validator_sets().active_network_validators(network).await.unwrap(); validators.sort(); assert_eq!(validators, participants); // make sure finalization continues as usual after the changes let current_finalized_block = serai.latest_finalized_block().await.unwrap().header.number; tokio::time::timeout(core::time::Duration::from_secs(TARGET_BLOCK_TIME * 10), async move { let mut finalized_block = serai.latest_finalized_block().await.unwrap().header.number; while finalized_block <= current_finalized_block + 2 { tokio::time::sleep(core::time::Duration::from_secs(TARGET_BLOCK_TIME)).await; finalized_block = serai.latest_finalized_block().await.unwrap().header.number; } }) .await .unwrap(); // TODO: verify key shares as well? } async fn get_session_at_which_changes_activate( serai: &Serai, network: NetworkId, hash: [u8; 32], ) -> u32 { let session = session_for_block(serai, hash, network).await; // changes should be active in the next session if network == NetworkId::Serai { // it takes 1 extra session for serai net to make the changes active. 
session + 2 } else { session + 1 } } ================================================ FILE: substrate/coins/pallet/Cargo.toml ================================================ [package] name = "serai-coins-pallet" version = "0.1.0" description = "Coins pallet for Serai" license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/coins/pallet" authors = ["Akil Demir "] edition = "2021" rust-version = "1.74" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [package.metadata.cargo-machete] ignored = ["scale"] [lints] workspace = true [dependencies] scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] } frame-system = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } frame-support = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-core = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-std = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-runtime = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } pallet-transaction-payment = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } serai-primitives = { path = "../../primitives", default-features = false, features = ["serde"] } coins-primitives = { package = "serai-coins-primitives", path = "../primitives", default-features = false } [dev-dependencies] sp-io = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = 
false, features = ["std"] } [features] std = [ "frame-system/std", "frame-support/std", "sp-core/std", "sp-std/std", "sp-runtime/std", "pallet-transaction-payment/std", "serai-primitives/std", "coins-primitives/std", ] try-runtime = [ "frame-system/try-runtime", "frame-support/try-runtime", "sp-runtime/try-runtime", ] runtime-benchmarks = [ "frame-system/runtime-benchmarks", "frame-support/runtime-benchmarks", ] default = ["std"] ================================================ FILE: substrate/coins/pallet/LICENSE ================================================ AGPL-3.0-only license Copyright (c) 2023 Luke Parker This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License Version 3 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see . 
================================================ FILE: substrate/coins/pallet/src/lib.rs ================================================ #![cfg_attr(not(feature = "std"), no_std)] #[cfg(test)] mod mock; #[cfg(test)] mod tests; use serai_primitives::{Balance, Coin, ExternalBalance, SubstrateAmount}; pub trait AllowMint { fn is_allowed(balance: &ExternalBalance) -> bool; } impl AllowMint for () { fn is_allowed(_: &ExternalBalance) -> bool { true } } // TODO: Investigate why Substrate generates this #[allow(unreachable_patterns, clippy::cast_possible_truncation)] #[frame_support::pallet] pub mod pallet { use super::*; use sp_std::{vec::Vec, any::TypeId}; use sp_core::sr25519::Public; use sp_runtime::{ traits::{DispatchInfoOf, PostDispatchInfoOf}, transaction_validity::{TransactionValidityError, InvalidTransaction}, }; use frame_system::pallet_prelude::*; use frame_support::pallet_prelude::*; use pallet_transaction_payment::{Config as TpConfig, OnChargeTransaction}; use serai_primitives::*; pub use coins_primitives as primitives; use primitives::*; type LiquidityTokensInstance = crate::Instance1; #[pallet::config] pub trait Config: frame_system::Config { type AllowMint: AllowMint; } #[pallet::genesis_config] #[derive(Clone, Debug)] pub struct GenesisConfig, I: 'static = ()> { pub accounts: Vec<(T::AccountId, Balance)>, pub _ignore: PhantomData, } impl, I: 'static> Default for GenesisConfig { fn default() -> Self { GenesisConfig { accounts: Default::default(), _ignore: Default::default() } } } #[pallet::error] pub enum Error { AmountOverflowed, NotEnoughCoins, BurnWithInstructionNotAllowed, MintNotAllowed, } #[pallet::event] #[pallet::generate_deposit(fn deposit_event)] pub enum Event, I: 'static = ()> { Mint { to: Public, balance: Balance }, Burn { from: Public, balance: Balance }, BurnWithInstruction { from: Public, instruction: OutInstructionWithBalance }, Transfer { from: Public, to: Public, balance: Balance }, } #[pallet::pallet] pub struct Pallet(_); /// The 
amount of coins each account has. // Identity is used as the second key's hasher due to it being a non-manipulatable fixed-space // ID. #[pallet::storage] #[pallet::getter(fn balances)] pub type Balances, I: 'static = ()> = StorageDoubleMap<_, Blake2_128Concat, Public, Identity, Coin, SubstrateAmount, ValueQuery>; /// The total supply of each coin. // We use Identity type here again due to reasons stated in the Balances Storage. #[pallet::storage] #[pallet::getter(fn supply)] pub type Supply, I: 'static = ()> = StorageMap<_, Identity, Coin, SubstrateAmount, ValueQuery>; #[pallet::genesis_build] impl, I: 'static> BuildGenesisConfig for GenesisConfig { fn build(&self) { // initialize the supply of the coins // TODO: Don't use COINS yet GenesisConfig so we can safely expand COINS for c in &COINS { Supply::::set(c, 0); } // initialize the genesis accounts for (account, balance) in &self.accounts { Pallet::::mint(*account, *balance).unwrap(); } } } #[pallet::hooks] impl, I: 'static> Hooks> for Pallet { fn on_initialize(_: BlockNumberFor) -> Weight { // burn the fees collected previous block let coin = Coin::Serai; let amount = Self::balance(FEE_ACCOUNT.into(), coin); // we can unwrap, we are not burning more then what we have // If this errors, it'll halt the runtime however (due to being called at the start of every // block), requiring extra care when reviewing Self::burn_internal(FEE_ACCOUNT.into(), Balance { coin, amount }).unwrap(); Weight::zero() // TODO } } impl, I: 'static> Pallet { /// Returns the balance of a given account for `coin`. 
pub fn balance(of: Public, coin: Coin) -> Amount { Amount(Self::balances(of, coin)) } fn decrease_balance_internal(from: Public, balance: Balance) -> Result<(), Error> { let coin = &balance.coin; // sub amount from account let new_amount = Self::balances(from, coin) .checked_sub(balance.amount.0) .ok_or(Error::::NotEnoughCoins)?; // save if new_amount == 0 { Balances::::remove(from, coin); } else { Balances::::set(from, coin, new_amount); } Ok(()) } fn increase_balance_internal(to: Public, balance: Balance) -> Result<(), Error> { let coin = &balance.coin; // add amount to account let new_amount = Self::balances(to, coin) .checked_add(balance.amount.0) .ok_or(Error::::AmountOverflowed)?; // save Balances::::set(to, coin, new_amount); Ok(()) } /// Mint `balance` to the given account. /// /// Errors if any amount overflows. pub fn mint(to: Public, balance: Balance) -> Result<(), Error> { // If the coin isn't Serai, which we're always allowed to mint, and the mint isn't explicitly // allowed, error if !ExternalCoin::try_from(balance.coin) .map(|coin| T::AllowMint::is_allowed(&ExternalBalance { coin, amount: balance.amount })) .unwrap_or(true) { Err(Error::::MintNotAllowed)?; } // update the balance Self::increase_balance_internal(to, balance)?; // update the supply let new_supply = Self::supply(balance.coin) .checked_add(balance.amount.0) .ok_or(Error::::AmountOverflowed)?; Supply::::set(balance.coin, new_supply); Self::deposit_event(Event::Mint { to, balance }); Ok(()) } /// Burn `balance` from the specified account. fn burn_internal(from: Public, balance: Balance) -> Result<(), Error> { // don't waste time if amount == 0 if balance.amount.0 == 0 { return Ok(()); } // update the balance Self::decrease_balance_internal(from, balance)?; // update the supply let new_supply = Self::supply(balance.coin).checked_sub(balance.amount.0).unwrap(); Supply::::set(balance.coin, new_supply); Ok(()) } /// Transfer `balance` from `from` to `to`. 
pub fn transfer_internal( from: Public, to: Public, balance: Balance, ) -> Result<(), Error> { // update balances of accounts Self::decrease_balance_internal(from, balance)?; Self::increase_balance_internal(to, balance)?; Self::deposit_event(Event::Transfer { from, to, balance }); Ok(()) } } #[pallet::call] impl, I: 'static> Pallet { #[pallet::call_index(0)] #[pallet::weight((0, DispatchClass::Normal))] // TODO pub fn transfer(origin: OriginFor, to: Public, balance: Balance) -> DispatchResult { let from = ensure_signed(origin)?; Self::transfer_internal(from, to, balance)?; Ok(()) } /// Burn `balance` from the caller. #[pallet::call_index(1)] #[pallet::weight((0, DispatchClass::Normal))] // TODO pub fn burn(origin: OriginFor, balance: Balance) -> DispatchResult { let from = ensure_signed(origin)?; Self::burn_internal(from, balance)?; Self::deposit_event(Event::Burn { from, balance }); Ok(()) } /// Burn `balance` with `OutInstructionWithBalance` from the caller. #[pallet::call_index(2)] #[pallet::weight((0, DispatchClass::Normal))] // TODO pub fn burn_with_instruction( origin: OriginFor, instruction: OutInstructionWithBalance, ) -> DispatchResult { if TypeId::of::() == TypeId::of::() { Err(Error::::BurnWithInstructionNotAllowed)?; } let from = ensure_signed(origin)?; Self::burn_internal(from, instruction.balance.into())?; Self::deposit_event(Event::BurnWithInstruction { from, instruction }); Ok(()) } } impl OnChargeTransaction for Pallet where T: TpConfig, { type Balance = SubstrateAmount; type LiquidityInfo = Option; fn withdraw_fee( who: &Public, _call: &T::RuntimeCall, _dispatch_info: &DispatchInfoOf, fee: Self::Balance, _tip: Self::Balance, ) -> Result { if fee == 0 { return Ok(None); } let balance = Balance { coin: Coin::Serai, amount: Amount(fee) }; match Self::transfer_internal(*who, FEE_ACCOUNT.into(), balance) { Err(_) => Err(InvalidTransaction::Payment)?, Ok(()) => Ok(Some(fee)), } } fn can_withdraw_fee( who: &Public, _call: &T::RuntimeCall, _dispatch_info: 
&DispatchInfoOf, fee: Self::Balance, _tip: Self::Balance, ) -> Result<(), TransactionValidityError> { if fee == 0 { return Ok(()); } if Self::balance(*who, Coin::Serai).0 < fee { Err(InvalidTransaction::Payment)?; } Ok(()) } fn correct_and_deposit_fee( who: &Public, _dispatch_info: &DispatchInfoOf, _post_info: &PostDispatchInfoOf, corrected_fee: Self::Balance, _tip: Self::Balance, already_withdrawn: Self::LiquidityInfo, ) -> Result<(), TransactionValidityError> { if let Some(paid) = already_withdrawn { let refund_amount = paid.saturating_sub(corrected_fee); let balance = Balance { coin: Coin::Serai, amount: Amount(refund_amount) }; Self::transfer_internal(FEE_ACCOUNT.into(), *who, balance) .map_err(|_| TransactionValidityError::Invalid(InvalidTransaction::Payment))?; } Ok(()) } } } pub use pallet::*; ================================================ FILE: substrate/coins/pallet/src/mock.rs ================================================ //! Test environment for Coins pallet. use super::*; use frame_support::{construct_runtime, derive_impl}; use sp_core::sr25519::Public; use sp_runtime::{traits::IdentityLookup, BuildStorage}; use crate as coins; type Block = frame_system::mocking::MockBlock; construct_runtime!( pub enum Test { System: frame_system, Coins: coins, } ); #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type AccountId = Public; type Lookup = IdentityLookup; type Block = Block; } impl Config for Test { type AllowMint = (); } pub(crate) fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); crate::GenesisConfig:: { accounts: vec![], _ignore: Default::default() } .assimilate_storage(&mut t) .unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| System::set_block_number(0)); ext } ================================================ FILE: substrate/coins/pallet/src/tests.rs 
================================================ use crate::{mock::*, primitives::*}; use frame_system::RawOrigin; use sp_core::Pair; use serai_primitives::*; pub type CoinsEvent = crate::Event; #[test] fn mint() { new_test_ext().execute_with(|| { // minting u64::MAX should work let coin = Coin::Serai; let to = insecure_pair_from_name("random1").public(); let balance = Balance { coin, amount: Amount(u64::MAX) }; Coins::mint(to, balance).unwrap(); assert_eq!(Coins::balance(to, coin), balance.amount); // minting more should fail assert!(Coins::mint(to, Balance { coin, amount: Amount(1) }).is_err()); // supply now should be equal to sum of the accounts balance sum assert_eq!(Coins::supply(coin), balance.amount.0); // test events let mint_events = System::events() .iter() .filter_map(|event| { if let RuntimeEvent::Coins(e) = &event.event { if matches!(e, CoinsEvent::Mint { .. }) { Some(e.clone()) } else { None } } else { None } }) .collect::>(); assert_eq!(mint_events, vec![CoinsEvent::Mint { to, balance }]); }) } #[test] fn burn_with_instruction() { new_test_ext().execute_with(|| { // mint some coin let coin = Coin::External(ExternalCoin::Bitcoin); let to = insecure_pair_from_name("random1").public(); let balance = Balance { coin, amount: Amount(10 * 10u64.pow(coin.decimals())) }; Coins::mint(to, balance).unwrap(); assert_eq!(Coins::balance(to, coin), balance.amount); assert_eq!(Coins::supply(coin), balance.amount.0); // we shouldn't be able to burn more than what we have let mut instruction = OutInstructionWithBalance { instruction: OutInstruction { address: ExternalAddress::new(vec![]).unwrap(), data: None }, balance: ExternalBalance { coin: coin.try_into().unwrap(), amount: Amount(balance.amount.0 + 1), }, }; assert!( Coins::burn_with_instruction(RawOrigin::Signed(to).into(), instruction.clone()).is_err() ); // it should now work instruction.balance.amount = balance.amount; Coins::burn_with_instruction(RawOrigin::Signed(to).into(), instruction.clone()).unwrap(); // 
balance & supply now should be back to 0 assert_eq!(Coins::balance(to, coin), Amount(0)); assert_eq!(Coins::supply(coin), 0); let burn_events = System::events() .iter() .filter_map(|event| { if let RuntimeEvent::Coins(e) = &event.event { if matches!(e, CoinsEvent::BurnWithInstruction { .. }) { Some(e.clone()) } else { None } } else { None } }) .collect::>(); assert_eq!(burn_events, vec![CoinsEvent::BurnWithInstruction { from: to, instruction }]); }) } #[test] fn transfer() { new_test_ext().execute_with(|| { // mint some coin let coin = Coin::External(ExternalCoin::Bitcoin); let from = insecure_pair_from_name("random1").public(); let balance = Balance { coin, amount: Amount(10 * 10u64.pow(coin.decimals())) }; Coins::mint(from, balance).unwrap(); assert_eq!(Coins::balance(from, coin), balance.amount); assert_eq!(Coins::supply(coin), balance.amount.0); // we can't send more than what we have let to = insecure_pair_from_name("random2").public(); assert!(Coins::transfer( RawOrigin::Signed(from).into(), to, Balance { coin, amount: Amount(balance.amount.0 + 1) } ) .is_err()); // we can send it all Coins::transfer(RawOrigin::Signed(from).into(), to, balance).unwrap(); // check the balances assert_eq!(Coins::balance(from, coin), Amount(0)); assert_eq!(Coins::balance(to, coin), balance.amount); // supply shouldn't change assert_eq!(Coins::supply(coin), balance.amount.0); }) } ================================================ FILE: substrate/coins/primitives/Cargo.toml ================================================ [package] name = "serai-coins-primitives" version = "0.1.0" description = "Serai coins primitives" license = "MIT" authors = ["Luke Parker "] edition = "2021" rust-version = "1.74" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] zeroize = { version = "^1.5", features = ["derive"], optional = true } borsh = { version = "1", default-features = false, features = ["derive", "de_strict_order"], 
optional = true } serde = { version = "1", default-features = false, features = ["derive", "alloc"], optional = true } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] } serai-primitives = { path = "../../primitives", default-features = false } [dev-dependencies] sp-runtime = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } [features] std = ["zeroize", "borsh?/std", "serde?/std", "scale/std", "sp-runtime/std", "serai-primitives/std"] borsh = ["dep:borsh", "serai-primitives/borsh"] serde = ["dep:serde", "serai-primitives/serde"] default = ["std"] ================================================ FILE: substrate/coins/primitives/LICENSE ================================================ MIT License Copyright (c) 2023 Luke Parker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
================================================ FILE: substrate/coins/primitives/src/lib.rs ================================================ #![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(not(feature = "std"), no_std)] #![expect(clippy::cast_possible_truncation)] #[cfg(feature = "std")] use zeroize::Zeroize; #[cfg(feature = "borsh")] use borsh::{BorshSerialize, BorshDeserialize}; #[cfg(feature = "serde")] use serde::{Serialize, Deserialize}; use scale::{Encode, Decode, DecodeWithMemTracking, MaxEncodedLen}; use serai_primitives::{system_address, Data, ExternalAddress, ExternalBalance, SeraiAddress}; pub const FEE_ACCOUNT: SeraiAddress = system_address(b"Coins-fees"); #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen)] #[cfg_attr(feature = "std", derive(Zeroize))] #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct OutInstruction { pub address: ExternalAddress, pub data: Option, } #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen)] #[cfg_attr(feature = "std", derive(Zeroize))] #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct OutInstructionWithBalance { pub instruction: OutInstruction, pub balance: ExternalBalance, } #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen)] #[cfg_attr(feature = "std", derive(Zeroize))] #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub enum Destination { Native(SeraiAddress), External(OutInstruction), } #[test] fn address() { use sp_runtime::traits::TrailingZeroInput; assert_eq!( FEE_ACCOUNT, SeraiAddress::decode(&mut TrailingZeroInput::new(b"Coins-fees")).unwrap() ); } 
================================================ FILE: substrate/dex/pallet/Cargo.toml ================================================ [package] name = "serai-dex-pallet" version = "0.1.0" description = "DEX pallet for Serai" license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/dex/pallet" authors = ["Parity Technologies , Akil Demir "] edition = "2021" rust-version = "1.74" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [package.metadata.cargo-machete] ignored = ["scale"] [lints] workspace = true [dependencies] scale = { package = "parity-scale-codec", version = "3.6.1", default-features = false } sp-std = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-io = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-api = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-runtime = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-core = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } frame-system = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } frame-support = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } frame-benchmarking = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false, optional = true } coins-pallet = { package = "serai-coins-pallet", path = "../../coins/pallet", default-features = false } 
serai-primitives = { path = "../../primitives", default-features = false } [dev-dependencies] rand_core = { version = "0.6", default-features = false, features = ["getrandom"] } [features] default = ["std"] std = [ "scale/std", "sp-std/std", "sp-io/std", "sp-api/std", "sp-runtime/std", "sp-core/std", "serai-primitives/std", "frame-system/std", "frame-support/std", "frame-benchmarking?/std", "coins-pallet/std", ] runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "frame-system/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", ] try-runtime = [ "sp-runtime/try-runtime", "frame-system/try-runtime", "frame-support/try-runtime", ] ================================================ FILE: substrate/dex/pallet/LICENSE-AGPL3 ================================================ AGPL-3.0-only license Copyright (c) 2023 Luke Parker This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License Version 3 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see . ================================================ FILE: substrate/dex/pallet/LICENSE-APACHE2 ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. 
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. NOTE Individual files contain the following tag instead of the full license text. SPDX-License-Identifier: Apache-2.0 This enables machine processing of license information based on the SPDX License Identifiers that are here available: http://spdx.org/licenses/ ================================================ FILE: substrate/dex/pallet/src/benchmarking.rs ================================================ // This file was originally: // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // It has been forked into a crate distributed under the AGPL 3.0. // Please check the current distribution for up-to-date copyright and licensing information. //! Dex pallet benchmarking. use super::*; use frame_benchmarking::{benchmarks, whitelisted_caller}; use frame_support::{assert_ok, storage::bounded_vec::BoundedVec}; use frame_system::RawOrigin as SystemOrigin; use sp_runtime::traits::StaticLookup; use sp_std::{ops::Div, prelude::*}; use serai_primitives::{Amount, Balance}; use crate::Pallet as Dex; use coins_pallet::Pallet as Coins; const INITIAL_COIN_BALANCE: u64 = 1_000_000_000; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; type LiquidityTokens = coins_pallet::Pallet; fn create_coin(coin: &ExternalCoin) -> (T::AccountId, AccountIdLookupOf) { let caller: T::AccountId = whitelisted_caller(); let caller_lookup = T::Lookup::unlookup(caller); assert_ok!(Coins::::mint( caller, Balance { coin: Coin::native(), amount: Amount(SubstrateAmount::MAX.div(1000u64)) } )); assert_ok!(Coins::::mint( caller, Balance { coin: (*coin).into(), amount: Amount(INITIAL_COIN_BALANCE) } )); (caller, caller_lookup) } fn create_coin_and_pool( coin: &ExternalCoin, ) -> (ExternalCoin, T::AccountId, AccountIdLookupOf) { let (caller, caller_lookup) = create_coin::(coin); assert_ok!(Dex::::create_pool(*coin)); (*coin, caller, caller_lookup) } benchmarks! 
{ add_liquidity { let coin1 = Coin::native(); let coin2 = ExternalCoin::Bitcoin; let (lp_token, caller, _) = create_coin_and_pool::(&coin2); let add_amount: u64 = 1000; }: _( SystemOrigin::Signed(caller), coin2, 1000u64, add_amount, 0u64, 0u64, caller ) verify { let pool_id = Dex::::get_pool_id(coin1, coin2.into()).unwrap(); let lp_minted = Dex::::calc_lp_amount_for_zero_supply( add_amount, 1000u64, ).unwrap(); assert_eq!( LiquidityTokens::::balance(caller, lp_token.into()).0, lp_minted ); assert_eq!( Coins::::balance(Dex::::get_pool_account(pool_id), Coin::native()).0, add_amount ); assert_eq!( Coins::::balance( Dex::::get_pool_account(pool_id), ExternalCoin::Bitcoin.into(), ).0, 1000 ); } remove_liquidity { let coin1 = Coin::native(); let coin2 = ExternalCoin::Monero; let (lp_token, caller, _) = create_coin_and_pool::(&coin2); let add_amount: u64 = 100; let lp_minted = Dex::::calc_lp_amount_for_zero_supply( add_amount, 1000u64 ).unwrap(); let remove_lp_amount: u64 = lp_minted.checked_div(10).unwrap(); Dex::::add_liquidity( SystemOrigin::Signed(caller).into(), coin2, 1000u64, add_amount, 0u64, 0u64, caller, )?; let total_supply = LiquidityTokens::::supply(Coin::from(lp_token)); }: _( SystemOrigin::Signed(caller), coin2, remove_lp_amount, 0u64, 0u64, caller ) verify { let new_total_supply = LiquidityTokens::::supply(Coin::from(lp_token)); assert_eq!( new_total_supply, total_supply - remove_lp_amount ); } swap_exact_tokens_for_tokens { let native = Coin::native(); let coin1 = ExternalCoin::Bitcoin; let coin2 = ExternalCoin::Ether; let (_, caller, _) = create_coin_and_pool::(&coin1); let (_, _) = create_coin::(&coin2); Dex::::add_liquidity( SystemOrigin::Signed(caller).into(), coin1, 200u64, // TODO: this call otherwise fails with `InsufficientLiquidityMinted` if we don't multiply // with 3. Might be again related to their expectance on ed being > 1. 
100 * 3, 0u64, 0u64, caller, )?; let swap_amount = 100u64; // since we only allow the native-coin pools, then the worst case scenario would be to swap // coin1-native-coin2 Dex::::create_pool(coin2)?; Dex::::add_liquidity( SystemOrigin::Signed(caller).into(), coin2, 1000u64, 500, 0u64, 0u64, caller, )?; let path = vec![Coin::from(coin1), native, Coin::from(coin2)]; let path = BoundedVec::<_, T::MaxSwapPathLength>::try_from(path).unwrap(); let native_balance = Coins::::balance(caller, native).0; let coin1_balance = Coins::::balance(caller, ExternalCoin::Bitcoin.into()).0; }: _(SystemOrigin::Signed(caller), path, swap_amount, 1u64, caller) verify { let ed_bump = 2u64; let new_coin1_balance = Coins::::balance(caller, ExternalCoin::Bitcoin.into()).0; assert_eq!(new_coin1_balance, coin1_balance - 100u64); } swap_tokens_for_exact_tokens { let native = Coin::native(); let coin1 = ExternalCoin::Bitcoin; let coin2 = ExternalCoin::Ether; let (_, caller, _) = create_coin_and_pool::(&coin1); let (_, _) = create_coin::(&coin2); Dex::::add_liquidity( SystemOrigin::Signed(caller).into(), coin1, 500u64, 1000, 0u64, 0u64, caller, )?; // since we only allow the native-coin pools, then the worst case scenario would be to swap // coin1-native-coin2 Dex::::create_pool(coin2)?; Dex::::add_liquidity( SystemOrigin::Signed(caller).into(), coin2, 1000u64, 500, 0u64, 0u64, caller, )?; let path = vec![Coin::from(coin1), native, Coin::from(coin2)]; let path: BoundedVec<_, T::MaxSwapPathLength> = BoundedVec::try_from(path).unwrap(); let coin2_balance = Coins::::balance(caller, ExternalCoin::Ether.into()).0; }: _( SystemOrigin::Signed(caller), path.clone(), 100u64, 1000, caller ) verify { let new_coin2_balance = Coins::::balance(caller, ExternalCoin::Ether.into()).0; assert_eq!(new_coin2_balance, coin2_balance + 100u64); } impl_benchmark_test_suite!(Dex, crate::mock::new_test_ext(), crate::mock::Test); } ================================================ FILE: substrate/dex/pallet/src/lib.rs 
================================================ // This file was originally: // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // It has been forked into a crate distributed under the AGPL 3.0. // Please check the current distribution for up-to-date copyright and licensing information. //! # Serai Dex pallet //! //! Serai Dex pallet based on the [Uniswap V2](https://github.com/Uniswap/v2-core) logic. //! //! ## Overview //! //! This pallet allows you to: //! //! - [create a liquidity pool](`Pallet::create_pool()`) for 2 coins //! - [provide the liquidity](`Pallet::add_liquidity()`) and receive back an LP token //! - [exchange the LP token back to coins](`Pallet::remove_liquidity()`) //! - [swap a specific amount of coins for another](`Pallet::swap_exact_tokens_for_tokens()`) if //! there is a pool created, or //! - [swap some coins for a specific amount of //! another](`Pallet::swap_tokens_for_exact_tokens()`). //! - [query for an exchange price](`DexApi::quote_price_exact_tokens_for_tokens`) via //! a runtime call endpoint //! - [query the size of a liquidity pool](`DexApi::get_reserves`) via a runtime api //! endpoint. //! //! The `quote_price_exact_tokens_for_tokens` and `quote_price_tokens_for_exact_tokens` functions //! both take a path parameter of the route to take. If you want to swap from native coin to //! non-native coin 1, you would pass in a path of `[DOT, 1]` or `[1, DOT]`. 
If you want to swap //! from non-native coin 1 to non-native coin 2, you would pass in a path of `[1, DOT, 2]`. //! //! (For an example of configuring this pallet to use `MultiLocation` as an coin id, see the //! cumulus repo). //! //! Here is an example `state_call` that asks for a quote of a pool of native versus coin 1: //! //! ```text //! curl -sS -H "Content-Type: application/json" -d \ //! '{ //! "id": 1, //! "jsonrpc": "2.0", //! "method": "state_call", //! "params": [ //! "DexApi_quote_price_tokens_for_exact_tokens", //! "0x0101000000000000000000000011000000000000000000" //! ] //! }' \ //! http://localhost:9933/ //! ``` //! (This can be run against the kitchen sync node in the `node` folder of this repo.) #![deny(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] use frame_support::traits::DefensiveOption; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; mod types; pub mod weights; #[cfg(test)] mod tests; #[cfg(test)] mod mock; use frame_support::{ensure, pallet_prelude::*, BoundedBTreeSet}; use frame_system::{ pallet_prelude::{BlockNumberFor, OriginFor}, ensure_signed, }; pub use pallet::*; use sp_runtime::{ traits::{TrailingZeroInput, IntegerSquareRoot}, DispatchError, }; use serai_primitives::*; use sp_std::prelude::*; pub use types::*; pub use weights::WeightInfo; // TODO: Investigate why Substrate generates these #[allow( unreachable_patterns, clippy::cast_possible_truncation, clippy::no_effect_underscore_binding )] #[frame_support::pallet] pub mod pallet { use super::*; use sp_core::sr25519::Public; use coins_pallet::{Pallet as CoinsPallet, Config as CoinsConfig}; /// Pool ID. /// /// The pool's `AccountId` is derived from this type. Any changes to the type may necessitate a /// migration. pub type PoolId = ExternalCoin; /// LiquidityTokens Pallet as an instance of coins pallet. pub type LiquidityTokens = coins_pallet::Pallet; /// A type used for amount conversions. 
pub type HigherPrecisionBalance = u128; #[pallet::pallet] pub struct Pallet(_); #[pallet::config] pub trait Config: frame_system::Config + CoinsConfig + coins_pallet::Config { /// A % the liquidity providers will take of every swap. Represents 10ths of a percent. #[pallet::constant] type LPFee: Get; /// The minimum LP token amount that could be minted. Ameliorates rounding errors. #[pallet::constant] type MintMinLiquidity: Get; /// The max number of hops in a swap. #[pallet::constant] type MaxSwapPathLength: Get; /// Last N number of blocks that oracle keeps track of the prices. #[pallet::constant] type MedianPriceWindowLength: Get; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; } /// Map from `PoolId` to `()`. This establishes whether a pool has been officially /// created rather than people sending tokens directly to a pool's public account. #[pallet::storage] pub type Pools = StorageMap<_, Blake2_128Concat, PoolId, (), OptionQuery>; #[pallet::storage] #[pallet::getter(fn spot_price_for_block)] pub type SpotPriceForBlock = StorageDoubleMap<_, Identity, BlockNumberFor, Identity, ExternalCoin, Amount, OptionQuery>; /// Moving window of prices from each block. /// /// The [u8; 8] key is the amount's big endian bytes, and u16 is the amount of inclusions in this /// multi-set. Since the underlying map is lexicographically sorted, this map stores amounts from /// low to high. #[pallet::storage] pub type SpotPrices = StorageDoubleMap<_, Identity, ExternalCoin, Identity, [u8; 8], u16, OptionQuery>; // SpotPrices, yet with keys stored in reverse lexicographic order. #[pallet::storage] pub type ReverseSpotPrices = StorageDoubleMap<_, Identity, ExternalCoin, Identity, [u8; 8], (), OptionQuery>; /// Current length of the `SpotPrices` map. 
#[pallet::storage] pub type SpotPricesLength = StorageMap<_, Identity, ExternalCoin, u16, OptionQuery>; /// Current position of the median within the `SpotPrices` map; #[pallet::storage] pub type CurrentMedianPosition = StorageMap<_, Identity, ExternalCoin, u16, OptionQuery>; /// Current median price of the prices in the `SpotPrices` map at any given time. #[pallet::storage] #[pallet::getter(fn median_price)] pub type MedianPrice = StorageMap<_, Identity, ExternalCoin, Amount, OptionQuery>; /// The price used for evaluating economic security, which is the highest observed median price. #[pallet::storage] #[pallet::getter(fn security_oracle_value)] pub type SecurityOracleValue = StorageMap<_, Identity, ExternalCoin, Amount, OptionQuery>; /// Total swap volume of a given pool in terms of SRI. #[pallet::storage] #[pallet::getter(fn swap_volume)] pub type SwapVolume = StorageMap<_, Identity, PoolId, u64, OptionQuery>; impl Pallet { fn restore_median( coin: ExternalCoin, mut current_median_pos: u16, mut current_median: Amount, length: u16, ) { // 1 -> 0 (the only value) // 2 -> 1 (the higher element), 4 -> 2 (the higher element) // 3 -> 1 (the true median) let target_median_pos = length / 2; while current_median_pos < target_median_pos { // Get the amount of presences for the current element let key = current_median.0.to_be_bytes(); let presences = SpotPrices::::get(coin, key).unwrap(); // > is correct, not >=. 
// Consider: // - length = 1, current_median_pos = 0, presences = 1, target_median_pos = 0 // - length = 2, current_median_pos = 0, presences = 2, target_median_pos = 1 // - length = 2, current_median_pos = 0, presences = 1, target_median_pos = 1 if (current_median_pos + presences) > target_median_pos { break; } current_median_pos += presences; let key = SpotPrices::::hashed_key_for(coin, key); let next_price = SpotPrices::::iter_key_prefix_from(coin, key).next().unwrap(); current_median = Amount(u64::from_be_bytes(next_price)); } while current_median_pos > target_median_pos { // Get the next element let key = reverse_lexicographic_order(current_median.0.to_be_bytes()); let key = ReverseSpotPrices::::hashed_key_for(coin, key); let next_price = ReverseSpotPrices::::iter_key_prefix_from(coin, key).next().unwrap(); let next_price = reverse_lexicographic_order(next_price); current_median = Amount(u64::from_be_bytes(next_price)); // Get its amount of presences let presences = SpotPrices::::get(coin, current_median.0.to_be_bytes()).unwrap(); // Adjust from next_value_first_pos to this_value_first_pos by substracting this value's // amount of times present current_median_pos -= presences; if current_median_pos <= target_median_pos { break; } } CurrentMedianPosition::::set(coin, Some(current_median_pos)); MedianPrice::::set(coin, Some(current_median)); } pub(crate) fn insert_into_median(coin: ExternalCoin, amount: Amount) { let new_quantity_of_presences = SpotPrices::::get(coin, amount.0.to_be_bytes()).unwrap_or(0) + 1; SpotPrices::::set(coin, amount.0.to_be_bytes(), Some(new_quantity_of_presences)); if new_quantity_of_presences == 1 { ReverseSpotPrices::::set( coin, reverse_lexicographic_order(amount.0.to_be_bytes()), Some(()), ); } let new_length = SpotPricesLength::::get(coin).unwrap_or(0) + 1; SpotPricesLength::::set(coin, Some(new_length)); let Some(current_median) = MedianPrice::::get(coin) else { MedianPrice::::set(coin, Some(amount)); 
CurrentMedianPosition::::set(coin, Some(0)); return; }; let mut current_median_pos = CurrentMedianPosition::::get(coin).unwrap(); // If this is being inserted before the current median, the current median's position has // increased if amount < current_median { current_median_pos += 1; } Self::restore_median(coin, current_median_pos, current_median, new_length); } pub(crate) fn remove_from_median(coin: ExternalCoin, amount: Amount) { let mut current_median = MedianPrice::::get(coin).unwrap(); let mut current_median_pos = CurrentMedianPosition::::get(coin).unwrap(); if amount < current_median { current_median_pos -= 1; } let new_quantity_of_presences = SpotPrices::::get(coin, amount.0.to_be_bytes()).unwrap() - 1; if new_quantity_of_presences == 0 { let normal_key = amount.0.to_be_bytes(); SpotPrices::::remove(coin, normal_key); ReverseSpotPrices::::remove(coin, reverse_lexicographic_order(amount.0.to_be_bytes())); // If we've removed the current item at this position, update to the item now at this // position if amount == current_median { let key = SpotPrices::::hashed_key_for(coin, normal_key); current_median = Amount(u64::from_be_bytes( SpotPrices::::iter_key_prefix_from(coin, key).next().unwrap(), )); } } else { SpotPrices::::set(coin, amount.0.to_be_bytes(), Some(new_quantity_of_presences)); } let new_length = SpotPricesLength::::get(coin).unwrap() - 1; SpotPricesLength::::set(coin, Some(new_length)); Self::restore_median(coin, current_median_pos, current_median, new_length); } } // Pallet's events. #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { /// A successful call of the `CreatePool` extrinsic will create this event. PoolCreated { /// The pool id associated with the pool. Note that the order of the coins may not be /// the same as the order specified in the create pool extrinsic. pool_id: PoolId, /// The account ID of the pool. 
pool_account: T::AccountId, }, /// A successful call of the `AddLiquidity` extrinsic will create this event. LiquidityAdded { /// The account that the liquidity was taken from. who: T::AccountId, /// The account that the liquidity tokens were minted to. mint_to: T::AccountId, /// The pool id of the pool that the liquidity was added to. pool_id: PoolId, /// The amount of the coin that was added to the pool. coin_amount: SubstrateAmount, /// The amount of the SRI that was added to the pool. sri_amount: SubstrateAmount, /// The amount of lp tokens that were minted of that id. lp_token_minted: SubstrateAmount, }, /// A successful call of the `RemoveLiquidity` extrinsic will create this event. LiquidityRemoved { /// The account that the liquidity tokens were burned from. who: T::AccountId, /// The account that the coins were transferred to. withdraw_to: T::AccountId, /// The pool id that the liquidity was removed from. pool_id: PoolId, /// The amount of the first coin that was removed from the pool. coin_amount: SubstrateAmount, /// The amount of the second coin that was removed from the pool. sri_amount: SubstrateAmount, /// The amount of lp tokens that were burned of that id. lp_token_burned: SubstrateAmount, }, /// Coins have been converted from one to another. Both `SwapExactTokenForToken` /// and `SwapTokenForExactToken` will generate this event. SwapExecuted { /// Which account was the instigator of the swap. who: T::AccountId, /// The account that the coins were transferred to. send_to: T::AccountId, /// The route of coin ids that the swap went through. /// E.g. A -> SRI -> B path: BoundedVec, /// The amount of the first coin that was swapped. amount_in: SubstrateAmount, /// The amount of the second coin that was received. amount_out: SubstrateAmount, }, } #[pallet::error] pub enum Error { /// Provided coins are equal. EqualCoins, /// Pool already exists. PoolExists, /// Desired amount can't be zero. 
WrongDesiredAmount, /// Provided amount should be greater than or equal to the existential deposit/coin's /// minimum amount. CoinAmountLessThanMinimum, /// Provided amount should be greater than or equal to the existential deposit/coin's /// minimum amount. SriAmountLessThanMinimum, /// Reserve needs to always be greater than or equal to the existential deposit/coin's /// minimum amount. ReserveLeftLessThanMinimum, /// Desired amount can't be equal to the pool reserve. AmountOutTooHigh, /// The pool doesn't exist. PoolNotFound, /// An overflow happened. Overflow, /// The minimum amount requirement for the first token in the pair wasn't met. CoinOneDepositDidNotMeetMinimum, /// The minimum amount requirement for the second token in the pair wasn't met. CoinTwoDepositDidNotMeetMinimum, /// The minimum amount requirement for the first token in the pair wasn't met. CoinOneWithdrawalDidNotMeetMinimum, /// The minimum amount requirement for the second token in the pair wasn't met. CoinTwoWithdrawalDidNotMeetMinimum, /// Optimal calculated amount is less than desired. OptimalAmountLessThanDesired, /// Insufficient liquidity minted. InsufficientLiquidityMinted, /// Requested liquidity can't be zero. ZeroLiquidity, /// Amount can't be zero. ZeroAmount, /// Calculated amount out is less than provided minimum amount. ProvidedMinimumNotSufficientForSwap, /// Provided maximum amount is not sufficient for swap. ProvidedMaximumNotSufficientForSwap, /// The provided path must consists of 2 coins at least. InvalidPath, /// It was not possible to calculate path data. PathError, /// The provided path must consists of unique coins. NonUniquePath, /// Unable to find an element in an array/vec that should have one-to-one correspondence /// with another. For example, an array of coins constituting a `path` should have a /// corresponding array of `amounts` along the path. 
CorrespondenceError, } #[pallet::hooks] impl Hooks> for Pallet { fn on_finalize(n: BlockNumberFor) { // we run this on on_finalize because we want to use the last price of the block for a coin. // This prevents the exploit where a malicious block proposer spikes the price in either // direction, then includes a swap in the other direction (ensuring they don't get arbitraged // against) // Since they'll have to leave the spike present at the end of the block, making the next // block the one to include any arbitrage transactions (which there's no guarantee they'll // produce), this cannot be done in a way without significant risk for coin in Pools::::iter_keys() { // insert the new price to our oracle window // The spot price for 1 coin, in atomic units, to SRI is used let sri_per_coin = if let Ok((sri_balance, coin_balance)) = Self::get_reserves(&Coin::Serai, &coin.into()) { // We use 1 coin to handle rounding errors which may occur with atomic units // If we used atomic units, any coin whose atomic unit is worth less than SRI's atomic // unit would cause a 'price' of 0 // If the decimals aren't large enough to provide sufficient buffer, use 10,000 let coin_decimals = coin.decimals().max(5); let accuracy_increase = HigherPrecisionBalance::from(SubstrateAmount::pow(10, coin_decimals)); u64::try_from( accuracy_increase * HigherPrecisionBalance::from(sri_balance) / HigherPrecisionBalance::from(coin_balance), ) .unwrap_or(u64::MAX) } else { 0 }; let sri_per_coin = Amount(sri_per_coin); SpotPriceForBlock::::set(n, coin, Some(sri_per_coin)); Self::insert_into_median(coin, sri_per_coin); if SpotPricesLength::::get(coin).unwrap() > T::MedianPriceWindowLength::get() { let old = n - T::MedianPriceWindowLength::get().into(); let old_price = SpotPriceForBlock::::get(old, coin).unwrap(); SpotPriceForBlock::::remove(old, coin); Self::remove_from_median(coin, old_price); } // update the oracle value let median = Self::median_price(coin).unwrap_or(Amount(0)); let oracle_value = 
Self::security_oracle_value(coin).unwrap_or(Amount(0)); if median > oracle_value { SecurityOracleValue::::set(coin, Some(median)); } } } } impl Pallet { /// Creates an empty liquidity pool and an associated new `lp_token` coin /// (the id of which is returned in the `Event::PoolCreated` event). /// /// Once a pool is created, someone may [`Pallet::add_liquidity`] to it. pub(crate) fn create_pool(coin: ExternalCoin) -> DispatchResult { // get pool_id let pool_id = Self::get_pool_id(coin.into(), Coin::native())?; ensure!(!Pools::::contains_key(pool_id), Error::::PoolExists); let pool_account = Self::get_pool_account(pool_id); frame_system::Pallet::::inc_providers(&pool_account); Pools::::insert(pool_id, ()); Self::deposit_event(Event::PoolCreated { pool_id, pool_account }); Ok(()) } /// A hook to be called whenever a network's session is rotated. pub fn on_new_session(network: NetworkId) { // Only track the price for non-SRI coins as this is SRI denominated if let NetworkId::External(n) = network { for coin in n.coins() { SecurityOracleValue::::set(coin, Self::median_price(coin)); } } } } /// Pallet's callable functions. #[pallet::call] impl Pallet { /// Provide liquidity into the pool of `coin1` and `coin2`. /// NOTE: an optimal amount of coin1 and coin2 will be calculated and /// might be different than the provided `amount1_desired`/`amount2_desired` /// thus you should provide the min amount you're happy to provide. /// Params `amount1_min`/`amount2_min` represent that. /// `mint_to` will be sent the liquidity tokens that represent this share of the pool. /// /// Once liquidity is added, someone may successfully call /// [`Pallet::swap_exact_tokens_for_tokens`] successfully. 
#[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::add_liquidity())] #[allow(clippy::too_many_arguments)] pub fn add_liquidity( origin: OriginFor, coin: ExternalCoin, coin_desired: SubstrateAmount, sri_desired: SubstrateAmount, coin_min: SubstrateAmount, sri_min: SubstrateAmount, mint_to: T::AccountId, ) -> DispatchResult { let sender = ensure_signed(origin)?; ensure!((sri_desired > 0) && (coin_desired > 0), Error::::WrongDesiredAmount); let pool_id = Self::get_pool_id(coin.into(), Coin::native())?; // create the pool if it doesn't exist. We can just attempt to do that because our checks // far enough to allow that. if Pools::::get(pool_id).is_none() { Self::create_pool(coin)?; } let pool_account = Self::get_pool_account(pool_id); let sri_reserve = Self::get_balance(&pool_account, Coin::Serai); let coin_reserve = Self::get_balance(&pool_account, coin.into()); let sri_amount: SubstrateAmount; let coin_amount: SubstrateAmount; if (sri_reserve == 0) || (coin_reserve == 0) { sri_amount = sri_desired; coin_amount = coin_desired; } else { let coin_optimal = Self::quote(sri_desired, sri_reserve, coin_reserve)?; if coin_optimal <= coin_desired { ensure!(coin_optimal >= coin_min, Error::::CoinTwoDepositDidNotMeetMinimum); sri_amount = sri_desired; coin_amount = coin_optimal; } else { let sri_optimal = Self::quote(coin_desired, coin_reserve, sri_reserve)?; ensure!(sri_optimal <= sri_desired, Error::::OptimalAmountLessThanDesired); ensure!(sri_optimal >= sri_min, Error::::CoinOneDepositDidNotMeetMinimum); sri_amount = sri_optimal; coin_amount = coin_desired; } } ensure!(sri_amount.saturating_add(sri_reserve) >= 1, Error::::SriAmountLessThanMinimum); ensure!(coin_amount.saturating_add(coin_reserve) >= 1, Error::::CoinAmountLessThanMinimum); Self::transfer( &sender, &pool_account, Balance { coin: Coin::Serai, amount: Amount(sri_amount) }, )?; Self::transfer( &sender, &pool_account, Balance { coin: coin.into(), amount: Amount(coin_amount) }, )?; let total_supply = 
LiquidityTokens::::supply(Coin::from(coin)); let lp_token_amount: SubstrateAmount; if total_supply == 0 { lp_token_amount = Self::calc_lp_amount_for_zero_supply(sri_amount, coin_amount)?; LiquidityTokens::::mint( pool_account, Balance { coin: coin.into(), amount: Amount(T::MintMinLiquidity::get()) }, )?; } else { let side1 = Self::mul_div(sri_amount, total_supply, sri_reserve)?; let side2 = Self::mul_div(coin_amount, total_supply, coin_reserve)?; lp_token_amount = side1.min(side2); } ensure!( lp_token_amount > T::MintMinLiquidity::get(), Error::::InsufficientLiquidityMinted ); LiquidityTokens::::mint( mint_to, Balance { coin: coin.into(), amount: Amount(lp_token_amount) }, )?; Self::deposit_event(Event::LiquidityAdded { who: sender, mint_to, pool_id, coin_amount, sri_amount, lp_token_minted: lp_token_amount, }); Ok(()) } /// Allows you to remove liquidity by providing the `lp_token_burn` tokens that will be /// burned in the process. With the usage of `amount1_min_receive`/`amount2_min_receive` /// it's possible to control the min amount of returned tokens you're happy with. 
#[pallet::call_index(1)] #[pallet::weight(T::WeightInfo::remove_liquidity())] pub fn remove_liquidity( origin: OriginFor, coin: ExternalCoin, lp_token_burn: SubstrateAmount, coin_min_receive: SubstrateAmount, sri_min_receive: SubstrateAmount, withdraw_to: T::AccountId, ) -> DispatchResult { let sender = ensure_signed(origin.clone())?; let pool_id = Self::get_pool_id(coin.into(), Coin::native()).unwrap(); ensure!(lp_token_burn > 0, Error::::ZeroLiquidity); Pools::::get(pool_id).as_ref().ok_or(Error::::PoolNotFound)?; let pool_account = Self::get_pool_account(pool_id); let sri_reserve = Self::get_balance(&pool_account, Coin::Serai); let coin_reserve = Self::get_balance(&pool_account, coin.into()); let total_supply = LiquidityTokens::::supply(Coin::from(coin)); let lp_redeem_amount = lp_token_burn; let sri_amount = Self::mul_div(lp_redeem_amount, sri_reserve, total_supply)?; let coin_amount = Self::mul_div(lp_redeem_amount, coin_reserve, total_supply)?; ensure!( (sri_amount != 0) && (sri_amount >= sri_min_receive), Error::::CoinOneWithdrawalDidNotMeetMinimum ); ensure!( (coin_amount != 0) && (coin_amount >= coin_min_receive), Error::::CoinTwoWithdrawalDidNotMeetMinimum ); let sri_reserve_left = sri_reserve.saturating_sub(sri_amount); let coin_reserve_left = coin_reserve.saturating_sub(coin_amount); ensure!(sri_reserve_left >= 1, Error::::ReserveLeftLessThanMinimum); ensure!(coin_reserve_left >= 1, Error::::ReserveLeftLessThanMinimum); // burn the provided lp token amount that includes the fee LiquidityTokens::::burn( origin, Balance { coin: coin.into(), amount: Amount(lp_token_burn) }, )?; Self::transfer( &pool_account, &withdraw_to, Balance { coin: Coin::Serai, amount: Amount(sri_amount) }, )?; Self::transfer( &pool_account, &withdraw_to, Balance { coin: coin.into(), amount: Amount(coin_amount) }, )?; Self::deposit_event(Event::LiquidityRemoved { who: sender, withdraw_to, pool_id, coin_amount, sri_amount, lp_token_burned: lp_token_burn, }); Ok(()) } /// Swap the 
exact amount of `coin1` into `coin2`. /// `amount_out_min` param allows you to specify the min amount of the `coin2` /// you're happy to receive. /// /// [`DexApi::quote_price_exact_tokens_for_tokens`] runtime call can be called /// for a quote. #[pallet::call_index(2)] #[pallet::weight(T::WeightInfo::swap_exact_tokens_for_tokens())] pub fn swap_exact_tokens_for_tokens( origin: OriginFor, path: BoundedVec, amount_in: SubstrateAmount, amount_out_min: SubstrateAmount, send_to: T::AccountId, ) -> DispatchResult { let sender = ensure_signed(origin)?; Self::do_swap_exact_tokens_for_tokens( sender, path, amount_in, Some(amount_out_min), send_to, )?; Ok(()) } /// Swap any amount of `coin1` to get the exact amount of `coin2`. /// `amount_in_max` param allows to specify the max amount of the `coin1` /// you're happy to provide. /// /// [`DexApi::quote_price_tokens_for_exact_tokens`] runtime call can be called /// for a quote. #[pallet::call_index(3)] #[pallet::weight(T::WeightInfo::swap_tokens_for_exact_tokens())] pub fn swap_tokens_for_exact_tokens( origin: OriginFor, path: BoundedVec, amount_out: SubstrateAmount, amount_in_max: SubstrateAmount, send_to: T::AccountId, ) -> DispatchResult { let sender = ensure_signed(origin)?; Self::do_swap_tokens_for_exact_tokens( sender, path, amount_out, Some(amount_in_max), send_to, )?; Ok(()) } } impl Pallet { /// Swap exactly `amount_in` of coin `path[0]` for coin `path[1]`. /// If an `amount_out_min` is specified, it will return an error if it is unable to acquire /// the amount desired. /// /// Withdraws the `path[0]` coin from `sender`, deposits the `path[1]` coin to `send_to`. /// /// If successful, returns the amount of `path[1]` acquired for the `amount_in`. 
pub fn do_swap_exact_tokens_for_tokens( sender: T::AccountId, path: BoundedVec, amount_in: SubstrateAmount, amount_out_min: Option, send_to: T::AccountId, ) -> Result { ensure!(amount_in > 0, Error::::ZeroAmount); if let Some(amount_out_min) = amount_out_min { ensure!(amount_out_min > 0, Error::::ZeroAmount); } Self::validate_swap_path(&path)?; let amounts = Self::get_amounts_out(amount_in, &path)?; let amount_out = *amounts.last().defensive_ok_or("get_amounts_out() returned an empty result")?; if let Some(amount_out_min) = amount_out_min { ensure!(amount_out >= amount_out_min, Error::::ProvidedMinimumNotSufficientForSwap); } Self::do_swap(sender, &amounts, path, send_to)?; Ok(amount_out) } /// Take the `path[0]` coin and swap some amount for `amount_out` of the `path[1]`. If an /// `amount_in_max` is specified, it will return an error if acquiring `amount_out` would be /// too costly. /// /// Withdraws `path[0]` coin from `sender`, deposits the `path[1]` coin to `send_to`, /// /// If successful returns the amount of the `path[0]` taken to provide `path[1]`. pub fn do_swap_tokens_for_exact_tokens( sender: T::AccountId, path: BoundedVec, amount_out: SubstrateAmount, amount_in_max: Option, send_to: T::AccountId, ) -> Result { ensure!(amount_out > 0, Error::::ZeroAmount); if let Some(amount_in_max) = amount_in_max { ensure!(amount_in_max > 0, Error::::ZeroAmount); } Self::validate_swap_path(&path)?; let amounts = Self::get_amounts_in(amount_out, &path)?; let amount_in = *amounts.first().defensive_ok_or("get_amounts_in() returned an empty result")?; if let Some(amount_in_max) = amount_in_max { ensure!(amount_in <= amount_in_max, Error::::ProvidedMaximumNotSufficientForSwap); } Self::do_swap(sender, &amounts, path, send_to)?; Ok(amount_in) } /// Transfer an `amount` of `coin_id`. 
fn transfer( from: &T::AccountId, to: &T::AccountId, balance: Balance, ) -> Result { CoinsPallet::::transfer_internal(*from, *to, balance)?; Ok(balance.amount) } /// Convert a `HigherPrecisionBalance` type to an `SubstrateAmount`. pub(crate) fn convert_hpb_to_coin_balance( amount: HigherPrecisionBalance, ) -> Result> { amount.try_into().map_err(|_| Error::::Overflow) } /// Swap coins along a `path`, depositing in `send_to`. pub(crate) fn do_swap( sender: T::AccountId, amounts: &[SubstrateAmount], path: BoundedVec, send_to: T::AccountId, ) -> Result<(), DispatchError> { ensure!(amounts.len() > 1, Error::::CorrespondenceError); if let Some([coin1, coin2]) = &path.get(0 .. 2) { let pool_id = Self::get_pool_id(*coin1, *coin2)?; let pool_account = Self::get_pool_account(pool_id); // amounts should always contain a corresponding element to path. let first_amount = amounts.first().ok_or(Error::::CorrespondenceError)?; Self::transfer( &sender, &pool_account, Balance { coin: *coin1, amount: Amount(*first_amount) }, )?; let mut i = 0; let path_len = u32::try_from(path.len()).unwrap(); #[allow(clippy::explicit_counter_loop)] for coins_pair in path.windows(2) { if let [coin1, coin2] = coins_pair { let pool_id = Self::get_pool_id(*coin1, *coin2)?; let pool_account = Self::get_pool_account(pool_id); let amount_out = amounts.get((i + 1) as usize).ok_or(Error::::CorrespondenceError)?; let to = if i < path_len - 2 { let coin3 = path.get((i + 2) as usize).ok_or(Error::::PathError)?; Self::get_pool_account(Self::get_pool_id(*coin2, *coin3)?) } else { send_to }; let reserve = Self::get_balance(&pool_account, *coin2); let reserve_left = reserve.saturating_sub(*amount_out); ensure!(reserve_left >= 1, Error::::ReserveLeftLessThanMinimum); Self::transfer( &pool_account, &to, Balance { coin: *coin2, amount: Amount(*amount_out) }, )?; // update the volume let swap_volume = if *coin1 == Coin::Serai { amounts.get(i as usize).ok_or(Error::::CorrespondenceError)? 
} else { amount_out }; let existing = SwapVolume::::get(pool_id).unwrap_or(0); let new_volume = existing.saturating_add(*swap_volume); SwapVolume::::set(pool_id, Some(new_volume)); } i += 1; } Self::deposit_event(Event::SwapExecuted { who: sender, send_to, path, amount_in: *first_amount, amount_out: *amounts.last().expect("Always has more than 1 element"), }); } else { return Err(Error::::InvalidPath.into()); } Ok(()) } /// The account ID of the pool. /// /// This actually does computation. If you need to keep using it, then make sure you cache /// the value and only call this once. pub fn get_pool_account(pool_id: PoolId) -> T::AccountId { let encoded_pool_id = sp_io::hashing::blake2_256(&Encode::encode(&pool_id)[..]); Decode::decode(&mut TrailingZeroInput::new(encoded_pool_id.as_ref())) .expect("infinite length input; no invalid inputs for type; qed") } /// Get the `owner`'s balance of `coin`, which could be the chain's native coin or another /// fungible. Returns a value in the form of an `Amount`. fn get_balance(owner: &T::AccountId, coin: Coin) -> SubstrateAmount { CoinsPallet::::balance(*owner, coin).0 } /// Returns a pool id constructed from 2 coins. /// We expect deterministic order, so (coin1, coin2) or (coin2, coin1) returns the same /// result. Coins have to be different and one of them should be Coin::Serai. pub fn get_pool_id(coin1: Coin, coin2: Coin) -> Result> { ensure!((coin1 == Coin::Serai) || (coin2 == Coin::Serai), Error::::PoolNotFound); ensure!(coin1 != coin2, Error::::EqualCoins); ExternalCoin::try_from(coin1) .or_else(|()| ExternalCoin::try_from(coin2)) .map_err(|()| Error::::PoolNotFound) } /// Returns the balance of each coin in the pool. /// The tuple result is in the order requested (not necessarily the same as pool order). 
pub fn get_reserves( coin1: &Coin, coin2: &Coin, ) -> Result<(SubstrateAmount, SubstrateAmount), Error> { let pool_id = Self::get_pool_id(*coin1, *coin2)?; let pool_account = Self::get_pool_account(pool_id); let balance1 = Self::get_balance(&pool_account, *coin1); let balance2 = Self::get_balance(&pool_account, *coin2); if (balance1 == 0) || (balance2 == 0) { Err(Error::::PoolNotFound)?; } Ok((balance1, balance2)) } /// Leading to an amount at the end of a `path`, get the required amounts in. pub(crate) fn get_amounts_in( amount_out: SubstrateAmount, path: &BoundedVec, ) -> Result, DispatchError> { let mut amounts: Vec = vec![amount_out]; for coins_pair in path.windows(2).rev() { if let [coin1, coin2] = coins_pair { let (reserve_in, reserve_out) = Self::get_reserves(coin1, coin2)?; let prev_amount = amounts.last().expect("Always has at least one element"); let amount_in = Self::get_amount_in(*prev_amount, reserve_in, reserve_out)?; amounts.push(amount_in); } } amounts.reverse(); Ok(amounts) } /// Following an amount into a `path`, get the corresponding amounts out. pub(crate) fn get_amounts_out( amount_in: SubstrateAmount, path: &BoundedVec, ) -> Result, DispatchError> { let mut amounts: Vec = vec![amount_in]; for coins_pair in path.windows(2) { if let [coin1, coin2] = coins_pair { let (reserve_in, reserve_out) = Self::get_reserves(coin1, coin2)?; let prev_amount = amounts.last().expect("Always has at least one element"); let amount_out = Self::get_amount_out(*prev_amount, reserve_in, reserve_out)?; amounts.push(amount_out); } } Ok(amounts) } /// Used by the RPC service to provide current prices. 
pub fn quote_price_exact_tokens_for_tokens( coin1: Coin, coin2: Coin, amount: SubstrateAmount, include_fee: bool, ) -> Option { let pool_id = Self::get_pool_id(coin1, coin2).ok()?; let pool_account = Self::get_pool_account(pool_id); let balance1 = Self::get_balance(&pool_account, coin1); let balance2 = Self::get_balance(&pool_account, coin2); if balance1 != 0 { if include_fee { Self::get_amount_out(amount, balance1, balance2).ok() } else { Self::quote(amount, balance1, balance2).ok() } } else { None } } /// Used by the RPC service to provide current prices. pub fn quote_price_tokens_for_exact_tokens( coin1: Coin, coin2: Coin, amount: SubstrateAmount, include_fee: bool, ) -> Option { let pool_id = Self::get_pool_id(coin1, coin2).ok()?; let pool_account = Self::get_pool_account(pool_id); let balance1 = Self::get_balance(&pool_account, coin1); let balance2 = Self::get_balance(&pool_account, coin2); if balance1 != 0 { if include_fee { Self::get_amount_in(amount, balance1, balance2).ok() } else { Self::quote(amount, balance2, balance1).ok() } } else { None } } /// Calculates the optimal amount from the reserves. pub fn quote( amount: SubstrateAmount, reserve1: SubstrateAmount, reserve2: SubstrateAmount, ) -> Result> { // amount * reserve2 / reserve1 Self::mul_div(amount, reserve2, reserve1) } pub(super) fn calc_lp_amount_for_zero_supply( amount1: SubstrateAmount, amount2: SubstrateAmount, ) -> Result> { let amount1 = HigherPrecisionBalance::from(amount1); let amount2 = HigherPrecisionBalance::from(amount2); let result = amount1 .checked_mul(amount2) .ok_or(Error::::Overflow)? 
.integer_sqrt() .checked_sub(T::MintMinLiquidity::get().into()) .ok_or(Error::::InsufficientLiquidityMinted)?; result.try_into().map_err(|_| Error::::Overflow) } fn mul_div( a: SubstrateAmount, b: SubstrateAmount, c: SubstrateAmount, ) -> Result> { let a = HigherPrecisionBalance::from(a); let b = HigherPrecisionBalance::from(b); let c = HigherPrecisionBalance::from(c); let result = a.checked_mul(b).ok_or(Error::::Overflow)?.checked_div(c).ok_or(Error::::Overflow)?; result.try_into().map_err(|_| Error::::Overflow) } /// Calculates amount out. /// /// Given an input amount of an coin and pair reserves, returns the maximum output amount /// of the other coin. pub fn get_amount_out( amount_in: SubstrateAmount, reserve_in: SubstrateAmount, reserve_out: SubstrateAmount, ) -> Result> { let amount_in = HigherPrecisionBalance::from(amount_in); let reserve_in = HigherPrecisionBalance::from(reserve_in); let reserve_out = HigherPrecisionBalance::from(reserve_out); if (reserve_in == 0) || (reserve_out == 0) { return Err(Error::::ZeroLiquidity); } let amount_in_with_fee = amount_in .checked_mul( HigherPrecisionBalance::from(1000u32) - HigherPrecisionBalance::from(T::LPFee::get()), ) .ok_or(Error::::Overflow)?; let numerator = amount_in_with_fee.checked_mul(reserve_out).ok_or(Error::::Overflow)?; let denominator = reserve_in .checked_mul(1000u32.into()) .ok_or(Error::::Overflow)? .checked_add(amount_in_with_fee) .ok_or(Error::::Overflow)?; let result = numerator.checked_div(denominator).ok_or(Error::::Overflow)?; result.try_into().map_err(|_| Error::::Overflow) } /// Calculates amount in. /// /// Given an output amount of an coin and pair reserves, returns a required input amount /// of the other coin. 
pub fn get_amount_in( amount_out: SubstrateAmount, reserve_in: SubstrateAmount, reserve_out: SubstrateAmount, ) -> Result> { let amount_out = HigherPrecisionBalance::from(amount_out); let reserve_in = HigherPrecisionBalance::from(reserve_in); let reserve_out = HigherPrecisionBalance::from(reserve_out); if (reserve_in == 0) || (reserve_out == 0) { Err(Error::::ZeroLiquidity)? } if amount_out >= reserve_out { Err(Error::::AmountOutTooHigh)? } let numerator = reserve_in .checked_mul(amount_out) .ok_or(Error::::Overflow)? .checked_mul(1000u32.into()) .ok_or(Error::::Overflow)?; let denominator = reserve_out .checked_sub(amount_out) .ok_or(Error::::Overflow)? .checked_mul( HigherPrecisionBalance::from(1000u32) - HigherPrecisionBalance::from(T::LPFee::get()), ) .ok_or(Error::::Overflow)?; let result = numerator .checked_div(denominator) .ok_or(Error::::Overflow)? .checked_add(1) .ok_or(Error::::Overflow)?; result.try_into().map_err(|_| Error::::Overflow) } /// Ensure that a path is valid. fn validate_swap_path( path: &BoundedVec, ) -> Result<(), DispatchError> { ensure!(path.len() >= 2, Error::::InvalidPath); // validate all the pools in the path are unique let mut pools = BoundedBTreeSet::::new(); for coins_pair in path.windows(2) { if let [coin1, coin2] = coins_pair { let pool_id = Self::get_pool_id(*coin1, *coin2)?; let new_element = pools.try_insert(pool_id).map_err(|_| Error::::Overflow)?; if !new_element { return Err(Error::::NonUniquePath.into()); } } } Ok(()) } } } impl Swap for Pallet { fn swap_exact_tokens_for_tokens( sender: T::AccountId, path: Vec, amount_in: HigherPrecisionBalance, amount_out_min: Option, send_to: T::AccountId, ) -> Result { let path = path.try_into().map_err(|_| Error::::PathError)?; let amount_out_min = amount_out_min.map(Self::convert_hpb_to_coin_balance).transpose()?; let amount_out = Self::do_swap_exact_tokens_for_tokens( sender, path, Self::convert_hpb_to_coin_balance(amount_in)?, amount_out_min, send_to, )?; Ok(amount_out.into()) } fn 
swap_tokens_for_exact_tokens( sender: T::AccountId, path: Vec, amount_out: HigherPrecisionBalance, amount_in_max: Option, send_to: T::AccountId, ) -> Result { let path = path.try_into().map_err(|_| Error::::PathError)?; let amount_in_max = amount_in_max.map(Self::convert_hpb_to_coin_balance).transpose()?; let amount_in = Self::do_swap_tokens_for_exact_tokens( sender, path, Self::convert_hpb_to_coin_balance(amount_out)?, amount_in_max, send_to, )?; Ok(amount_in.into()) } } sp_api::decl_runtime_apis! { /// This runtime api allows people to query the size of the liquidity pools /// and quote prices for swaps. pub trait DexApi { /// Provides a quote for [`Pallet::swap_tokens_for_exact_tokens`]. /// /// Note that the price may have changed by the time the transaction is executed. /// (Use `amount_in_max` to control slippage.) fn quote_price_tokens_for_exact_tokens( coin1: Coin, coin2: Coin, amount: SubstrateAmount, include_fee: bool ) -> Option; /// Provides a quote for [`Pallet::swap_exact_tokens_for_tokens`]. /// /// Note that the price may have changed by the time the transaction is executed. /// (Use `amount_out_min` to control slippage.) fn quote_price_exact_tokens_for_tokens( coin1: Coin, coin2: Coin, amount: SubstrateAmount, include_fee: bool ) -> Option; /// Returns the size of the liquidity pool for the given coin pair. fn get_reserves(coin1: Coin, coin2: Coin) -> Option<(SubstrateAmount, SubstrateAmount)>; } } sp_core::generate_feature_enabled_macro!( runtime_benchmarks_enabled, feature = "runtime-benchmarks", $ ); ================================================ FILE: substrate/dex/pallet/src/mock.rs ================================================ // This file was originally: // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // It has been forked into a crate distributed under the AGPL 3.0. // Please check the current distribution for up-to-date copyright and licensing information. //! Test environment for Dex pallet. use super::*; use crate as dex; use frame_support::{ construct_runtime, derive_impl, traits::{ConstU16, ConstU32, ConstU64}, }; use sp_core::sr25519::Public; use sp_runtime::{traits::IdentityLookup, BuildStorage}; use serai_primitives::{Coin, Balance, Amount, system_address}; pub use coins_pallet as coins; type Block = frame_system::mocking::MockBlock; pub const MEDIAN_PRICE_WINDOW_LENGTH: u16 = 10; construct_runtime!( pub enum Test { System: frame_system, CoinsPallet: coins, LiquidityTokens: coins::::{Pallet, Call, Storage, Event}, Dex: dex, } ); #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { type AccountId = Public; type Lookup = IdentityLookup; type Block = Block; } impl coins::Config for Test { type AllowMint = (); } impl coins::Config for Test { type AllowMint = (); } impl Config for Test { type WeightInfo = (); type LPFee = ConstU32<3>; // means 0.3% type MaxSwapPathLength = ConstU32<4>; type MedianPriceWindowLength = ConstU16<{ MEDIAN_PRICE_WINDOW_LENGTH }>; // 100 is good enough when the main currency has 12 decimals. 
type MintMinLiquidity = ConstU64<100>; } pub(crate) fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let accounts: Vec = vec![ system_address(b"account1").into(), system_address(b"account2").into(), system_address(b"account3").into(), system_address(b"account4").into(), ]; coins::GenesisConfig:: { accounts: accounts .into_iter() .map(|a| (a, Balance { coin: Coin::Serai, amount: Amount(1 << 60) })) .collect(), _ignore: Default::default(), } .assimilate_storage(&mut t) .unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| System::set_block_number(1)); ext } ================================================ FILE: substrate/dex/pallet/src/tests.rs ================================================ // This file was originally: // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // It has been forked into a crate distributed under the AGPL 3.0. // Please check the current distribution for up-to-date copyright and licensing information. 
use crate::{
  mock::{*, MEDIAN_PRICE_WINDOW_LENGTH},
  *,
};
use frame_support::{assert_noop, assert_ok};

pub use coins_pallet as coins;
use coins::Pallet as CoinsPallet;

use serai_primitives::{Balance, COINS, PublicKey, system_address, Amount};

// The LP-token instance of the coins pallet and its error type.
type LiquidityTokens<T> = coins_pallet::Pallet<T, coins_pallet::Instance1>;
type LiquidityTokensError<T> = coins_pallet::Error<T, coins_pallet::Instance1>;

/// Drains and returns all Dex events emitted so far.
fn events() -> Vec<Event<Test>> {
  let result = System::events()
    .into_iter()
    .map(|r| r.event)
    .filter_map(|e| if let mock::RuntimeEvent::Dex(inner) = e { Some(inner) } else { None })
    .collect();

  System::reset_events();

  result
}

/// All existing pool ids, sorted for deterministic comparison.
fn pools() -> Vec<PoolId> {
  let mut s: Vec<_> = Pools::<Test>::iter().map(|x| x.0).collect();
  s.sort();
  s
}

fn coins() -> Vec<Coin> {
  COINS.to_vec()
}

fn balance(owner: PublicKey, coin: Coin) -> u64 {
  CoinsPallet::<Test>::balance(owner, coin).0
}

fn pool_balance(owner: PublicKey, token_id: Coin) -> u64 {
  LiquidityTokens::<Test>::balance(owner, token_id).0
}

// Builds a BoundedVec from the given elements.
macro_rules! bvec {
  ($( $x:tt )*) => {
    vec![$( $x )*].try_into().unwrap()
  }
}

#[test]
fn check_pool_accounts_dont_collide() {
  use std::collections::HashSet;

  let mut map = HashSet::new();
  for coin in coins() {
    if let Coin::External(c) = coin {
      let account = Dex::get_pool_account(c);
      if map.contains(&account) {
        panic!("Collision at {c:?}");
      }
      map.insert(account);
    }
  }
}

#[test]
fn check_max_numbers() {
  new_test_ext().execute_with(|| {
    assert_eq!(Dex::quote(3u64, u64::MAX, u64::MAX).ok().unwrap(), 3);
    assert!(Dex::quote(u64::MAX, 3u64, u64::MAX).is_err());
    assert_eq!(Dex::quote(u64::MAX, u64::MAX, 1u64).ok().unwrap(), 1);

    assert_eq!(Dex::get_amount_out(100u64, u64::MAX, u64::MAX).ok().unwrap(), 99);
    assert_eq!(Dex::get_amount_in(100u64, u64::MAX, u64::MAX).ok().unwrap(), 101);
  });
}

#[test]
fn can_create_pool() {
  new_test_ext().execute_with(|| {
    let coin_account_deposit: u64 = 0;
    let user: PublicKey = system_address(b"user1").into();
    let coin1 = Coin::native();
    let coin2 = Coin::External(ExternalCoin::Monero);
    let pool_id = Dex::get_pool_id(coin1, coin2).unwrap();

    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin1, amount: Amount(1000) }));
    assert_ok!(Dex::create_pool(coin2.try_into().unwrap()));

    assert_eq!(balance(user, coin1), 1000 - coin_account_deposit);
    assert_eq!(
      events(),
      [Event::<Test>::PoolCreated { pool_id, pool_account: Dex::get_pool_account(pool_id) }]
    );
    assert_eq!(pools(), vec![pool_id]);
  });
}

#[test]
fn create_same_pool_twice_should_fail() {
  new_test_ext().execute_with(|| {
    let coin = ExternalCoin::Dai;
    assert_ok!(Dex::create_pool(coin));
    assert_noop!(Dex::create_pool(coin), Error::<Test>::PoolExists);
  });
}

#[test]
fn different_pools_should_have_different_lp_tokens() {
  new_test_ext().execute_with(|| {
    let coin1 = Coin::native();
    let coin2 = Coin::External(ExternalCoin::Bitcoin);
    let coin3 = Coin::External(ExternalCoin::Ether);
    let pool_id_1_2 = Dex::get_pool_id(coin1, coin2).unwrap();
    let pool_id_1_3 = Dex::get_pool_id(coin1, coin3).unwrap();

    let lp_token2_1 = coin2;
    assert_ok!(Dex::create_pool(coin2.try_into().unwrap()));
    let lp_token3_1 = coin3;
    assert_eq!(
      events(),
      [Event::<Test>::PoolCreated {
        pool_id: pool_id_1_2,
        pool_account: Dex::get_pool_account(pool_id_1_2),
      }]
    );

    assert_ok!(Dex::create_pool(coin3.try_into().unwrap()));
    assert_eq!(
      events(),
      [Event::<Test>::PoolCreated {
        pool_id: pool_id_1_3,
        pool_account: Dex::get_pool_account(pool_id_1_3),
      }]
    );

    assert_ne!(lp_token2_1, lp_token3_1);
  });
}

#[test]
fn can_add_liquidity() {
  new_test_ext().execute_with(|| {
    let user = system_address(b"user1").into();
    let coin1 = Coin::native();
    let coin2 = Coin::External(ExternalCoin::Dai);
    let coin3 = Coin::External(ExternalCoin::Monero);

    let lp_token1 = coin2;
    assert_ok!(Dex::create_pool(coin2.try_into().unwrap()));
    let lp_token2 = coin3;
    assert_ok!(Dex::create_pool(coin3.try_into().unwrap()));

    assert_ok!(CoinsPallet::<Test>::mint(
      user,
      Balance { coin: coin1, amount: Amount(10000 * 2 + 1) }
    ));
    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin2, amount: Amount(1000) }));
    assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin3, amount: Amount(1000) }));
assert_ok!(Dex::add_liquidity( RuntimeOrigin::signed(user), coin2.try_into().unwrap(), 10, 10000, 10, 10000, user, )); let pool_id = Dex::get_pool_id(coin1, coin2).unwrap(); assert!(events().contains(&Event::::LiquidityAdded { who: user, mint_to: user, pool_id, sri_amount: 10000, coin_amount: 10, lp_token_minted: 216, })); let pallet_account = Dex::get_pool_account(pool_id); assert_eq!(balance(pallet_account, coin1), 10000); assert_eq!(balance(pallet_account, coin2), 10); assert_eq!(balance(user, coin1), 10000 + 1); assert_eq!(balance(user, coin2), 1000 - 10); assert_eq!(pool_balance(user, lp_token1), 216); // try to pass the non-native - native coins, the result should be the same assert_ok!(Dex::add_liquidity( RuntimeOrigin::signed(user), coin3.try_into().unwrap(), 10, 10000, 10, 10000, user, )); let pool_id = Dex::get_pool_id(coin1, coin3).unwrap(); assert!(events().contains(&Event::::LiquidityAdded { who: user, mint_to: user, pool_id, sri_amount: 10000, coin_amount: 10, lp_token_minted: 216, })); let pallet_account = Dex::get_pool_account(pool_id); assert_eq!(balance(pallet_account, coin1), 10000); assert_eq!(balance(pallet_account, coin3), 10); assert_eq!(balance(user, coin1), 1); assert_eq!(balance(user, coin3), 1000 - 10); assert_eq!(pool_balance(user, lp_token2), 216); }); } #[test] fn add_tiny_liquidity_leads_to_insufficient_liquidity_minted_error() { new_test_ext().execute_with(|| { let user = system_address(b"user1").into(); let coin1 = Coin::native(); let coin2 = ExternalCoin::Bitcoin; assert_ok!(Dex::create_pool(coin2)); assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin1, amount: Amount(1000) })); assert_ok!(CoinsPallet::::mint( user, Balance { coin: coin2.into(), amount: Amount(1000) } )); assert_noop!( Dex::add_liquidity(RuntimeOrigin::signed(user), coin2, 1, 1, 1, 1, user), Error::::InsufficientLiquidityMinted ); }); } #[test] fn add_tiny_liquidity_directly_to_pool_address() { new_test_ext().execute_with(|| { let user = 
system_address(b"user1").into(); let coin1 = Coin::native(); let coin2 = Coin::External(ExternalCoin::Ether); let coin3 = Coin::External(ExternalCoin::Dai); assert_ok!(Dex::create_pool(coin2.try_into().unwrap())); assert_ok!(Dex::create_pool(coin3.try_into().unwrap())); assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin1, amount: Amount(10000 * 2) })); assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin2, amount: Amount(10000) })); assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin3, amount: Amount(10000) })); // check we're still able to add the liquidity even when the pool already has some coin1 let pallet_account = Dex::get_pool_account(Dex::get_pool_id(coin1, coin2).unwrap()); assert_ok!(CoinsPallet::::mint( pallet_account, Balance { coin: coin1, amount: Amount(1000) } )); assert_ok!(Dex::add_liquidity( RuntimeOrigin::signed(user), coin2.try_into().unwrap(), 10, 10000, 10, 10000, user, )); // check the same but for coin3 (non-native token) let pallet_account = Dex::get_pool_account(Dex::get_pool_id(coin1, coin3).unwrap()); assert_ok!(CoinsPallet::::mint( pallet_account, Balance { coin: coin2, amount: Amount(1) } )); assert_ok!(Dex::add_liquidity( RuntimeOrigin::signed(user), coin3.try_into().unwrap(), 10, 10000, 10, 10000, user, )); }); } #[test] fn can_remove_liquidity() { new_test_ext().execute_with(|| { let user = system_address(b"user1").into(); let coin1 = Coin::native(); let coin2 = Coin::External(ExternalCoin::Monero); let pool_id = Dex::get_pool_id(coin1, coin2).unwrap(); let lp_token = coin2; assert_ok!(Dex::create_pool(coin2.try_into().unwrap())); assert_ok!(CoinsPallet::::mint( user, Balance { coin: coin1, amount: Amount(10000000000) } )); assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin2, amount: Amount(100000) })); assert_ok!(Dex::add_liquidity( RuntimeOrigin::signed(user), coin2.try_into().unwrap(), 100000, 1000000000, 100000, 1000000000, user, )); let total_lp_received = pool_balance(user, lp_token); 
assert_ok!(Dex::remove_liquidity( RuntimeOrigin::signed(user), coin2.try_into().unwrap(), total_lp_received, 0, 0, user, )); assert!(events().contains(&Event::::LiquidityRemoved { who: user, withdraw_to: user, pool_id, sri_amount: 999990000, coin_amount: 99999, lp_token_burned: total_lp_received, })); let pool_account = Dex::get_pool_account(pool_id); assert_eq!(balance(pool_account, coin1), 10000); assert_eq!(balance(pool_account, coin2), 1); assert_eq!(pool_balance(pool_account, lp_token), 100); assert_eq!(balance(user, coin1), 10000000000 - 1000000000 + 999990000); assert_eq!(balance(user, coin2), 99999); assert_eq!(pool_balance(user, lp_token), 0); }); } #[test] fn can_not_redeem_more_lp_tokens_than_were_minted() { new_test_ext().execute_with(|| { let user = system_address(b"user1").into(); let coin1 = Coin::native(); let coin2 = Coin::External(ExternalCoin::Dai); let lp_token = coin2; assert_ok!(Dex::create_pool(coin2.try_into().unwrap())); assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin1, amount: Amount(10000) })); assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin2, amount: Amount(1000) })); assert_ok!(Dex::add_liquidity( RuntimeOrigin::signed(user), coin2.try_into().unwrap(), 10, 10000, 10, 10000, user, )); // Only 216 lp_tokens_minted assert_eq!(pool_balance(user, lp_token), 216); assert_noop!( Dex::remove_liquidity( RuntimeOrigin::signed(user), coin2.try_into().unwrap(), 216 + 1, // Try and redeem 10 lp tokens while only 9 minted. 
0, 0, user, ), LiquidityTokensError::::NotEnoughCoins ); }); } #[test] fn can_quote_price() { new_test_ext().execute_with(|| { let user = system_address(b"user1").into(); let coin1 = Coin::native(); let coin2 = Coin::External(ExternalCoin::Ether); assert_ok!(Dex::create_pool(coin2.try_into().unwrap())); assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin1, amount: Amount(100000) })); assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin2, amount: Amount(1000) })); assert_ok!(Dex::add_liquidity( RuntimeOrigin::signed(user), coin2.try_into().unwrap(), 200, 10000, 1, 1, user, )); assert_eq!( Dex::quote_price_exact_tokens_for_tokens(Coin::native(), coin2, 3000, false,), Some(60) ); // including fee so should get less out... assert_eq!( Dex::quote_price_exact_tokens_for_tokens(Coin::native(), coin2, 3000, true,), Some(46) ); // Check it still gives same price: // (if the above accidentally exchanged then it would not give same quote as before) assert_eq!( Dex::quote_price_exact_tokens_for_tokens(Coin::native(), coin2, 3000, false,), Some(60) ); // including fee so should get less out... assert_eq!( Dex::quote_price_exact_tokens_for_tokens(Coin::native(), coin2, 3000, true,), Some(46) ); // Check inverse: assert_eq!( Dex::quote_price_exact_tokens_for_tokens(coin2, Coin::native(), 60, false,), Some(3000) ); // including fee so should get less out... assert_eq!( Dex::quote_price_exact_tokens_for_tokens(coin2, Coin::native(), 60, true,), Some(2302) ); // // same tests as above but for quote_price_tokens_for_exact_tokens: // assert_eq!( Dex::quote_price_tokens_for_exact_tokens(Coin::native(), coin2, 60, false,), Some(3000) ); // including fee so should need to put more in... 
assert_eq!( Dex::quote_price_tokens_for_exact_tokens(Coin::native(), coin2, 60, true,), Some(4299) ); // Check it still gives same price: // (if the above accidentally exchanged then it would not give same quote as before) assert_eq!( Dex::quote_price_tokens_for_exact_tokens(Coin::native(), coin2, 60, false,), Some(3000) ); // including fee so should need to put more in... assert_eq!( Dex::quote_price_tokens_for_exact_tokens(Coin::native(), coin2, 60, true,), Some(4299) ); // Check inverse: assert_eq!( Dex::quote_price_tokens_for_exact_tokens(coin2, Coin::native(), 3000, false,), Some(60) ); // including fee so should need to put more in... assert_eq!( Dex::quote_price_tokens_for_exact_tokens(coin2, Coin::native(), 3000, true,), Some(86) ); // // roundtrip: Without fees one should get the original number // let amount_in = 100; assert_eq!( Dex::quote_price_exact_tokens_for_tokens(coin2, Coin::native(), amount_in, false,).and_then( |amount| Dex::quote_price_exact_tokens_for_tokens(Coin::native(), coin2, amount, false,) ), Some(amount_in) ); assert_eq!( Dex::quote_price_exact_tokens_for_tokens(Coin::native(), coin2, amount_in, false,).and_then( |amount| Dex::quote_price_exact_tokens_for_tokens(coin2, Coin::native(), amount, false,) ), Some(amount_in) ); assert_eq!( Dex::quote_price_tokens_for_exact_tokens(coin2, Coin::native(), amount_in, false,).and_then( |amount| Dex::quote_price_tokens_for_exact_tokens(Coin::native(), coin2, amount, false,) ), Some(amount_in) ); assert_eq!( Dex::quote_price_tokens_for_exact_tokens(Coin::native(), coin2, amount_in, false,).and_then( |amount| Dex::quote_price_tokens_for_exact_tokens(coin2, Coin::native(), amount, false,) ), Some(amount_in) ); }); } #[test] fn quote_price_exact_tokens_for_tokens_matches_execution() { new_test_ext().execute_with(|| { let user = system_address(b"user1").into(); let user2 = system_address(b"user2").into(); let coin1 = Coin::native(); let coin2 = Coin::External(ExternalCoin::Bitcoin); 
assert_ok!(Dex::create_pool(coin2.try_into().unwrap())); assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin1, amount: Amount(100000) })); assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin2, amount: Amount(1000) })); assert_ok!(Dex::add_liquidity( RuntimeOrigin::signed(user), coin2.try_into().unwrap(), 200, 10000, 1, 1, user, )); let amount = 1; let quoted_price = 49; assert_eq!( Dex::quote_price_exact_tokens_for_tokens(coin2, coin1, amount, true,), Some(quoted_price) ); assert_ok!(CoinsPallet::::mint(user2, Balance { coin: coin2, amount: Amount(amount) })); let prior_sri_balance = 0; assert_eq!(prior_sri_balance, balance(user2, coin1)); assert_ok!(Dex::swap_exact_tokens_for_tokens( RuntimeOrigin::signed(user2), bvec![coin2, coin1], amount, 1, user2, )); assert_eq!(prior_sri_balance + quoted_price, balance(user2, coin1)); }); } #[test] fn quote_price_tokens_for_exact_tokens_matches_execution() { new_test_ext().execute_with(|| { let user = system_address(b"user1").into(); let user2 = system_address(b"user2").into(); let coin1 = Coin::native(); let coin2 = Coin::External(ExternalCoin::Monero); assert_ok!(Dex::create_pool(coin2.try_into().unwrap())); assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin1, amount: Amount(100000) })); assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin2, amount: Amount(1000) })); assert_ok!(Dex::add_liquidity( RuntimeOrigin::signed(user), coin2.try_into().unwrap(), 200, 10000, 1, 1, user, )); let amount = 49; let quoted_price = 1; assert_eq!( Dex::quote_price_tokens_for_exact_tokens(coin2, coin1, amount, true,), Some(quoted_price) ); assert_ok!(CoinsPallet::::mint(user2, Balance { coin: coin2, amount: Amount(amount) })); let prior_sri_balance = 0; assert_eq!(prior_sri_balance, balance(user2, coin1)); let prior_coin_balance = 49; assert_eq!(prior_coin_balance, balance(user2, coin2)); assert_ok!(Dex::swap_tokens_for_exact_tokens( RuntimeOrigin::signed(user2), bvec![coin2, coin1], amount, 1, user2, )); 
assert_eq!(prior_sri_balance + amount, balance(user2, coin1)); assert_eq!(prior_coin_balance - quoted_price, balance(user2, coin2)); }); } #[test] fn can_swap_with_native() { new_test_ext().execute_with(|| { let user = system_address(b"user1").into(); let coin1 = Coin::native(); let coin2 = Coin::External(ExternalCoin::Ether); let pool_id = Dex::get_pool_id(coin1, coin2).unwrap(); assert_ok!(Dex::create_pool(coin2.try_into().unwrap())); assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin1, amount: Amount(10000) })); assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin2, amount: Amount(1000) })); let liquidity1 = 10000; let liquidity2 = 200; assert_ok!(Dex::add_liquidity( RuntimeOrigin::signed(user), coin2.try_into().unwrap(), liquidity2, liquidity1, 1, 1, user, )); let input_amount = 100; let expect_receive = Dex::get_amount_out(input_amount, liquidity2, liquidity1).ok().unwrap(); assert_ok!(Dex::swap_exact_tokens_for_tokens( RuntimeOrigin::signed(user), bvec![coin2, coin1], input_amount, 1, user, )); let pallet_account = Dex::get_pool_account(pool_id); assert_eq!(balance(user, coin1), expect_receive); assert_eq!(balance(user, coin2), 1000 - liquidity2 - input_amount); assert_eq!(balance(pallet_account, coin1), liquidity1 - expect_receive); assert_eq!(balance(pallet_account, coin2), liquidity2 + input_amount); }); } #[test] fn can_swap_with_realistic_values() { new_test_ext().execute_with(|| { let user = system_address(b"user1").into(); let sri = Coin::native(); let dai = Coin::External(ExternalCoin::Dai); assert_ok!(Dex::create_pool(dai.try_into().unwrap())); const UNIT: u64 = 1_000_000_000; assert_ok!(CoinsPallet::::mint( user, Balance { coin: sri, amount: Amount(300_000 * UNIT) } )); assert_ok!(CoinsPallet::::mint( user, Balance { coin: dai, amount: Amount(1_100_000 * UNIT) } )); let liquidity_sri = 200_000 * UNIT; // ratio for a 5$ price let liquidity_dai = 1_000_000 * UNIT; assert_ok!(Dex::add_liquidity( RuntimeOrigin::signed(user), 
dai.try_into().unwrap(), liquidity_dai, liquidity_sri, 1, 1, user, )); let input_amount = 10 * UNIT; // dai assert_ok!(Dex::swap_exact_tokens_for_tokens( RuntimeOrigin::signed(user), bvec![dai, sri], input_amount, 1, user, )); assert!(events().contains(&Event::::SwapExecuted { who: user, send_to: user, path: bvec![dai, sri], amount_in: 10 * UNIT, // usd amount_out: 1_993_980_120, // About 2 dot after div by UNIT. })); }); } #[test] fn can_not_swap_in_pool_with_no_liquidity_added_yet() { new_test_ext().execute_with(|| { let user = system_address(b"user1").into(); let coin1 = Coin::native(); let coin2 = Coin::External(ExternalCoin::Monero); assert_ok!(Dex::create_pool(coin2.try_into().unwrap())); // Check can't swap an empty pool assert_noop!( Dex::swap_exact_tokens_for_tokens( RuntimeOrigin::signed(user), bvec![coin2, coin1], 10, 1, user, ), Error::::PoolNotFound ); }); } #[test] fn check_no_panic_when_try_swap_close_to_empty_pool() { new_test_ext().execute_with(|| { let user = system_address(b"user1").into(); let coin1 = Coin::native(); let coin2 = Coin::External(ExternalCoin::Bitcoin); let pool_id = Dex::get_pool_id(coin1, coin2).unwrap(); let lp_token = coin2; assert_ok!(Dex::create_pool(coin2.try_into().unwrap())); assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin1, amount: Amount(10000) })); assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin2, amount: Amount(1000) })); let liquidity1 = 10000; let liquidity2 = 200; assert_ok!(Dex::add_liquidity( RuntimeOrigin::signed(user), coin2.try_into().unwrap(), liquidity2, liquidity1, 1, 1, user, )); let lp_token_minted = pool_balance(user, lp_token); assert!(events().contains(&Event::::LiquidityAdded { who: user, mint_to: user, pool_id, sri_amount: liquidity1, coin_amount: liquidity2, lp_token_minted, })); let pallet_account = Dex::get_pool_account(pool_id); assert_eq!(balance(pallet_account, coin1), liquidity1); assert_eq!(balance(pallet_account, coin2), liquidity2); assert_ok!(Dex::remove_liquidity( 
RuntimeOrigin::signed(user), coin2.try_into().unwrap(), lp_token_minted, 1, 1, user, )); // Now, the pool should exist but be almost empty. // Let's try and drain it. assert_eq!(balance(pallet_account, coin1), 708); assert_eq!(balance(pallet_account, coin2), 15); // validate the reserve should always stay above the ED // Following test fail again due to the force on ED being > 1. // assert_noop!( // Dex::swap_tokens_for_exact_tokens( // RuntimeOrigin::signed(user), // bvec![coin2, coin1], // 708 - ed + 1, // amount_out // 500, // amount_in_max // user, // ), // Error::::ReserveLeftLessThanMinimum // ); assert_ok!(Dex::swap_tokens_for_exact_tokens( RuntimeOrigin::signed(user), bvec![coin2, coin1], 608, // amount_out 500, // amount_in_max user, )); let token_1_left = balance(pallet_account, coin1); let token_2_left = balance(pallet_account, coin2); assert_eq!(token_1_left, 708 - 608); // The price for the last tokens should be very high assert_eq!( Dex::get_amount_in(token_1_left - 1, token_2_left, token_1_left).ok().unwrap(), 10625 ); assert_noop!( Dex::swap_tokens_for_exact_tokens( RuntimeOrigin::signed(user), bvec![coin2, coin1], token_1_left - 1, // amount_out 1000, // amount_in_max user, ), Error::::ProvidedMaximumNotSufficientForSwap ); // Try to swap what's left in the pool assert_noop!( Dex::swap_tokens_for_exact_tokens( RuntimeOrigin::signed(user), bvec![coin2, coin1], token_1_left, // amount_out 1000, // amount_in_max user, ), Error::::AmountOutTooHigh ); }); } #[test] fn swap_should_not_work_if_too_much_slippage() { new_test_ext().execute_with(|| { let user = system_address(b"user1").into(); let coin1 = Coin::native(); let coin2 = Coin::External(ExternalCoin::Ether); assert_ok!(Dex::create_pool(coin2.try_into().unwrap())); assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin1, amount: Amount(10000) })); assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin2, amount: Amount(1000) })); let liquidity1 = 10000; let liquidity2 = 200; 
assert_ok!(Dex::add_liquidity( RuntimeOrigin::signed(user), coin2.try_into().unwrap(), liquidity2, liquidity1, 1, 1, user, )); let exchange_amount = 100; assert_noop!( Dex::swap_exact_tokens_for_tokens( RuntimeOrigin::signed(user), bvec![coin2, coin1], exchange_amount, // amount_in 4000, // amount_out_min user, ), Error::::ProvidedMinimumNotSufficientForSwap ); }); } #[test] fn can_swap_tokens_for_exact_tokens() { new_test_ext().execute_with(|| { let user = system_address(b"user1").into(); let coin1 = Coin::native(); let coin2 = Coin::External(ExternalCoin::Dai); let pool_id = Dex::get_pool_id(coin1, coin2).unwrap(); assert_ok!(Dex::create_pool(coin2.try_into().unwrap())); assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin1, amount: Amount(20000) })); assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin2, amount: Amount(1000) })); let pallet_account = Dex::get_pool_account(pool_id); let before1 = balance(pallet_account, coin1) + balance(user, coin1); let before2 = balance(pallet_account, coin2) + balance(user, coin2); let liquidity1 = 10000; let liquidity2 = 200; assert_ok!(Dex::add_liquidity( RuntimeOrigin::signed(user), coin2.try_into().unwrap(), liquidity2, liquidity1, 1, 1, user, )); let exchange_out = 50; let expect_in = Dex::get_amount_in(exchange_out, liquidity1, liquidity2).ok().unwrap(); assert_ok!(Dex::swap_tokens_for_exact_tokens( RuntimeOrigin::signed(user), bvec![coin1, coin2], exchange_out, // amount_out 3500, // amount_in_max user, )); assert_eq!(balance(user, coin1), 10000 - expect_in); assert_eq!(balance(user, coin2), 1000 - liquidity2 + exchange_out); assert_eq!(balance(pallet_account, coin1), liquidity1 + expect_in); assert_eq!(balance(pallet_account, coin2), liquidity2 - exchange_out); // check invariants: // native and coin totals should be preserved. 
assert_eq!(before1, balance(pallet_account, coin1) + balance(user, coin1)); assert_eq!(before2, balance(pallet_account, coin2) + balance(user, coin2)); }); } #[test] fn can_swap_tokens_for_exact_tokens_when_not_liquidity_provider() { new_test_ext().execute_with(|| { let user = system_address(b"user1").into(); let user2 = system_address(b"user2").into(); let coin1 = Coin::native(); let coin2 = Coin::External(ExternalCoin::Monero); let pool_id = Dex::get_pool_id(coin1, coin2).unwrap(); let lp_token = coin2; assert_ok!(Dex::create_pool(coin2.try_into().unwrap())); let base1 = 10000; let base2 = 1000; assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin1, amount: Amount(base1) })); assert_ok!(CoinsPallet::::mint(user2, Balance { coin: coin1, amount: Amount(base1) })); assert_ok!(CoinsPallet::::mint(user2, Balance { coin: coin2, amount: Amount(base2) })); let pallet_account = Dex::get_pool_account(pool_id); let before1 = balance(pallet_account, coin1) + balance(user, coin1) + balance(user2, coin1); let before2 = balance(pallet_account, coin2) + balance(user, coin2) + balance(user2, coin2); let liquidity1 = 10000; let liquidity2 = 200; assert_ok!(Dex::add_liquidity( RuntimeOrigin::signed(user2), coin2.try_into().unwrap(), liquidity2, liquidity1, 1, 1, user2, )); assert_eq!(balance(user, coin1), base1); assert_eq!(balance(user, coin2), 0); let exchange_out = 50; let expect_in = Dex::get_amount_in(exchange_out, liquidity1, liquidity2).ok().unwrap(); assert_ok!(Dex::swap_tokens_for_exact_tokens( RuntimeOrigin::signed(user), bvec![coin1, coin2], exchange_out, // amount_out 3500, // amount_in_max user, )); assert_eq!(balance(user, coin1), base1 - expect_in); assert_eq!(balance(pallet_account, coin1), liquidity1 + expect_in); assert_eq!(balance(user, coin2), exchange_out); assert_eq!(balance(pallet_account, coin2), liquidity2 - exchange_out); // check invariants: // native and coin totals should be preserved. 
assert_eq!( before1, balance(pallet_account, coin1) + balance(user, coin1) + balance(user2, coin1) ); assert_eq!( before2, balance(pallet_account, coin2) + balance(user, coin2) + balance(user2, coin2) ); let lp_token_minted = pool_balance(user2, lp_token); assert_eq!(lp_token_minted, 1314); assert_ok!(Dex::remove_liquidity( RuntimeOrigin::signed(user2), coin2.try_into().unwrap(), lp_token_minted, 0, 0, user2, )); }); } #[test] fn swap_tokens_for_exact_tokens_should_not_work_if_too_much_slippage() { new_test_ext().execute_with(|| { let user = system_address(b"user1").into(); let coin1 = Coin::native(); let coin2 = Coin::External(ExternalCoin::Ether); assert_ok!(Dex::create_pool(coin2.try_into().unwrap())); assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin1, amount: Amount(20000) })); assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin2, amount: Amount(1000) })); let liquidity1 = 10000; let liquidity2 = 200; assert_ok!(Dex::add_liquidity( RuntimeOrigin::signed(user), coin2.try_into().unwrap(), liquidity2, liquidity1, 1, 1, user, )); let exchange_out = 1; assert_noop!( Dex::swap_tokens_for_exact_tokens( RuntimeOrigin::signed(user), bvec![coin1, coin2], exchange_out, // amount_out 50, // amount_in_max just greater than slippage. 
user, ), Error::::ProvidedMaximumNotSufficientForSwap ); }); } #[test] fn swap_exact_tokens_for_tokens_in_multi_hops() { new_test_ext().execute_with(|| { let user = system_address(b"user1").into(); let coin1 = Coin::native(); let coin2 = Coin::External(ExternalCoin::Dai); let coin3 = Coin::External(ExternalCoin::Monero); assert_ok!(Dex::create_pool(coin2.try_into().unwrap())); assert_ok!(Dex::create_pool(coin3.try_into().unwrap())); let base1 = 10000; let base2 = 10000; assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin1, amount: Amount(base1 * 2) })); assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin2, amount: Amount(base2) })); assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin3, amount: Amount(base2) })); let liquidity1 = 10000; let liquidity2 = 200; let liquidity3 = 2000; assert_ok!(Dex::add_liquidity( RuntimeOrigin::signed(user), coin2.try_into().unwrap(), liquidity2, liquidity1, 1, 1, user, )); assert_ok!(Dex::add_liquidity( RuntimeOrigin::signed(user), coin3.try_into().unwrap(), liquidity3, liquidity1, 1, 1, user, )); let input_amount = 500; let expect_out2 = Dex::get_amount_out(input_amount, liquidity2, liquidity1).ok().unwrap(); let expect_out3 = Dex::get_amount_out(expect_out2, liquidity1, liquidity3).ok().unwrap(); assert_noop!( Dex::swap_exact_tokens_for_tokens( RuntimeOrigin::signed(user), bvec![coin1], input_amount, 80, user, ), Error::::InvalidPath ); assert_noop!( Dex::swap_exact_tokens_for_tokens( RuntimeOrigin::signed(user), bvec![coin2, coin1, coin2], input_amount, 80, user, ), Error::::NonUniquePath ); assert_ok!(Dex::swap_exact_tokens_for_tokens( RuntimeOrigin::signed(user), bvec![coin2, coin1, coin3], input_amount, // amount_in 80, // amount_out_min user, )); let pool_id1 = Dex::get_pool_id(coin1, coin2).unwrap(); let pool_id2 = Dex::get_pool_id(coin1, coin3).unwrap(); let pallet_account1 = Dex::get_pool_account(pool_id1); let pallet_account2 = Dex::get_pool_account(pool_id2); assert_eq!(balance(user, coin2), base2 - 
liquidity2 - input_amount); assert_eq!(balance(pallet_account1, coin2), liquidity2 + input_amount); assert_eq!(balance(pallet_account1, coin1), liquidity1 - expect_out2); assert_eq!(balance(pallet_account2, coin1), liquidity1 + expect_out2); assert_eq!(balance(pallet_account2, coin3), liquidity3 - expect_out3); assert_eq!(balance(user, coin3), 10000 - liquidity3 + expect_out3); }); } #[test] fn swap_tokens_for_exact_tokens_in_multi_hops() { new_test_ext().execute_with(|| { let user = system_address(b"user1").into(); let coin1 = Coin::native(); let coin2 = Coin::External(ExternalCoin::Bitcoin); let coin3 = Coin::External(ExternalCoin::Ether); assert_ok!(Dex::create_pool(coin2.try_into().unwrap())); assert_ok!(Dex::create_pool(coin3.try_into().unwrap())); let base1 = 10000; let base2 = 10000; assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin1, amount: Amount(base1 * 2) })); assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin2, amount: Amount(base2) })); assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin3, amount: Amount(base2) })); let liquidity1 = 10000; let liquidity2 = 200; let liquidity3 = 2000; assert_ok!(Dex::add_liquidity( RuntimeOrigin::signed(user), coin2.try_into().unwrap(), liquidity2, liquidity1, 1, 1, user, )); assert_ok!(Dex::add_liquidity( RuntimeOrigin::signed(user), coin3.try_into().unwrap(), liquidity3, liquidity1, 1, 1, user, )); let exchange_out3 = 100; let expect_in2 = Dex::get_amount_in(exchange_out3, liquidity1, liquidity3).ok().unwrap(); let expect_in1 = Dex::get_amount_in(expect_in2, liquidity2, liquidity1).ok().unwrap(); assert_ok!(Dex::swap_tokens_for_exact_tokens( RuntimeOrigin::signed(user), bvec![coin2, coin1, coin3], exchange_out3, // amount_out 1000, // amount_in_max user, )); let pool_id1 = Dex::get_pool_id(coin1, coin2).unwrap(); let pool_id2 = Dex::get_pool_id(coin1, coin3).unwrap(); let pallet_account1 = Dex::get_pool_account(pool_id1); let pallet_account2 = Dex::get_pool_account(pool_id2); 
assert_eq!(balance(user, coin2), base2 - liquidity2 - expect_in1); assert_eq!(balance(pallet_account1, coin1), liquidity1 - expect_in2); assert_eq!(balance(pallet_account1, coin2), liquidity2 + expect_in1); assert_eq!(balance(pallet_account2, coin1), liquidity1 + expect_in2); assert_eq!(balance(pallet_account2, coin3), liquidity3 - exchange_out3); assert_eq!(balance(user, coin3), 10000 - liquidity3 + exchange_out3); }); } #[test] fn can_not_swap_same_coin() { new_test_ext().execute_with(|| { let user = system_address(b"user1").into(); let coin1 = Coin::External(ExternalCoin::Dai); assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin1, amount: Amount(1000) })); let exchange_amount = 10; assert_noop!( Dex::swap_exact_tokens_for_tokens( RuntimeOrigin::signed(user), bvec![coin1, coin1], exchange_amount, 1, user, ), Error::::PoolNotFound ); assert_noop!( Dex::swap_exact_tokens_for_tokens( RuntimeOrigin::signed(user), bvec![Coin::native(), Coin::native()], exchange_amount, 1, user, ), Error::::EqualCoins ); }); } #[test] fn validate_pool_id_sorting() { new_test_ext().execute_with(|| { // Serai < Bitcoin < Ether < Dai < Monero. // coin1 <= coin2 for this test to pass. 
let native = Coin::native(); let coin1 = Coin::External(ExternalCoin::Bitcoin); let coin2 = Coin::External(ExternalCoin::Monero); assert_eq!(Dex::get_pool_id(native, coin2).unwrap(), coin2.try_into().unwrap()); assert_eq!(Dex::get_pool_id(coin2, native).unwrap(), coin2.try_into().unwrap()); assert!(matches!(Dex::get_pool_id(native, native), Err(Error::::EqualCoins))); assert!(matches!(Dex::get_pool_id(coin2, coin1), Err(Error::::PoolNotFound))); assert!(coin2 > coin1); assert!(coin1 <= coin1); assert_eq!(coin1, coin1); assert!(native < coin1); }); } #[test] fn cannot_block_pool_creation() { new_test_ext().execute_with(|| { // User 1 is the pool creator let user = system_address(b"user1").into(); // User 2 is the attacker let attacker = system_address(b"attacker").into(); assert_ok!(CoinsPallet::::mint( attacker, Balance { coin: Coin::native(), amount: Amount(10000) } )); // The target pool the user wants to create is Native <=> Coin(2) let coin1 = Coin::native(); let coin2 = Coin::External(ExternalCoin::Ether); // Attacker computes the still non-existing pool account for the target pair let pool_account = Dex::get_pool_account(Dex::get_pool_id(coin2, coin1).unwrap()); // And transfers 1 to that pool account assert_ok!(CoinsPallet::::transfer_internal( attacker, pool_account, Balance { coin: Coin::native(), amount: Amount(1) } )); // Then, the attacker creates 14 tokens and sends one of each to the pool account // skip the coin1 and coin2 coins. 
for coin in coins().into_iter().filter(|c| (*c != coin1 && *c != coin2)) { assert_ok!(CoinsPallet::::mint(attacker, Balance { coin, amount: Amount(1000) })); assert_ok!(CoinsPallet::::transfer_internal( attacker, pool_account, Balance { coin, amount: Amount(1) } )); } // User can still create the pool assert_ok!(Dex::create_pool(coin2.try_into().unwrap())); // User has to transfer one Coin(2) token to the pool account (otherwise add_liquidity will // fail with `CoinTwoDepositDidNotMeetMinimum`), also transfer native token for the same error. assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin1, amount: Amount(10000) })); assert_ok!(CoinsPallet::::mint(user, Balance { coin: coin2, amount: Amount(10000) })); assert_ok!(CoinsPallet::::transfer_internal( user, pool_account, Balance { coin: coin2, amount: Amount(1) } )); assert_ok!(CoinsPallet::::transfer_internal( user, pool_account, Balance { coin: coin1, amount: Amount(100) } )); // add_liquidity shouldn't fail because of the number of consumers assert_ok!(Dex::add_liquidity( RuntimeOrigin::signed(user), coin2.try_into().unwrap(), 100, 9900, 10, 9900, user, )); }); } #[test] fn test_median_price() { new_test_ext().execute_with(|| { use rand_core::{RngCore, OsRng}; let mut prices = vec![]; for i in 0 .. 100 { // Randomly use an active number if (i != 0) && (OsRng.next_u64() % u64::from(MEDIAN_PRICE_WINDOW_LENGTH / 3) == 0) { let old_index = usize::try_from( OsRng.next_u64() % u64::from(MEDIAN_PRICE_WINDOW_LENGTH) % u64::try_from(prices.len()).unwrap(), ) .unwrap(); let window_base = prices.len().saturating_sub(MEDIAN_PRICE_WINDOW_LENGTH.into()); prices.push(prices[window_base + old_index]); } else { prices.push(OsRng.next_u64()); } } let coin = ExternalCoin::Bitcoin; assert!(prices.len() >= (2 * usize::from(MEDIAN_PRICE_WINDOW_LENGTH))); for i in 0 .. 
prices.len() { let price = Amount(prices[i]); let n = BlockNumberFor::::from(u32::try_from(i).unwrap()); SpotPriceForBlock::::set(n, coin, Some(price)); Dex::insert_into_median(coin, price); if SpotPricesLength::::get(coin).unwrap() > MEDIAN_PRICE_WINDOW_LENGTH { let old = n - u64::from(MEDIAN_PRICE_WINDOW_LENGTH); let old_price = SpotPriceForBlock::::get(old, coin).unwrap(); SpotPriceForBlock::::remove(old, coin); Dex::remove_from_median(coin, old_price); } // get the current window (cloning so our sort doesn't affect the original array) let window_base = (i + 1).saturating_sub(MEDIAN_PRICE_WINDOW_LENGTH.into()); let mut window = Vec::from(&prices[window_base ..= i]); assert!(window.len() <= MEDIAN_PRICE_WINDOW_LENGTH.into()); // get the median window.sort(); let median_index = window.len() / 2; assert_eq!(Dex::median_price(coin).unwrap(), Amount(window[median_index])); } }); } ================================================ FILE: substrate/dex/pallet/src/types.rs ================================================ // This file was originally: // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // It has been forked into a crate distributed under the AGPL 3.0. // Please check the current distribution for up-to-date copyright and licensing information. use super::*; /// Trait for providing methods to swap between the various coin classes. 
pub trait Swap { /// Swap exactly `amount_in` of coin `path[0]` for coin `path[1]`. /// If an `amount_out_min` is specified, it will return an error if it is unable to acquire /// the amount desired. /// /// Withdraws the `path[0]` coin from `sender`, deposits the `path[1]` coin to `send_to`, /// /// If successful, returns the amount of `path[1]` acquired for the `amount_in`. fn swap_exact_tokens_for_tokens( sender: AccountId, path: Vec, amount_in: Balance, amount_out_min: Option, send_to: AccountId, ) -> Result; /// Take the `path[0]` coin and swap some amount for `amount_out` of the `path[1]`. If an /// `amount_in_max` is specified, it will return an error if acquiring `amount_out` would be /// too costly. /// /// Withdraws `path[0]` coin from `sender`, deposits `path[1]` coin to `send_to`, /// /// If successful returns the amount of the `path[0]` taken to provide `path[1]`. fn swap_tokens_for_exact_tokens( sender: AccountId, path: Vec, amount_out: Balance, amount_in_max: Option, send_to: AccountId, ) -> Result; } ================================================ FILE: substrate/dex/pallet/src/weights.rs ================================================ // This file was originally: // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // It has been forked into a crate distributed under the AGPL 3.0. // Please check the current distribution for up-to-date copyright and licensing information. 
//! Autogenerated weights for Dex Pallet. //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev //! DATE: 2023-07-18, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `runner-gghbxkbs-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: // target/production/substrate // benchmark // pallet // --steps=50 // --repeat=20 // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 // --json-file=/builds/parity/mirrors/substrate/.git/.artifacts/bench.json // --pallet=serai_dex_pallet // --chain=dev // --header=./HEADER-APACHE2 // --output=./substrate/dex/pallet/src/weights.rs // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] #![allow(missing_docs)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; /// Weight functions needed for Dex Pallet. pub trait WeightInfo { fn create_pool() -> Weight; fn add_liquidity() -> Weight; fn remove_liquidity() -> Weight; fn swap_exact_tokens_for_tokens() -> Weight; fn swap_tokens_for_exact_tokens() -> Weight; } /// Weights for Dex Pallet using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// Storage: `DexPallet::Pools` (r:1 w:1) /// Proof: `DexPallet::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Coins::Account` (r:1 w:1) /// Proof: `Coins::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) /// Storage: `Coins::Coin` (r:1 w:1) /// Proof: `Coins::Coin` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `DexPallet::NextPoolCoinId` (r:1 w:1) /// Proof: `DexPallet::NextPoolCoinId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `PoolCoins::Coin` (r:1 w:1) /// Proof: `PoolCoins::Coin` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `PoolCoins::Account` (r:1 w:1) /// Proof: `PoolCoins::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn create_pool() -> Weight { // Proof Size summary in bytes: // Measured: `729` // Estimated: `6196` // Minimum execution time: 131_688_000 picoseconds. 
Weight::from_parts(134_092_000, 6196) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(8_u64)) } /// Storage: `DexPallet::Pools` (r:1 w:0) /// Proof: `DexPallet::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Coins::Coin` (r:1 w:1) /// Proof: `Coins::Coin` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `Coins::Account` (r:2 w:2) /// Proof: `Coins::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) /// Storage: `PoolCoins::Coin` (r:1 w:1) /// Proof: `PoolCoins::Coin` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `PoolCoins::Account` (r:2 w:2) /// Proof: `PoolCoins::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn add_liquidity() -> Weight { // Proof Size summary in bytes: // Measured: `1382` // Estimated: `6208` // Minimum execution time: 157_310_000 picoseconds. 
Weight::from_parts(161_547_000, 6208) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } /// Storage: `DexPallet::Pools` (r:1 w:0) /// Proof: `DexPallet::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Coins::Coin` (r:1 w:1) /// Proof: `Coins::Coin` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `Coins::Account` (r:2 w:2) /// Proof: `Coins::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) /// Storage: `PoolCoins::Coin` (r:1 w:1) /// Proof: `PoolCoins::Coin` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `PoolCoins::Account` (r:1 w:1) /// Proof: `PoolCoins::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn remove_liquidity() -> Weight { // Proof Size summary in bytes: // Measured: `1371` // Estimated: `6208` // Minimum execution time: 142_769_000 picoseconds. Weight::from_parts(145_139_000, 6208) .saturating_add(T::DbWeight::get().reads(7_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Coins::Coin` (r:3 w:3) /// Proof: `Coins::Coin` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `Coins::Account` (r:6 w:6) /// Proof: `Coins::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn swap_exact_tokens_for_tokens() -> Weight { // Proof Size summary in bytes: // Measured: `1738` // Estimated: `16644` // Minimum execution time: 213_186_000 picoseconds. 
Weight::from_parts(217_471_000, 16644) .saturating_add(T::DbWeight::get().reads(10_u64)) .saturating_add(T::DbWeight::get().writes(10_u64)) } /// Storage: `Coins::Coin` (r:3 w:3) /// Proof: `Coins::Coin` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `Coins::Account` (r:6 w:6) /// Proof: `Coins::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn swap_tokens_for_exact_tokens() -> Weight { // Proof Size summary in bytes: // Measured: `1738` // Estimated: `16644` // Minimum execution time: 213_793_000 picoseconds. Weight::from_parts(218_584_000, 16644) .saturating_add(T::DbWeight::get().reads(10_u64)) .saturating_add(T::DbWeight::get().writes(10_u64)) } } // For backwards compatibility and tests. impl WeightInfo for () { /// Storage: `DexPallet::Pools` (r:1 w:1) /// Proof: `DexPallet::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Coins::Account` (r:1 w:1) /// Proof: `Coins::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) /// Storage: `Coins::Coin` (r:1 w:1) /// Proof: `Coins::Coin` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `DexPallet::NextPoolCoinId` (r:1 w:1) /// Proof: `DexPallet::NextPoolCoinId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `PoolCoins::Coin` (r:1 w:1) /// Proof: `PoolCoins::Coin` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `PoolCoins::Account` (r:1 w:1) /// Proof: `PoolCoins::Account` (`max_values`: None, `max_size`: Some(134), 
added: 2609, mode: `MaxEncodedLen`) fn create_pool() -> Weight { // Proof Size summary in bytes: // Measured: `729` // Estimated: `6196` // Minimum execution time: 131_688_000 picoseconds. Weight::from_parts(134_092_000, 6196) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(8_u64)) } /// Storage: `DexPallet::Pools` (r:1 w:0) /// Proof: `DexPallet::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Coins::Coin` (r:1 w:1) /// Proof: `Coins::Coin` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `Coins::Account` (r:2 w:2) /// Proof: `Coins::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) /// Storage: `PoolCoins::Coin` (r:1 w:1) /// Proof: `PoolCoins::Coin` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `PoolCoins::Account` (r:2 w:2) /// Proof: `PoolCoins::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn add_liquidity() -> Weight { // Proof Size summary in bytes: // Measured: `1382` // Estimated: `6208` // Minimum execution time: 157_310_000 picoseconds. 
Weight::from_parts(161_547_000, 6208) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } /// Storage: `DexPallet::Pools` (r:1 w:0) /// Proof: `DexPallet::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Coins::Coin` (r:1 w:1) /// Proof: `Coins::Coin` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `Coins::Account` (r:2 w:2) /// Proof: `Coins::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) /// Storage: `PoolCoins::Coin` (r:1 w:1) /// Proof: `PoolCoins::Coin` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `PoolCoins::Account` (r:1 w:1) /// Proof: `PoolCoins::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn remove_liquidity() -> Weight { // Proof Size summary in bytes: // Measured: `1371` // Estimated: `6208` // Minimum execution time: 142_769_000 picoseconds. Weight::from_parts(145_139_000, 6208) .saturating_add(RocksDbWeight::get().reads(7_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Coins::Coin` (r:3 w:3) /// Proof: `Coins::Coin` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `Coins::Account` (r:6 w:6) /// Proof: `Coins::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) fn swap_exact_tokens_for_tokens() -> Weight { // Proof Size summary in bytes: // Measured: `1738` // Estimated: `16644` // Minimum execution time: 213_186_000 picoseconds. 
Weight::from_parts(217_471_000, 16644) .saturating_add(RocksDbWeight::get().reads(10_u64)) .saturating_add(RocksDbWeight::get().writes(10_u64)) } /// Storage: `Coins::Coin` (r:3 w:3) /// Proof: `Coins::Coin` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) /// Storage: `Coins::Account` (r:6 w:6) /// Proof: `Coins::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn swap_tokens_for_exact_tokens() -> Weight { // Proof Size summary in bytes: // Measured: `1738` // Estimated: `16644` // Minimum execution time: 213_793_000 picoseconds. Weight::from_parts(218_584_000, 16644) .saturating_add(RocksDbWeight::get().reads(10_u64)) .saturating_add(RocksDbWeight::get().writes(10_u64)) } } ================================================ FILE: substrate/economic-security/pallet/Cargo.toml ================================================ [package] name = "serai-economic-security-pallet" version = "0.1.0" description = "Economic Security pallet for Serai" license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/economic-security/pallet" authors = ["Akil Demir "] edition = "2021" rust-version = "1.77" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [package.metadata.cargo-machete] ignored = ["scale"] [lints] workspace = true [dependencies] scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] } frame-system = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } frame-support = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } dex-pallet = { package = "serai-dex-pallet", path 
= "../../dex/pallet", default-features = false } coins-pallet = { package = "serai-coins-pallet", path = "../../coins/pallet", default-features = false } serai-primitives = { path = "../../primitives", default-features = false } [features] std = [ "scale/std", "frame-system/std", "frame-support/std", "dex-pallet/std", "coins-pallet/std", "serai-primitives/std", ] try-runtime = [] # TODO default = ["std"] ================================================ FILE: substrate/economic-security/pallet/LICENSE ================================================ AGPL-3.0-only license Copyright (c) 2024 Luke Parker This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License Version 3 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see . 
================================================ FILE: substrate/economic-security/pallet/src/lib.rs ================================================ #![cfg_attr(not(feature = "std"), no_std)] #[allow( unreachable_patterns, clippy::cast_possible_truncation, clippy::no_effect_underscore_binding, clippy::empty_docs )] #[frame_support::pallet] pub mod pallet { use frame_system::pallet_prelude::*; use frame_support::pallet_prelude::*; use dex_pallet::{Config as DexConfig, Pallet as Dex}; use coins_pallet::{Config as CoinsConfig, AllowMint}; use serai_primitives::*; #[pallet::config] pub trait Config: frame_system::Config + CoinsConfig + DexConfig {} #[pallet::event] #[pallet::generate_deposit(fn deposit_event)] pub enum Event { EconomicSecurityReached { network: ExternalNetworkId }, } #[pallet::pallet] pub struct Pallet(PhantomData); #[pallet::storage] #[pallet::getter(fn economic_security_block)] pub(crate) type EconomicSecurityBlock = StorageMap<_, Identity, ExternalNetworkId, BlockNumberFor, OptionQuery>; #[pallet::hooks] impl Hooks> for Pallet { fn on_initialize(n: BlockNumberFor) -> Weight { // we accept we reached economic security once we can mint smallest amount of a network's coin for coin in EXTERNAL_COINS { let existing = EconomicSecurityBlock::::get(coin.network()); // TODO: we don't need to check for oracle value if is_allowed returns false when there is // no coin value if existing.is_none() && Dex::::security_oracle_value(coin).is_some() && ::AllowMint::is_allowed(&ExternalBalance { coin, amount: Amount(1) }) { EconomicSecurityBlock::::set(coin.network(), Some(n)); Self::deposit_event(Event::EconomicSecurityReached { network: coin.network() }); } } Weight::zero() // TODO } } } pub use pallet::*; ================================================ FILE: substrate/emissions/pallet/Cargo.toml ================================================ [package] name = "serai-emissions-pallet" version = "0.1.0" description = "Emissions pallet for Serai" license = 
"AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/emissions/pallet" authors = ["Akil Demir "] edition = "2021" rust-version = "1.77" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [package.metadata.cargo-machete] ignored = ["scale"] [lints] workspace = true [dependencies] scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] } frame-system = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } frame-support = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-std = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-runtime = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } coins-pallet = { package = "serai-coins-pallet", path = "../../coins/pallet", default-features = false } validator-sets-pallet = { package = "serai-validator-sets-pallet", path = "../../validator-sets/pallet", default-features = false } dex-pallet = { package = "serai-dex-pallet", path = "../../dex/pallet", default-features = false } genesis-liquidity-pallet = { package = "serai-genesis-liquidity-pallet", path = "../../genesis-liquidity/pallet", default-features = false } economic-security-pallet = { package = "serai-economic-security-pallet", path = "../../economic-security/pallet", default-features = false } serai-primitives = { path = "../../primitives", default-features = false } validator-sets-primitives = { package = "serai-validator-sets-primitives", path = "../../validator-sets/primitives", default-features = false } emissions-primitives = { package = "serai-emissions-primitives", path = "../primitives", default-features = 
false } [features] std = [ "scale/std", "frame-system/std", "frame-support/std", "sp-std/std", "sp-runtime/std", "coins-pallet/std", "validator-sets-pallet/std", "dex-pallet/std", "genesis-liquidity-pallet/std", "economic-security-pallet/std", "serai-primitives/std", "emissions-primitives/std", ] fast-epoch = [] try-runtime = [] # TODO default = ["std"] ================================================ FILE: substrate/emissions/pallet/LICENSE ================================================ AGPL-3.0-only license Copyright (c) 2024 Luke Parker This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License Version 3 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see . 
================================================ FILE: substrate/emissions/pallet/src/lib.rs ================================================ #![cfg_attr(not(feature = "std"), no_std)] #[allow( unreachable_patterns, clippy::cast_possible_truncation, clippy::no_effect_underscore_binding, clippy::empty_docs )] #[frame_support::pallet] pub mod pallet { use super::*; use frame_system::{pallet_prelude::*, RawOrigin}; use frame_support::{pallet_prelude::*, sp_runtime::SaturatedConversion}; use sp_std::{vec, vec::Vec, ops::Mul, collections::btree_map::BTreeMap}; use coins_pallet::{Config as CoinsConfig, Pallet as Coins}; use dex_pallet::{Config as DexConfig, Pallet as Dex}; use validator_sets_pallet::{Pallet as ValidatorSets, Config as ValidatorSetsConfig}; use genesis_liquidity_pallet::{Pallet as GenesisLiquidity, Config as GenesisLiquidityConfig}; use economic_security_pallet::{Config as EconomicSecurityConfig, Pallet as EconomicSecurity}; use serai_primitives::*; use validator_sets_primitives::{MAX_KEY_SHARES_PER_SET, Session}; pub use emissions_primitives as primitives; use primitives::*; #[pallet::config] pub trait Config: frame_system::Config + ValidatorSetsConfig + CoinsConfig + DexConfig + GenesisLiquidityConfig + EconomicSecurityConfig { } #[pallet::genesis_config] #[derive(Clone, Debug)] pub struct GenesisConfig { /// Networks to spawn Serai with. pub networks: Vec<(NetworkId, Amount)>, /// List of participants to place in the initial validator sets. pub participants: Vec, } impl Default for GenesisConfig { fn default() -> Self { GenesisConfig { networks: Default::default(), participants: Default::default() } } } #[pallet::error] pub enum Error { NetworkHasEconomicSecurity, NoValueForCoin, InsufficientAllocation, } #[pallet::event] pub enum Event {} #[pallet::pallet] pub struct Pallet(PhantomData); // TODO: Remove this. 
This should be the sole domain of validator-sets #[pallet::storage] #[pallet::getter(fn participants)] pub(crate) type Participants = StorageMap< _, Identity, NetworkId, BoundedVec<(PublicKey, u64), ConstU32<{ MAX_KEY_SHARES_PER_SET }>>, OptionQuery, >; // TODO: Remove this too #[pallet::storage] #[pallet::getter(fn session)] pub type CurrentSession = StorageMap<_, Identity, NetworkId, u32, ValueQuery>; #[pallet::storage] pub(crate) type LastSwapVolume = StorageMap<_, Identity, ExternalCoin, u64, OptionQuery>; #[pallet::genesis_build] impl BuildGenesisConfig for GenesisConfig { fn build(&self) { for (id, stake) in self.networks.clone() { let mut participants = vec![]; for p in self.participants.clone() { participants.push((p, stake.0)); } Participants::::set(id, Some(participants.try_into().unwrap())); CurrentSession::::set(id, 0); } } } #[pallet::hooks] impl Hooks> for Pallet { fn on_initialize(n: BlockNumberFor) -> Weight { let genesis_ended = GenesisLiquidity::::genesis_complete_block().is_some(); // check if we got a new session let mut session_changed = false; let session = ValidatorSets::::session(NetworkId::Serai).unwrap_or(Session(0)); if session.0 > Self::session(NetworkId::Serai) { session_changed = true; CurrentSession::::set(NetworkId::Serai, session.0); } // update participants per session before the genesis // after the genesis, we update them after reward distribution. 
if (!genesis_ended) && session_changed { Self::update_participants(); } // We only want to distribute emissions if the genesis period is over AND the session has // ended if !(genesis_ended && session_changed) { return Weight::zero(); // TODO } // figure out the amount of blocks in the last session // Since the session has changed, we're now at least at session 1 let block_count = ValidatorSets::::session_begin_block(NetworkId::Serai, session) - ValidatorSets::::session_begin_block(NetworkId::Serai, Session(session.0 - 1)); // get total reward for this epoch let pre_ec_security = Self::pre_ec_security(); let mut distances = BTreeMap::new(); let mut total_distance: u64 = 0; let reward_this_epoch = if pre_ec_security { // calculate distance to economic security per network for n in EXTERNAL_NETWORKS { let required = ValidatorSets::::required_stake_for_network(n); let mut current = ValidatorSets::::total_allocated_stake(NetworkId::from(n)).unwrap_or(Amount(0)).0; if current > required { current = required; } let distance = required - current; distances.insert(NetworkId::from(n), distance); total_distance = total_distance.saturating_add(distance); } // add serai network portion (20%) let new_total_distance = total_distance.saturating_mul(100) / (100 - SERAI_VALIDATORS_DESIRED_PERCENTAGE); distances.insert(NetworkId::Serai, new_total_distance - total_distance); total_distance = new_total_distance; if Self::initial_period(n) { // rewards are fixed for initial period block_count * INITIAL_REWARD_PER_BLOCK } else { // rewards for pre-economic security is // (STAKE_REQUIRED - CURRENT_STAKE) / blocks_until(SECURE_BY). 
let block_reward = total_distance / Self::blocks_until(SECURE_BY); block_count * block_reward } } else { // post ec security block_count * REWARD_PER_BLOCK }; // map epoch ec-security-distance/volume to rewards let (rewards_per_network, volume_per_network, volume_per_coin) = if pre_ec_security { ( distances .into_iter() .map(|(n, distance)| { // calculate how much each network gets based on distance to ec-security let reward = u64::try_from( u128::from(reward_this_epoch).saturating_mul(u128::from(distance)) / u128::from(total_distance), ) .unwrap(); (n, reward) }) .collect::>(), None, None, ) } else { // get swap volumes let mut volume_per_coin: BTreeMap = BTreeMap::new(); for c in EXTERNAL_COINS { let current_volume = Dex::::swap_volume(c).unwrap_or(0); let last_volume = LastSwapVolume::::get(c).unwrap_or(0); let vol_this_epoch = current_volume.saturating_sub(last_volume); // update the current volume LastSwapVolume::::set(c, Some(current_volume)); volume_per_coin.insert(c, vol_this_epoch); } // aggregate per network let mut total_volume = 0u64; let mut volume_per_network: BTreeMap = BTreeMap::new(); for (c, vol) in &volume_per_coin { volume_per_network.insert( c.network().into(), (*volume_per_network.get(&c.network().into()).unwrap_or(&0)).saturating_add(*vol), ); total_volume = total_volume.saturating_add(*vol); } // we add the serai network now volume_per_network.insert(NetworkId::Serai, 0); ( volume_per_network .iter() .map(|(n, vol)| { // 20% of the reward goes to the Serai network and rest is distributed among others // based on swap-volume. let reward = if *n == NetworkId::Serai { reward_this_epoch / 5 } else { let reward = reward_this_epoch - (reward_this_epoch / 5); // TODO: It is highly unlikely but what to do in case of 0 total volume? 
if total_volume != 0 { u64::try_from( u128::from(reward).saturating_mul(u128::from(*vol)) / u128::from(total_volume), ) .unwrap() } else { 0 } }; (*n, reward) }) .collect::>(), Some(volume_per_network), Some(volume_per_coin), ) }; // distribute the rewards within the network for (n, reward) in rewards_per_network { let validators_reward = if let NetworkId::External(external_network) = n { // calculate pool vs validator share let capacity = ValidatorSets::::total_allocated_stake(NetworkId::from(external_network)) .unwrap_or(Amount(0)) .0; let required = ValidatorSets::::required_stake_for_network(external_network); let unused_capacity = capacity.saturating_sub(required); let distribution = unused_capacity.saturating_mul(ACCURACY_MULTIPLIER) / capacity; let total = DESIRED_DISTRIBUTION.saturating_add(distribution); let validators_reward = DESIRED_DISTRIBUTION.saturating_mul(reward) / total; let network_pool_reward = reward.saturating_sub(validators_reward); // send the rest to the pool if network_pool_reward != 0 { // these should be available to unwrap if we have a network_pool_reward. Because that // means we had an unused capacity hence in a post-ec era. let vpn = volume_per_network.as_ref().unwrap(); let vpc = volume_per_coin.as_ref().unwrap(); for c in external_network.coins() { let pool_reward = u64::try_from( u128::from(network_pool_reward).saturating_mul(u128::from(vpc[&c])) / u128::from(vpn[&n]), ) .unwrap(); if Coins::::mint( Dex::::get_pool_account(c), Balance { coin: Coin::Serai, amount: Amount(pool_reward) }, ) .is_err() { // TODO: log the failure continue; } } } validators_reward } else { reward }; // distribute validators rewards Self::distribute_to_validators(n, validators_reward); } // TODO: we have the past session participants here in the emissions pallet so that we can // distribute rewards to them in the next session. Ideally we should be able to fetch this // information from validator sets pallet. 
Self::update_participants(); Weight::zero() // TODO } } impl Pallet { fn blocks_until(block: u64) -> u64 { let current = >::block_number().saturated_into::(); block.saturating_sub(current) } fn initial_period(n: BlockNumberFor) -> bool { #[cfg(feature = "fast-epoch")] let initial_period_duration = FAST_EPOCH_INITIAL_PERIOD; #[cfg(not(feature = "fast-epoch"))] let initial_period_duration = 2 * MONTHS; let genesis_complete_block = GenesisLiquidity::::genesis_complete_block(); genesis_complete_block.is_some() && (n.saturated_into::() < (genesis_complete_block.unwrap() + initial_period_duration)) } /// Returns true if any of the external networks haven't reached economic security yet. fn pre_ec_security() -> bool { for n in EXTERNAL_NETWORKS { if EconomicSecurity::::economic_security_block(n).is_none() { return true; } } false } // Distribute the reward among network's set based on // -> (key shares * stake per share) + ((stake % stake per share) / 2) fn distribute_to_validators(n: NetworkId, reward: u64) { let stake_per_share = ValidatorSets::::allocation_per_key_share(n).unwrap().0; let mut scores = vec![]; let mut total_score = 0u64; for (p, amount) in Self::participants(n).unwrap() { let remainder = amount % stake_per_share; let score = amount - (remainder / 2); total_score = total_score.saturating_add(score); scores.push((p, score)); } // stake the rewards for (p, score) in scores { let p_reward = u64::try_from( u128::from(reward).saturating_mul(u128::from(score)) / u128::from(total_score), ) .unwrap(); Coins::::mint(p, Balance { coin: Coin::Serai, amount: Amount(p_reward) }).unwrap(); if ValidatorSets::::distribute_block_rewards(n, p, Amount(p_reward)).is_err() { // TODO: log the failure continue; } } } pub fn swap_to_staked_sri( to: PublicKey, network: NetworkId, balance: ExternalBalance, ) -> DispatchResult { // check the network didn't reach the economic security yet if let NetworkId::External(n) = network { if 
EconomicSecurity::::economic_security_block(n).is_some() { Err(Error::::NetworkHasEconomicSecurity)?; } } else { // we target 20% of the network's stake to be behind the Serai network let mut total_stake = 0; for n in NETWORKS { total_stake += ValidatorSets::::total_allocated_stake(n).unwrap_or(Amount(0)).0; } let stake = ValidatorSets::::total_allocated_stake(network).unwrap_or(Amount(0)).0; let desired_stake = total_stake / (100 / SERAI_VALIDATORS_DESIRED_PERCENTAGE); if stake >= desired_stake { Err(Error::::NetworkHasEconomicSecurity)?; } } // swap half of the liquidity for SRI to form PoL. let half = balance.amount.0 / 2; let path = BoundedVec::try_from(vec![balance.coin.into(), Coin::Serai]).unwrap(); let origin = RawOrigin::Signed(POL_ACCOUNT.into()); Dex::::swap_exact_tokens_for_tokens( origin.clone().into(), path, half, 1, // minimum out, so we accept whatever we get. POL_ACCOUNT.into(), )?; // get how much we got for our swap let sri_amount = Coins::::balance(POL_ACCOUNT.into(), Coin::Serai).0; // add liquidity Dex::::add_liquidity( origin.clone().into(), balance.coin, half, sri_amount, 1, 1, POL_ACCOUNT.into(), )?; // use last block spot price to calculate how much SRI the balance makes. let last_block = >::block_number() - 1u32.into(); let value = Dex::::spot_price_for_block(last_block, balance.coin) .ok_or(Error::::NoValueForCoin)?; // TODO: may panic? It might be best for this math ops to return the result as is instead of // doing an unwrap so that it can be properly dealt with. let sri_amount = balance.amount.mul(value); // Mint Coins::::mint(to, Balance { coin: Coin::Serai, amount: sri_amount })?; // Stake the SRI for the network. 
ValidatorSets::::allocate( frame_system::RawOrigin::Signed(to).into(), network, sri_amount, )?; Ok(()) } fn update_participants() { for n in NETWORKS { let participants = ValidatorSets::::participants_for_latest_decided_set(n) .unwrap() .into_iter() .map(|(key, _)| (key, ValidatorSets::::allocation((n, key)).unwrap_or(Amount(0)).0)) .collect::>(); Participants::::set(n, Some(participants.try_into().unwrap())); } } } } pub use pallet::*; ================================================ FILE: substrate/emissions/primitives/Cargo.toml ================================================ [package] name = "serai-emissions-primitives" version = "0.1.0" description = "Serai emissions primitives" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/emissions/primitives" authors = ["Akil Demir "] edition = "2021" rust-version = "1.77" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] serai-primitives = { path = "../../primitives", default-features = false } [features] std = ["serai-primitives/std"] default = ["std"] ================================================ FILE: substrate/emissions/primitives/LICENSE ================================================ MIT License Copyright (c) 2024 Luke Parker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: substrate/emissions/primitives/src/lib.rs ================================================ #![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(not(feature = "std"), no_std)] use serai_primitives::{DAYS, YEARS, SeraiAddress, system_address}; // Protocol owned liquidity account. pub const POL_ACCOUNT: SeraiAddress = system_address(b"Serai-protocol_owned_liquidity"); /// INITIAL_REWARD = 100,000 SRI / BLOCKS_PER_DAY for 60 days pub const INITIAL_REWARD_PER_BLOCK: u64 = (100_000 * 10u64.pow(8)) / DAYS; /// REWARD = 20M SRI / BLOCKS_PER_YEAR pub const REWARD_PER_BLOCK: u64 = (20_000_000 * 10u64.pow(8)) / YEARS; /// 20% of all stake desired to be for Serai network pub const SERAI_VALIDATORS_DESIRED_PERCENTAGE: u64 = 20; /// Desired unused capacity ratio for a network assuming capacity is 10,000. pub const DESIRED_DISTRIBUTION: u64 = 1_000; /// Percentage scale for the validator vs. pool reward distribution. 
pub const ACCURACY_MULTIPLIER: u64 = 10_000; /// The block to target for economic security pub const SECURE_BY: u64 = YEARS; ================================================ FILE: substrate/genesis-liquidity/pallet/Cargo.toml ================================================ [package] name = "serai-genesis-liquidity-pallet" version = "0.1.0" description = "Genesis liquidity pallet for Serai" license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/genesis-liquidity/pallet" authors = ["Akil Demir "] edition = "2021" rust-version = "1.77" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [package.metadata.cargo-machete] ignored = ["scale"] [lints] workspace = true [dependencies] scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] } frame-system = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } frame-support = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-std = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-core = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-application-crypto = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } dex-pallet = { package = "serai-dex-pallet", path = "../../dex/pallet", default-features = false } coins-pallet = { package = "serai-coins-pallet", path = "../../coins/pallet", default-features = false } validator-sets-pallet = { package = "serai-validator-sets-pallet", path = "../../validator-sets/pallet", default-features = false } economic-security-pallet = { package = 
"serai-economic-security-pallet", path = "../../economic-security/pallet", default-features = false }

serai-primitives = { path = "../../primitives", default-features = false }
genesis-liquidity-primitives = { package = "serai-genesis-liquidity-primitives", path = "../primitives", default-features = false }
validator-sets-primitives = { package = "serai-validator-sets-primitives", path = "../../validator-sets/primitives", default-features = false }

[features]
std = [
  "scale/std",

  "frame-system/std",
  "frame-support/std",

  "sp-std/std",
  "sp-core/std",
  "sp-application-crypto/std",

  "coins-pallet/std",
  "dex-pallet/std",
  "validator-sets-pallet/std",
  "economic-security-pallet/std",

  "serai-primitives/std",
  "genesis-liquidity-primitives/std",
  "validator-sets-primitives/std",
]
try-runtime = [] # TODO
fast-epoch = []
default = ["std"]

================================================
FILE: substrate/genesis-liquidity/pallet/LICENSE
================================================
AGPL-3.0-only license

Copyright (c) 2024 Luke Parker

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as
published by the Free Software Foundation.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
================================================
FILE: substrate/genesis-liquidity/pallet/src/lib.rs
================================================
#![cfg_attr(not(feature = "std"), no_std)]

// NOTE(review): the extraction that produced this dump stripped every HTML-tag-like token, which
// deleted all short generic argument lists (`<T>`, `<u64>`, `<Vec<_>>`, ...). They have been
// reconstructed below following standard FRAME conventions — confirm against upstream before
// relying on the exact generic parameters (notably `LiquidityTokens` and the `Config` bounds).
#[allow(
  unreachable_patterns,
  clippy::cast_possible_truncation,
  clippy::no_effect_underscore_binding,
  clippy::empty_docs
)]
#[frame_support::pallet]
pub mod pallet {
  use super::*;

  use frame_system::{pallet_prelude::*, RawOrigin};
  use frame_support::{pallet_prelude::*, sp_runtime::SaturatedConversion};

  use sp_std::{vec, vec::Vec};
  use sp_core::sr25519::Signature;
  use sp_application_crypto::RuntimePublic;

  use dex_pallet::{Pallet as Dex, Config as DexConfig};
  use coins_pallet::{Config as CoinsConfig, Pallet as Coins};
  use validator_sets_pallet::{Config as VsConfig, Pallet as ValidatorSets};
  use economic_security_pallet::{Config as EconomicSecurityConfig, Pallet as EconomicSecurity};

  use serai_primitives::*;
  use validator_sets_primitives::{ValidatorSet, musig_key};
  pub use genesis_liquidity_primitives as primitives;
  use primitives::*;

  // TODO: Have a more robust way of accessing LiquidityTokens pallet.
  /// LiquidityTokens Pallet as an instance of coins pallet.
  // NOTE(review): instance parameter reconstructed — confirm against upstream.
  pub type LiquidityTokens<T> = coins_pallet::Pallet<T, coins_pallet::Instance1>;

  #[pallet::config]
  pub trait Config:
    frame_system::Config<AccountId = PublicKey>
    + VsConfig
    + DexConfig
    + EconomicSecurityConfig
    + CoinsConfig
    + coins_pallet::Config<coins_pallet::Instance1>
  {
  }

  #[pallet::error]
  pub enum Error<T> {
    GenesisPeriodEnded,
    AmountOverflowed,
    NotEnoughLiquidity,
    CanOnlyRemoveFullAmount,
  }

  #[pallet::event]
  #[pallet::generate_deposit(fn deposit_event)]
  pub enum Event<T: Config> {
    GenesisLiquidityAdded { by: SeraiAddress, balance: ExternalBalance },
    GenesisLiquidityRemoved { by: SeraiAddress, balance: ExternalBalance },
    GenesisLiquidityAddedToPool { coin: ExternalBalance, sri: Amount },
  }

  #[pallet::pallet]
  pub struct Pallet<T>(PhantomData<T>);

  /// Keeps shares and the amount of coins per account.
  #[pallet::storage]
  pub(crate) type Liquidity<T: Config> = StorageDoubleMap<
    _,
    Identity,
    ExternalCoin,
    Blake2_128Concat,
    PublicKey,
    LiquidityAmount,
    OptionQuery,
  >;

  /// Keeps the total shares and the total amount of coins per coin.
  #[pallet::storage]
  pub(crate) type Supply<T: Config> =
    StorageMap<_, Identity, ExternalCoin, LiquidityAmount, OptionQuery>;

  // Initial relative value of each external coin, denominated in BTC (see `oraclize_values`).
  #[pallet::storage]
  pub(crate) type Oracle<T: Config> = StorageMap<_, Identity, ExternalCoin, u64, OptionQuery>;

  // The block at which the genesis liquidity was moved into the DEX pools, once set.
  #[pallet::storage]
  #[pallet::getter(fn genesis_complete_block)]
  pub(crate) type GenesisCompleteBlock<T: Config> = StorageValue<_, u64, OptionQuery>;

  #[pallet::hooks]
  impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
    fn on_initialize(n: BlockNumberFor<T>) -> Weight {
      #[cfg(feature = "fast-epoch")]
      let final_block = 10u64;

      #[cfg(not(feature = "fast-epoch"))]
      let final_block = MONTHS;

      // Distribute the genesis sri to pools after a month
      if (n.saturated_into::<u64>() >= final_block) &&
        Self::oraclization_is_done() &&
        GenesisCompleteBlock::<T>::get().is_none()
      {
        // mint the SRI
        Coins::<T>::mint(
          GENESIS_LIQUIDITY_ACCOUNT.into(),
          Balance { coin: Coin::Serai, amount: Amount(GENESIS_SRI) },
        )
        .unwrap();

        // get pool & total values
        let mut pool_values = vec![];
        let mut total_value: u128 = 0;
        for coin in EXTERNAL_COINS {
          // initial coin value in terms of btc
          let Some(value) = Oracle::<T>::get(coin) else {
            continue;
          };

          let pool_amount =
            u128::from(Supply::<T>::get(coin).unwrap_or(LiquidityAmount::zero()).coins);
          let pool_value = pool_amount
            .checked_mul(value.into())
            .unwrap()
            .checked_div(10u128.pow(coin.decimals()))
            .unwrap();
          total_value = total_value.checked_add(pool_value).unwrap();
          pool_values.push((coin, pool_amount, pool_value));
        }

        // add the liquidity per pool
        let mut total_sri_distributed = 0;
        let pool_values_len = pool_values.len();
        for (i, (coin, pool_amount, pool_value)) in pool_values.into_iter().enumerate() {
          // whatever sri left for the last coin should be ~= it's ratio
          let sri_amount = if i == (pool_values_len - 1) {
            GENESIS_SRI.checked_sub(total_sri_distributed).unwrap()
          } else {
            u64::try_from(
              u128::from(GENESIS_SRI)
                .checked_mul(pool_value)
                .unwrap()
                .checked_div(total_value)
                .unwrap(),
            )
            .unwrap()
          };
          total_sri_distributed = total_sri_distributed.checked_add(sri_amount).unwrap();

          // actually add the liquidity to dex
          let origin = RawOrigin::Signed(GENESIS_LIQUIDITY_ACCOUNT.into());
          let Ok(()) = Dex::<T>::add_liquidity(
            origin.into(),
            coin,
            u64::try_from(pool_amount).unwrap(),
            sri_amount,
            u64::try_from(pool_amount).unwrap(),
            sri_amount,
            GENESIS_LIQUIDITY_ACCOUNT.into(),
          ) else {
            continue;
          };

          // let everyone know about the event
          Self::deposit_event(Event::GenesisLiquidityAddedToPool {
            coin: ExternalBalance { coin, amount: Amount(u64::try_from(pool_amount).unwrap()) },
            sri: Amount(sri_amount),
          });
        }
        assert_eq!(total_sri_distributed, GENESIS_SRI);

        // we shouldn't have left any coin in genesis account at this moment, including SRI.
        // All transferred to the pools.
        for coin in COINS {
          assert_eq!(Coins::<T>::balance(GENESIS_LIQUIDITY_ACCOUNT.into(), coin), Amount(0));
        }

        GenesisCompleteBlock::<T>::set(Some(n.saturated_into::<u64>()));
      }

      Weight::zero() // TODO
    }
  }

  impl<T: Config> Pallet<T> {
    /// Add genesis liquidity for the given account. All accounts that provide liquidity
    /// will receive the genesis SRI according to their liquidity ratio.
    pub fn add_coin_liquidity(account: PublicKey, balance: ExternalBalance) -> DispatchResult {
      // check we are still in genesis period
      if Self::genesis_ended() {
        Err(Error::<T>::GenesisPeriodEnded)?;
      }

      // calculate new shares & supply
      let (new_liquidity, new_supply) = if let Some(supply) = Supply::<T>::get(balance.coin) {
        // calculate amount of shares for this amount
        let shares = Self::mul_div(supply.shares, balance.amount.0, supply.coins)?;

        // get new shares for this account
        let existing =
          Liquidity::<T>::get(balance.coin, account).unwrap_or(LiquidityAmount::zero());
        (
          LiquidityAmount {
            shares: existing.shares.checked_add(shares).ok_or(Error::<T>::AmountOverflowed)?,
            coins: existing
              .coins
              .checked_add(balance.amount.0)
              .ok_or(Error::<T>::AmountOverflowed)?,
          },
          LiquidityAmount {
            shares: supply.shares.checked_add(shares).ok_or(Error::<T>::AmountOverflowed)?,
            coins: supply
              .coins
              .checked_add(balance.amount.0)
              .ok_or(Error::<T>::AmountOverflowed)?,
          },
        )
      } else {
        let first_amount =
          LiquidityAmount { shares: INITIAL_GENESIS_LP_SHARES, coins: balance.amount.0 };
        (first_amount, first_amount)
      };

      // save
      Liquidity::<T>::set(balance.coin, account, Some(new_liquidity));
      Supply::<T>::set(balance.coin, Some(new_supply));

      Self::deposit_event(Event::GenesisLiquidityAdded { by: account.into(), balance });
      Ok(())
    }

    /// Returns the number of blocks since the all networks reached economic security first time.
    /// If networks is yet to be reached that threshold, None is returned.
    fn blocks_since_ec_security() -> Option<u64> {
      let mut min = u64::MAX;
      for n in EXTERNAL_NETWORKS {
        let ec_security_block =
          EconomicSecurity::<T>::economic_security_block(n)?.saturated_into::<u64>();
        let current = <frame_system::Pallet<T>>::block_number().saturated_into::<u64>();
        let diff = current.saturating_sub(ec_security_block);
        min = diff.min(min);
      }
      Some(min)
    }

    // The genesis period is over once all coin values are oraclized and a month of blocks passed.
    fn genesis_ended() -> bool {
      Self::oraclization_is_done() &&
        (<frame_system::Pallet<T>>::block_number().saturated_into::<u64>() >= MONTHS)
    }

    // True once every external coin has an oraclized value.
    fn oraclization_is_done() -> bool {
      for c in EXTERNAL_COINS {
        if Oracle::<T>::get(c).is_none() {
          return false;
        }
      }
      true
    }

    // (a * b) / c performed in u128 to avoid intermediate overflow; errors on overflow of the
    // final u64 result, on u128 multiplication overflow, or on division by zero.
    fn mul_div(a: u64, b: u64, c: u64) -> Result<u64, Error<T>> {
      let a = u128::from(a);
      let b = u128::from(b);
      let c = u128::from(c);

      let result = a
        .checked_mul(b)
        .ok_or(Error::<T>::AmountOverflowed)?
        .checked_div(c)
        .ok_or(Error::<T>::AmountOverflowed)?;

      result.try_into().map_err(|_| Error::<T>::AmountOverflowed)
    }
  }

  #[pallet::call]
  impl<T: Config> Pallet<T> {
    /// Remove the provided genesis liquidity for an account.
    #[pallet::call_index(0)]
    #[pallet::weight((0, DispatchClass::Operational))] // TODO
    pub fn remove_coin_liquidity(origin: OriginFor<T>, balance: ExternalBalance) -> DispatchResult {
      let account = ensure_signed(origin)?;
      let origin = RawOrigin::Signed(GENESIS_LIQUIDITY_ACCOUNT.into());
      let supply = Supply::<T>::get(balance.coin).ok_or(Error::<T>::NotEnoughLiquidity)?;

      // check we are still in genesis period
      let (new_liquidity, new_supply) = if Self::genesis_ended() {
        // see how much liq tokens we have
        let total_liq_tokens =
          LiquidityTokens::<T>::balance(GENESIS_LIQUIDITY_ACCOUNT.into(), Coin::Serai).0;

        // get how much user wants to remove
        let LiquidityAmount { shares, coins } =
          Liquidity::<T>::get(balance.coin, account).unwrap_or(LiquidityAmount::zero());
        let total_shares = Supply::<T>::get(balance.coin).unwrap_or(LiquidityAmount::zero()).shares;
        let user_liq_tokens = Self::mul_div(total_liq_tokens, shares, total_shares)?;
        let amount_to_remove =
          Self::mul_div(user_liq_tokens, balance.amount.0, INITIAL_GENESIS_LP_SHARES)?;

        // remove liquidity from pool
        let prev_sri = Coins::<T>::balance(GENESIS_LIQUIDITY_ACCOUNT.into(), Coin::Serai);
        let prev_coin = Coins::<T>::balance(GENESIS_LIQUIDITY_ACCOUNT.into(), balance.coin.into());
        Dex::<T>::remove_liquidity(
          origin.clone().into(),
          balance.coin,
          amount_to_remove,
          1,
          1,
          GENESIS_LIQUIDITY_ACCOUNT.into(),
        )?;
        let current_sri = Coins::<T>::balance(GENESIS_LIQUIDITY_ACCOUNT.into(), Coin::Serai);
        let current_coin =
          Coins::<T>::balance(GENESIS_LIQUIDITY_ACCOUNT.into(), balance.coin.into());

        // burn the SRI if necessary
        // TODO: take into consideration movement between pools.
        let mut sri: u64 = current_sri.0.saturating_sub(prev_sri.0);
        let distance_to_full_pay =
          GENESIS_SRI_TRICKLE_FEED.saturating_sub(Self::blocks_since_ec_security().unwrap_or(0));
        let burn_sri_amount = u64::try_from(
          u128::from(sri)
            .checked_mul(u128::from(distance_to_full_pay))
            .ok_or(Error::<T>::AmountOverflowed)?
            .checked_div(u128::from(GENESIS_SRI_TRICKLE_FEED))
            .ok_or(Error::<T>::AmountOverflowed)?,
        )
        .map_err(|_| Error::<T>::AmountOverflowed)?;
        Coins::<T>::burn(
          origin.clone().into(),
          Balance { coin: Coin::Serai, amount: Amount(burn_sri_amount) },
        )?;
        sri = sri.checked_sub(burn_sri_amount).ok_or(Error::<T>::AmountOverflowed)?;

        // transfer to owner
        let coin_out = current_coin.0.saturating_sub(prev_coin.0);
        Coins::<T>::transfer(
          origin.clone().into(),
          account,
          Balance { coin: balance.coin.into(), amount: Amount(coin_out) },
        )?;
        Coins::<T>::transfer(
          origin.into(),
          account,
          Balance { coin: Coin::Serai, amount: Amount(sri) },
        )?;

        // return new amounts
        (
          LiquidityAmount {
            shares: shares.checked_sub(amount_to_remove).ok_or(Error::<T>::AmountOverflowed)?,
            coins: coins.checked_sub(coin_out).ok_or(Error::<T>::AmountOverflowed)?,
          },
          LiquidityAmount {
            shares: supply
              .shares
              .checked_sub(amount_to_remove)
              .ok_or(Error::<T>::AmountOverflowed)?,
            coins: supply.coins.checked_sub(coin_out).ok_or(Error::<T>::AmountOverflowed)?,
          },
        )
      } else {
        if balance.amount.0 != INITIAL_GENESIS_LP_SHARES {
          Err(Error::<T>::CanOnlyRemoveFullAmount)?;
        }
        let existing =
          Liquidity::<T>::get(balance.coin, account).ok_or(Error::<T>::NotEnoughLiquidity)?;

        // transfer to the user
        Coins::<T>::transfer(
          origin.into(),
          account,
          Balance { coin: balance.coin.into(), amount: Amount(existing.coins) },
        )?;

        (
          LiquidityAmount::zero(),
          LiquidityAmount {
            shares: supply
              .shares
              .checked_sub(existing.shares)
              .ok_or(Error::<T>::AmountOverflowed)?,
            coins: supply.coins.checked_sub(existing.coins).ok_or(Error::<T>::AmountOverflowed)?,
          },
        )
      };

      // save
      if new_liquidity == LiquidityAmount::zero() {
        Liquidity::<T>::set(balance.coin, account, None);
      } else {
        Liquidity::<T>::set(balance.coin, account, Some(new_liquidity));
      }
      Supply::<T>::set(balance.coin, Some(new_supply));

      Self::deposit_event(Event::GenesisLiquidityRemoved { by: account.into(), balance });
      Ok(())
    }

    /// A call to submit the initial coin values in terms of BTC.
    #[pallet::call_index(1)]
    #[pallet::weight((0, DispatchClass::Operational))] // TODO
    pub fn oraclize_values(
      origin: OriginFor<T>,
      values: Values,
      _signature: Signature,
    ) -> DispatchResult {
      ensure_none(origin)?;

      // set their relative values
      Oracle::<T>::set(ExternalCoin::Bitcoin, Some(10u64.pow(ExternalCoin::Bitcoin.decimals())));
      Oracle::<T>::set(ExternalCoin::Monero, Some(values.monero));
      Oracle::<T>::set(ExternalCoin::Ether, Some(values.ether));
      Oracle::<T>::set(ExternalCoin::Dai, Some(values.dai));
      Ok(())
    }
  }

  #[pallet::validate_unsigned]
  impl<T: Config> ValidateUnsigned for Pallet<T> {
    type Call = Call<T>;

    fn validate_unsigned(_: TransactionSource, call: &Self::Call) -> TransactionValidity {
      match call {
        Call::oraclize_values { ref values, ref signature } => {
          let network = NetworkId::Serai;
          let Some(session) = ValidatorSets::<T>::session(network) else {
            return Err(TransactionValidityError::from(InvalidTransaction::Custom(0)));
          };

          let set = ValidatorSet { network, session };
          let signers = ValidatorSets::<T>::participants_for_latest_decided_set(network)
            .expect("no participant in the current set")
            .into_iter()
            .map(|(p, _)| p)
            .collect::<Vec<_>>();

          // check this didn't get called before
          if Self::oraclization_is_done() {
            Err(InvalidTransaction::Custom(1))?;
          }

          // make sure signers settings the value at the end of the genesis period.
          // we don't need this check for tests.
          #[cfg(not(feature = "fast-epoch"))]
          if <frame_system::Pallet<T>>::block_number().saturated_into::<u64>() < MONTHS {
            Err(InvalidTransaction::Custom(2))?;
          }

          if !musig_key(set, &signers).verify(&oraclize_values_message(&set, values), signature) {
            Err(InvalidTransaction::BadProof)?;
          }

          ValidTransaction::with_tag_prefix("GenesisLiquidity")
            .and_provides((0, set))
            .longevity(u64::MAX)
            .propagate(true)
            .build()
        }
        Call::remove_coin_liquidity { .. } => Err(InvalidTransaction::Call)?,
        Call::__Ignore(_, _) => unreachable!(),
      }
    }
  }
}

pub use pallet::*;

================================================
FILE: substrate/genesis-liquidity/primitives/Cargo.toml
================================================
[package]
name = "serai-genesis-liquidity-primitives"
version = "0.1.0"
description = "Serai genesis liquidity primitives"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/substrate/genesis-liquidity/primitives"
authors = ["Akil Demir "]
edition = "2021"
rust-version = "1.77"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
zeroize = { version = "^1.5", features = ["derive"], optional = true }

borsh = { version = "1", default-features = false, features = ["derive", "de_strict_order"], optional = true }
serde = { version = "1", default-features = false, features = ["derive", "alloc"], optional = true }

scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] }

sp-std = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false }

serai-primitives = { path = "../../primitives", default-features = false }
validator-sets-primitives = { package = "serai-validator-sets-primitives", path =
"../../validator-sets/primitives", default-features = false } [features] std = [ "zeroize", "scale/std", "borsh?/std", "serde?/std", "serai-primitives/std", "validator-sets-primitives/std", "sp-std/std" ] default = ["std"] ================================================ FILE: substrate/genesis-liquidity/primitives/LICENSE ================================================ MIT License Copyright (c) 2024 Luke Parker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
================================================
FILE: substrate/genesis-liquidity/primitives/src/lib.rs
================================================
// NOTE(review): the original had `#![cfg_attr(docsrs, feature(doc_cfg))]` duplicated on two
// consecutive lines (redundant copy removed), and the return type of `oraclize_values_message`
// had its generic argument stripped by the extraction (`Vec` -> `Vec<u8>`, restored below).
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]

#[cfg(feature = "std")]
use zeroize::Zeroize;

#[cfg(feature = "borsh")]
use borsh::{BorshSerialize, BorshDeserialize};
#[cfg(feature = "serde")]
use serde::{Serialize, Deserialize};

use sp_std::vec::Vec;

use scale::{Encode, Decode, DecodeWithMemTracking, MaxEncodedLen};

use serai_primitives::*;
use validator_sets_primitives::ValidatorSet;

/// Share units credited to the first liquidity provider of a coin; a removal of this amount
/// denotes removing the full position during the genesis period.
pub const INITIAL_GENESIS_LP_SHARES: u64 = 10_000;

// This is the account to hold and manage the genesis liquidity.
pub const GENESIS_LIQUIDITY_ACCOUNT: SeraiAddress = system_address(b"GenesisLiquidity-account");

/// Relative values of the non-BTC external coins, as submitted via `oraclize_values`.
// NOTE(review): presumably denominated in BTC terms, per the oraclization call's docs — confirm.
#[derive(
  Clone, Copy, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen,
)]
#[cfg_attr(feature = "std", derive(Zeroize))]
#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Values {
  pub monero: u64,
  pub ether: u64,
  pub dai: u64,
}

/// A liquidity position: the shares held and the amount of underlying coins they represent.
#[derive(
  Clone, Copy, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen,
)]
#[cfg_attr(feature = "std", derive(Zeroize))]
#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct LiquidityAmount {
  pub shares: u64,
  pub coins: u64,
}

impl LiquidityAmount {
  /// The empty position (zero shares, zero coins).
  pub fn zero() -> Self {
    LiquidityAmount { shares: 0, coins: 0 }
  }
}

/// The message for the oraclize_values signature.
pub fn oraclize_values_message(set: &ValidatorSet, values: &Values) -> Vec<u8> {
  (b"GenesisLiquidity-oraclize_values", set, values).encode()
}

================================================
FILE: substrate/in-instructions/pallet/Cargo.toml
================================================
[package]
name = "serai-in-instructions-pallet"
version = "0.1.0"
description = "Execute calls via In Instructions from unsigned transactions"
license = "AGPL-3.0-only"
authors = ["Luke Parker "]
edition = "2021"
publish = false
rust-version = "1.74"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[package.metadata.cargo-machete]
ignored = ["scale"]

[lints]
workspace = true

[dependencies]
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive", "max-encoded-len"] }

sp-std = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false }
sp-application-crypto = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false }

sp-io = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false }
sp-runtime = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false }
sp-core = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false }

frame-system = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false }
frame-support = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false }

serai-primitives = { path = "../../primitives", default-features = false }
in-instructions-primitives = { package
= "serai-in-instructions-primitives", path = "../primitives", default-features = false } coins-pallet = { package = "serai-coins-pallet", path = "../../coins/pallet", default-features = false } dex-pallet = { package = "serai-dex-pallet", path = "../../dex/pallet", default-features = false } validator-sets-pallet = { package = "serai-validator-sets-pallet", path = "../../validator-sets/pallet", default-features = false } genesis-liquidity-pallet = { package = "serai-genesis-liquidity-pallet", path = "../../genesis-liquidity/pallet", default-features = false } emissions-pallet = { package = "serai-emissions-pallet", path = "../../emissions/pallet", default-features = false } [features] std = [ "scale/std", "sp-std/std", "sp-application-crypto/std", "sp-io/std", "sp-runtime/std", "sp-core/std", "frame-system/std", "frame-support/std", "serai-primitives/std", "in-instructions-primitives/std", "coins-pallet/std", "dex-pallet/std", "validator-sets-pallet/std", "genesis-liquidity-pallet/std", "emissions-pallet/std", ] default = ["std"] # TODO try-runtime = [] ================================================ FILE: substrate/in-instructions/pallet/LICENSE ================================================ AGPL-3.0-only license Copyright (c) 2022-2023 Luke Parker This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License Version 3 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see . 
================================================ FILE: substrate/in-instructions/pallet/src/lib.rs ================================================ #![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(not(feature = "std"), no_std)] use sp_io::hashing::blake2_256; use serai_primitives::*; pub use in_instructions_primitives as primitives; use primitives::*; // TODO: Investigate why Substrate generates these #[allow( unreachable_patterns, clippy::cast_possible_truncation, clippy::no_effect_underscore_binding, clippy::empty_docs )] #[frame_support::pallet] pub mod pallet { use sp_std::vec; use sp_application_crypto::RuntimePublic; use sp_runtime::traits::Zero; use sp_core::sr25519::Public; use frame_support::pallet_prelude::*; use frame_system::{pallet_prelude::*, RawOrigin}; use coins_pallet::{ Config as CoinsConfig, Pallet as Coins, primitives::{OutInstruction, OutInstructionWithBalance}, }; use dex_pallet::{Config as DexConfig, Pallet as Dex}; use validator_sets_pallet::{ primitives::{Session, ValidatorSet, ExternalValidatorSet}, Config as ValidatorSetsConfig, Pallet as ValidatorSets, }; use genesis_liquidity_pallet::{ Pallet as GenesisLiq, Config as GenesisLiqConfig, primitives::GENESIS_LIQUIDITY_ACCOUNT, }; use emissions_pallet::{Pallet as Emissions, Config as EmissionsConfig, primitives::POL_ACCOUNT}; use super::*; #[pallet::config] pub trait Config: frame_system::Config + CoinsConfig + DexConfig + ValidatorSetsConfig + GenesisLiqConfig + EmissionsConfig { } #[pallet::event] #[pallet::generate_deposit(fn deposit_event)] pub enum Event { Batch { network: ExternalNetworkId, id: u32, block: BlockHash, instructions_hash: [u8; 32] }, InstructionFailure { network: ExternalNetworkId, id: u32, index: u32 }, Halt { network: ExternalNetworkId }, } #[pallet::error] pub enum Error { /// Coin and OutAddress types don't match. InvalidAddressForCoin, } #[pallet::pallet] pub struct Pallet(PhantomData); // The ID of the last executed Batch for a network. 
#[pallet::storage] #[pallet::getter(fn batches)] pub(crate) type LastBatch = StorageMap<_, Identity, ExternalNetworkId, u32, OptionQuery>; // The last Serai block in which this validator set included a batch #[pallet::storage] #[pallet::getter(fn last_batch_block)] pub(crate) type LastBatchBlock = StorageMap<_, Identity, ExternalNetworkId, BlockNumberFor, OptionQuery>; // Halted networks. #[pallet::storage] pub(crate) type Halted = StorageMap<_, Identity, ExternalNetworkId, (), OptionQuery>; // The latest block a network has acknowledged as finalized #[pallet::storage] #[pallet::getter(fn latest_network_block)] pub(crate) type LatestNetworkBlock = StorageMap<_, Identity, ExternalNetworkId, BlockHash, OptionQuery>; impl Pallet { // Use a dedicated transaction layer when executing this InInstruction // This lets it individually error without causing any storage modifications #[frame_support::transactional] fn execute(instruction: InInstructionWithBalance) -> Result<(), DispatchError> { match instruction.instruction { InInstruction::Transfer(address) => { Coins::::mint(address.into(), instruction.balance.into())?; } InInstruction::Dex(call) => { // This will only be initiated by external chain transactions. That is why we only need // add liquidity and swaps. Other functionalities (such as remove_liq, etc) will be // called directly from Serai with a native transaction. match call { DexCall::SwapAndAddLiquidity(address) => { let origin = RawOrigin::Signed(IN_INSTRUCTION_EXECUTOR.into()); let coin = instruction.balance.coin; // mint the given coin on the account Coins::::mint(IN_INSTRUCTION_EXECUTOR.into(), instruction.balance.into())?; // swap half of it for SRI let half = instruction.balance.amount.0 / 2; let path = BoundedVec::try_from(vec![coin.into(), Coin::Serai]).unwrap(); Dex::::swap_exact_tokens_for_tokens( origin.clone().into(), path, half, 1, // minimum out, so we accept whatever we get. 
IN_INSTRUCTION_EXECUTOR.into(), )?; // get how much we got for our swap let sri_amount = Coins::::balance(IN_INSTRUCTION_EXECUTOR.into(), Coin::Serai).0; // add liquidity Dex::::add_liquidity( origin.clone().into(), coin, half, sri_amount, 1, 1, address.into(), )?; // TODO: minimums are set to 1 above to guarantee successful adding liq call. // Ideally we either get this info from user or send the leftovers back to user. // Let's send the leftovers back to user for now. let coin_balance = Coins::::balance(IN_INSTRUCTION_EXECUTOR.into(), coin.into()); let sri_balance = Coins::::balance(IN_INSTRUCTION_EXECUTOR.into(), Coin::Serai); if coin_balance != Amount(0) { Coins::::transfer_internal( IN_INSTRUCTION_EXECUTOR.into(), address.into(), Balance { coin: coin.into(), amount: coin_balance }, )?; } if sri_balance != Amount(0) { Coins::::transfer_internal( IN_INSTRUCTION_EXECUTOR.into(), address.into(), Balance { coin: Coin::Serai, amount: sri_balance }, )?; } } DexCall::Swap(out_balance, out_address) => { let send_to_external = !out_address.is_native(); let native_coin = out_balance.coin.is_native(); // we can't send native coin to external chain if native_coin && send_to_external { Err(Error::::InvalidAddressForCoin)?; } // mint the given coin on our account Coins::::mint(IN_INSTRUCTION_EXECUTOR.into(), instruction.balance.into())?; // get the path let mut path = vec![instruction.balance.coin.into(), Coin::Serai]; if !native_coin { path.push(out_balance.coin); } // get the swap address // if the address is internal, we can directly swap to it. if not, we swap to // ourselves and burn the coins to send them back on the external chain. 
let send_to = if send_to_external { IN_INSTRUCTION_EXECUTOR } else { out_address.clone().as_native().unwrap() }; // do the swap let origin = RawOrigin::Signed(IN_INSTRUCTION_EXECUTOR.into()); Dex::::swap_exact_tokens_for_tokens( origin.clone().into(), BoundedVec::try_from(path).unwrap(), instruction.balance.amount.0, out_balance.amount.0, send_to.into(), )?; // burn the received coins so that they sent back to the user // if it is requested to an external address. if send_to_external { // see how much we got let coin_balance = Coins::::balance(IN_INSTRUCTION_EXECUTOR.into(), out_balance.coin); let instruction = OutInstructionWithBalance { instruction: OutInstruction { address: out_address.as_external().unwrap(), // TODO: Properly pass data. Replace address with an OutInstruction entirely? data: None, }, balance: ExternalBalance { coin: out_balance.coin.try_into().unwrap(), amount: coin_balance, }, }; Coins::::burn_with_instruction(origin.into(), instruction)?; } } } } InInstruction::GenesisLiquidity(address) => { Coins::::mint(GENESIS_LIQUIDITY_ACCOUNT.into(), instruction.balance.into())?; GenesisLiq::::add_coin_liquidity(address.into(), instruction.balance)?; } InInstruction::SwapToStakedSRI(address, network) => { Coins::::mint(POL_ACCOUNT.into(), instruction.balance.into())?; Emissions::::swap_to_staked_sri(address.into(), network, instruction.balance)?; } } Ok(()) } pub fn halt(network: ExternalNetworkId) -> Result<(), DispatchError> { Halted::::set(network, Some(())); Self::deposit_event(Event::Halt { network }); Ok(()) } } fn keys_for_network( network: ExternalNetworkId, ) -> Result<(Session, Option, Option), InvalidTransaction> { // If there's no session set, and therefore no keys set, then this must be an invalid signature let Some(session) = ValidatorSets::::session(NetworkId::from(network)) else { Err(InvalidTransaction::BadProof)? 
}; let mut set = ExternalValidatorSet { network, session }; let latest = ValidatorSets::::keys(set).map(|keys| keys.0); let prior = if set.session.0 != 0 { set.session.0 -= 1; ValidatorSets::::keys(set).map(|keys| keys.0) } else { None }; if prior.is_none() && latest.is_none() { Err(InvalidTransaction::BadProof)?; } Ok((session, prior, latest)) } #[pallet::call] impl Pallet { #[pallet::call_index(0)] #[pallet::weight((0, DispatchClass::Operational))] // TODO pub fn execute_batch(origin: OriginFor, batch: SignedBatch) -> DispatchResult { ensure_none(origin)?; let batch = batch.batch; LatestNetworkBlock::::insert(batch.network, batch.block); Self::deposit_event(Event::Batch { network: batch.network, id: batch.id, block: batch.block, instructions_hash: blake2_256(&batch.instructions.encode()), }); for (i, instruction) in batch.instructions.into_iter().enumerate() { if Self::execute(instruction).is_err() { Self::deposit_event(Event::InstructionFailure { network: batch.network, id: batch.id, index: u32::try_from(i).unwrap(), }); } } Ok(()) } } #[pallet::validate_unsigned] impl ValidateUnsigned for Pallet { type Call = Call; fn validate_unsigned(_: TransactionSource, call: &Self::Call) -> TransactionValidity { // Match to be exhaustive let batch = match call { Call::execute_batch { ref batch } => batch, Call::__Ignore(_, _) => unreachable!(), }; // verify the batch size // TODO: Merge this encode with the one done by batch_message if batch.batch.encode().len() > MAX_BATCH_SIZE { Err(InvalidTransaction::ExhaustsResources)?; } let network = batch.batch.network; // verify the signature let (current_session, prior, current) = keys_for_network::(network)?; let batch_message = batch_message(&batch.batch); // Check the prior key first since only a single `Batch` (the last one) will be when prior is // Some yet prior wasn't the signing key let valid_by_prior = if let Some(key) = prior { key.verify(&batch_message, &batch.signature) } else { false }; let valid = valid_by_prior || 
(if let Some(key) = current { key.verify(&batch_message, &batch.signature) } else { false }); if !valid { Err(InvalidTransaction::BadProof)?; } if Halted::::contains_key(network) { Err(InvalidTransaction::Custom(1))?; } // If it wasn't valid by the prior key, meaning it was valid by the current key, the current // key is publishing `Batch`s. This should only happen once the current key has verified all // `Batch`s published by the prior key, meaning they are accepting the hand-over. if prior.is_some() && (!valid_by_prior) { ValidatorSets::::retire_set(ValidatorSet { network: network.into(), session: Session(current_session.0 - 1), }); } // check that this validator set isn't publishing a batch more than once per block let current_block = >::block_number(); let last_block = LastBatchBlock::::get(network).unwrap_or(Zero::zero()); if last_block >= current_block { Err(InvalidTransaction::Future)?; } LastBatchBlock::::insert(batch.batch.network, frame_system::Pallet::::block_number()); // Verify the batch is sequential // LastBatch has the last ID set. 
The next ID should be it + 1 // If there's no ID, the next ID should be 0 let expected = LastBatch::::get(network).map_or(0, |prev| prev + 1); if batch.batch.id < expected { Err(InvalidTransaction::Stale)?; } if batch.batch.id > expected { Err(InvalidTransaction::Future)?; } LastBatch::::insert(batch.batch.network, batch.batch.id); // Verify all Balances in this Batch are for this network for instruction in &batch.batch.instructions { // Verify this coin is for this network // If this is ever hit, it means the validator set has turned malicious and should be fully // slashed // Because we have an error here, no validator set which turns malicious should execute // this code path // Accordingly, there's no value in writing code to fully slash the network, when such an // even would require a runtime upgrade to fully resolve anyways if instruction.balance.coin.network() != batch.batch.network { Err(InvalidTransaction::Custom(2))?; } } ValidTransaction::with_tag_prefix("in-instructions") .and_provides((batch.batch.network, batch.batch.id)) // Set a 10 block longevity, though this should be included in the next block .longevity(10) .propagate(true) .build() } // Explicitly provide a pre-dispatch which calls validate_unsigned fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { Self::validate_unsigned(TransactionSource::InBlock, call).map(|_| ()).map_err(Into::into) } } } pub use pallet::*; ================================================ FILE: substrate/in-instructions/primitives/Cargo.toml ================================================ [package] name = "serai-in-instructions-primitives" version = "0.1.0" description = "Serai instructions library, enabling encoding and decoding" license = "MIT" authors = ["Luke Parker "] edition = "2021" rust-version = "1.74" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] zeroize = { version = "^1.5", features = ["derive"], optional = 
true } borsh = { version = "1", default-features = false, features = ["derive", "de_strict_order"], optional = true } serde = { version = "1", default-features = false, features = ["derive", "alloc"], optional = true } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] } sp-std = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-application-crypto = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } serai-primitives = { path = "../../primitives", default-features = false } coins-primitives = { package = "serai-coins-primitives", path = "../../coins/primitives", default-features = false } [features] std = [ "zeroize", "borsh?/std", "serde?/std", "scale/std", "sp-std/std", "sp-application-crypto/std", "serai-primitives/std", "coins-primitives/std", ] borsh = ["dep:borsh", "serai-primitives/borsh", "coins-primitives/borsh"] serde = ["dep:serde", "serai-primitives/serde", "coins-primitives/serde"] default = ["std"] ================================================ FILE: substrate/in-instructions/primitives/LICENSE ================================================ MIT License Copyright (c) 2022-2023 Luke Parker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

================================================
FILE: substrate/in-instructions/primitives/src/lib.rs
================================================

// NOTE(review): the original header repeated `#![cfg_attr(docsrs, feature(doc_cfg))]` twice;
// the duplicate has been removed. The attribute set is otherwise unchanged.
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
#![expect(clippy::cast_possible_truncation)]

#[cfg(feature = "std")]
use zeroize::Zeroize;

#[cfg(feature = "borsh")]
use borsh::{BorshSerialize, BorshDeserialize};
#[cfg(feature = "serde")]
use serde::{Serialize, Deserialize};

use scale::{Encode, Decode, DecodeWithMemTracking, MaxEncodedLen};

use sp_application_crypto::sr25519::Signature;

#[cfg(not(feature = "std"))]
use sp_std::vec::Vec;

#[rustfmt::skip]
use serai_primitives::{BlockHash, Balance, ExternalNetworkId, NetworkId, SeraiAddress, ExternalBalance, ExternalAddress, system_address};

mod shorthand;
pub use shorthand::*;

// Upper bound on the SCALE-encoded size of a `Batch`, enforced by the pallet's
// `validate_unsigned` before signature verification.
pub const MAX_BATCH_SIZE: usize = 25_000; // ~25kb

// This is the account which will be the origin for any dispatched `InInstruction`s.
pub const IN_INSTRUCTION_EXECUTOR: SeraiAddress = system_address(b"InInstructions-executor"); #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen)] #[cfg_attr(feature = "std", derive(Zeroize))] #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub enum OutAddress { Serai(SeraiAddress), External(ExternalAddress), } impl OutAddress { pub fn is_native(&self) -> bool { matches!(self, Self::Serai(_)) } pub fn as_native(self) -> Option { match self { Self::Serai(addr) => Some(addr), _ => None, } } pub fn as_external(self) -> Option { match self { Self::External(addr) => Some(addr), Self::Serai(_) => None, } } } #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen)] #[cfg_attr(feature = "std", derive(Zeroize))] #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub enum DexCall { // address to send the lp tokens to // TODO: Update this per documentation/Shorthand SwapAndAddLiquidity(SeraiAddress), // minimum out balance and out address Swap(Balance, OutAddress), } #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen)] #[cfg_attr(feature = "std", derive(Zeroize))] #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub enum InInstruction { Transfer(SeraiAddress), Dex(DexCall), GenesisLiquidity(SeraiAddress), SwapToStakedSRI(SeraiAddress, NetworkId), } #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen)] #[cfg_attr(feature = "std", derive(Zeroize))] #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct RefundableInInstruction { pub origin: Option, pub instruction: InInstruction, } 
#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen)] #[cfg_attr(feature = "std", derive(Zeroize))] #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct InInstructionWithBalance { pub instruction: InInstruction, pub balance: ExternalBalance, } #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking)] #[cfg_attr(feature = "std", derive(Zeroize))] #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct Batch { pub network: ExternalNetworkId, pub id: u32, pub block: BlockHash, pub instructions: Vec, } #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking)] #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct SignedBatch { pub batch: Batch, #[cfg_attr( feature = "borsh", borsh( serialize_with = "serai_primitives::borsh_serialize_signature", deserialize_with = "serai_primitives::borsh_deserialize_signature" ) )] pub signature: Signature, } #[cfg(feature = "std")] impl Zeroize for SignedBatch { fn zeroize(&mut self) { self.batch.zeroize(); let signature: &mut [u8] = self.signature.as_mut(); signature.zeroize(); } } // TODO: Make this an associated method? /// The message for the batch signature. 
pub fn batch_message(batch: &Batch) -> Vec { [b"InInstructions-batch".as_ref(), &batch.encode()].concat() } ================================================ FILE: substrate/in-instructions/primitives/src/shorthand.rs ================================================ #[cfg(feature = "std")] use zeroize::Zeroize; #[cfg(feature = "borsh")] use borsh::{BorshSerialize, BorshDeserialize}; #[cfg(feature = "serde")] use serde::{Serialize, Deserialize}; use scale::{Encode, Decode, DecodeWithMemTracking, MaxEncodedLen}; use serai_primitives::{Amount, ExternalAddress, ExternalCoin, SeraiAddress}; use coins_primitives::OutInstruction; use crate::RefundableInInstruction; #[cfg(feature = "std")] use crate::InInstruction; #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen)] #[cfg_attr(feature = "std", derive(Zeroize))] #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub enum Shorthand { Raw(RefundableInInstruction), Swap { origin: Option, coin: ExternalCoin, minimum: Amount, out: OutInstruction, }, SwapAndAddLiquidity { origin: Option, minimum: Amount, gas: Amount, address: SeraiAddress, }, } impl Shorthand { #[cfg(feature = "std")] pub fn transfer(origin: Option, address: SeraiAddress) -> Self { Self::Raw(RefundableInInstruction { origin, instruction: InInstruction::Transfer(address) }) } } impl TryFrom for RefundableInInstruction { type Error = &'static str; fn try_from(shorthand: Shorthand) -> Result { Ok(match shorthand { Shorthand::Raw(instruction) => instruction, Shorthand::Swap { .. } => todo!(), Shorthand::SwapAndAddLiquidity { .. 
} => todo!(), }) } } ================================================ FILE: substrate/node/Cargo.toml ================================================ [package] name = "serai-node" version = "0.1.0" description = "Serai network node, built over Substrate" license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/node" authors = ["Luke Parker "] edition = "2021" publish = false rust-version = "1.74" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [[bin]] name = "serai-node" [dependencies] rand_core = "0.6" zeroize = "1" hex = "0.4" log = "0.4" schnorrkel = "0.11" libp2p = "0.56" sp-core = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b" } sp-keystore = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b" } sp-timestamp = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b" } sp-state-machine = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b" } sp-io = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b" } sp-blockchain = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b" } sp-runtime = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b" } sp-api = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b" } sp-block-builder = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b" } sp-consensus-babe = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b" } frame-benchmarking = { git = 
"https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b" } serai-runtime = { path = "../runtime", features = ["std"] } clap = { version = "4", features = ["derive"] } futures-util = "0.3" tokio = { version = "1", features = ["sync", "rt-multi-thread"] } jsonrpsee = { version = "0.24", features = ["server"] } sc-transaction-pool = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b" } sc-transaction-pool-api = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b" } sc-basic-authorship = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b" } sc-executor = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b" } sc-service = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b" } sc-client-api = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b" } sc-network = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b" } sc-consensus = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b" } sc-consensus-babe = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b" } sc-consensus-grandpa = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b" } sc-authority-discovery = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b" } sc-telemetry = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b" } sc-chain-spec = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = 
"da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b" } sc-cli = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b" } frame-system-rpc-runtime-api = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b" } substrate-frame-rpc-system = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b" } pallet-transaction-payment-rpc = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b" } serai-env = { path = "../../common/env" } [build-dependencies] substrate-build-script-utils = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b" } [features] default = [] fast-epoch = ["serai-runtime/fast-epoch"] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "serai-runtime/runtime-benchmarks", ] ================================================ FILE: substrate/node/LICENSE ================================================ AGPL-3.0-only license Copyright (c) 2022-2023 Luke Parker This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License Version 3 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see . 
================================================ FILE: substrate/node/build.rs ================================================ use substrate_build_script_utils::generate_cargo_keys; fn main() { generate_cargo_keys(); } ================================================ FILE: substrate/node/src/chain_spec.rs ================================================ use core::marker::PhantomData; use std::collections::HashSet; use sp_core::{Decode, Pair as PairTrait, sr25519::Public}; use sc_service::ChainType; use serai_runtime::{ primitives::*, WASM_BINARY, BABE_GENESIS_EPOCH_CONFIG, RuntimeGenesisConfig, SystemConfig, CoinsConfig, ValidatorSetsConfig, SignalsConfig, BabeConfig, GrandpaConfig, EmissionsConfig, }; pub type ChainSpec = sc_service::GenericChainSpec; fn account_from_name(name: &'static str) -> PublicKey { insecure_pair_from_name(name).public() } fn wasm_binary() -> Vec { // TODO: Accept a config of runtime path const WASM_PATH: &str = "/runtime/serai.wasm"; if let Ok(binary) = std::fs::read(WASM_PATH) { log::info!("using {WASM_PATH}"); return binary; } log::info!("using built-in wasm"); WASM_BINARY.ok_or("compiled in wasm not available").unwrap().to_vec() } fn devnet_genesis( validators: &[&'static str], endowed_accounts: Vec, ) -> RuntimeGenesisConfig { let validators = validators.iter().map(|name| account_from_name(name)).collect::>(); let key_shares = NETWORKS .iter() .map(|network| match network { NetworkId::Serai => (NetworkId::Serai, Amount(50_000 * 10_u64.pow(8))), NetworkId::External(ExternalNetworkId::Bitcoin) => { (NetworkId::External(ExternalNetworkId::Bitcoin), Amount(1_000_000 * 10_u64.pow(8))) } NetworkId::External(ExternalNetworkId::Ethereum) => { (NetworkId::External(ExternalNetworkId::Ethereum), Amount(1_000_000 * 10_u64.pow(8))) } NetworkId::External(ExternalNetworkId::Monero) => { (NetworkId::External(ExternalNetworkId::Monero), Amount(100_000 * 10_u64.pow(8))) } }) .collect::>(); RuntimeGenesisConfig { system: SystemConfig { _config: 
PhantomData }, transaction_payment: Default::default(), coins: CoinsConfig { accounts: endowed_accounts .into_iter() .map(|a| (a, Balance { coin: Coin::Serai, amount: Amount(1 << 60) })) .collect(), _ignore: Default::default(), }, validator_sets: ValidatorSetsConfig { networks: key_shares.clone(), participants: validators.clone(), }, emissions: EmissionsConfig { networks: key_shares, participants: validators.clone() }, signals: SignalsConfig::default(), babe: BabeConfig { authorities: validators.iter().map(|validator| ((*validator).into(), 1)).collect(), epoch_config: BABE_GENESIS_EPOCH_CONFIG, _config: PhantomData, }, grandpa: GrandpaConfig { authorities: validators.into_iter().map(|validator| (validator.into(), 1)).collect(), _config: PhantomData, }, } } fn testnet_genesis(validators: Vec<&'static str>) -> RuntimeGenesisConfig { let validators = validators .into_iter() .map(|validator| Public::decode(&mut hex::decode(validator).unwrap().as_slice()).unwrap()) .collect::>(); let key_shares = NETWORKS .iter() .map(|network| match network { NetworkId::Serai => (NetworkId::Serai, Amount(50_000 * 10_u64.pow(8))), NetworkId::External(ExternalNetworkId::Bitcoin) => { (NetworkId::External(ExternalNetworkId::Bitcoin), Amount(1_000_000 * 10_u64.pow(8))) } NetworkId::External(ExternalNetworkId::Ethereum) => { (NetworkId::External(ExternalNetworkId::Ethereum), Amount(1_000_000 * 10_u64.pow(8))) } NetworkId::External(ExternalNetworkId::Monero) => { (NetworkId::External(ExternalNetworkId::Monero), Amount(100_000 * 10_u64.pow(8))) } }) .collect::>(); assert_eq!(validators.iter().collect::>().len(), validators.len()); RuntimeGenesisConfig { system: SystemConfig { _config: PhantomData }, transaction_payment: Default::default(), coins: CoinsConfig { accounts: validators .iter() .map(|a| (*a, Balance { coin: Coin::Serai, amount: Amount(5_000_000 * 10_u64.pow(8)) })) .collect(), _ignore: Default::default(), }, validator_sets: ValidatorSetsConfig { networks: key_shares.clone(), 
participants: validators.clone(), }, emissions: EmissionsConfig { networks: key_shares, participants: validators.clone() }, signals: SignalsConfig::default(), babe: BabeConfig { authorities: validators.iter().map(|validator| ((*validator).into(), 1)).collect(), epoch_config: BABE_GENESIS_EPOCH_CONFIG, _config: PhantomData, }, grandpa: GrandpaConfig { authorities: validators.into_iter().map(|validator| (validator.into(), 1)).collect(), _config: PhantomData, }, } } fn genesis( name: &'static str, id: &'static str, chain_type: ChainType, protocol_id: &'static str, config: &RuntimeGenesisConfig, ) -> ChainSpec { use sp_core::{ Encode, traits::{RuntimeCode, WrappedRuntimeCode, CodeExecutor}, }; use sc_service::ChainSpec as _; let bin = wasm_binary(); let hash = sp_core::blake2_256(&bin).to_vec(); let mut chain_spec = sc_chain_spec::ChainSpecBuilder::new(&bin, None) .with_name(name) .with_id(id) .with_chain_type(chain_type) .with_protocol_id(protocol_id) .build(); let mut ext = sp_state_machine::BasicExternalities::new_empty(); let code_fetcher = WrappedRuntimeCode(bin.clone().into()); sc_executor::WasmExecutor::::builder() .with_allow_missing_host_functions(true) .build() .call( &mut ext, &RuntimeCode { heap_pages: None, code_fetcher: &code_fetcher, hash }, "GenesisApi_build", &config.encode(), sp_core::traits::CallContext::Onchain, ) .0 .unwrap(); let mut storage = ext.into_storages(); storage.top.insert(sp_core::storage::well_known_keys::CODE.to_vec(), bin); chain_spec.set_storage(storage); chain_spec } pub fn development_config() -> ChainSpec { genesis( "Development Network", "devnet", ChainType::Development, "serai-devnet", &devnet_genesis( &["Alice"], vec![ account_from_name("Alice"), account_from_name("Bob"), account_from_name("Charlie"), account_from_name("Dave"), account_from_name("Eve"), account_from_name("Ferdie"), ], ), ) } pub fn local_config() -> ChainSpec { genesis( "Local Test Network", "local", ChainType::Local, "serai-local", &devnet_genesis( &["Alice", 
"Bob", "Charlie", "Dave"], vec![ account_from_name("Alice"), account_from_name("Bob"), account_from_name("Charlie"), account_from_name("Dave"), account_from_name("Eve"), account_from_name("Ferdie"), ], ), ) } #[allow(clippy::redundant_closure_call)] pub fn testnet_config() -> ChainSpec { genesis( "Test Network 0", "testnet-0", ChainType::Live, "serai-testnet-0", &(move || { let _ = testnet_genesis(vec![]); todo!("TODO") })(), ) } pub fn bootnode_multiaddrs(id: &str) -> Vec { match id { "devnet" | "local" => vec![], "testnet-0" => todo!("TODO"), _ => panic!("unrecognized network ID"), } } ================================================ FILE: substrate/node/src/cli.rs ================================================ use sc_cli::RunCmd; #[derive(Debug, clap::Parser)] pub struct Cli { #[clap(subcommand)] pub subcommand: Option, #[clap(flatten)] pub run: RunCmd, } #[allow(clippy::large_enum_variant)] #[derive(Debug, clap::Subcommand)] pub enum Subcommand { // Key management CLI utilities #[clap(subcommand)] Key(sc_cli::KeySubcommand), // Build a chain specification BuildSpec(sc_cli::BuildSpecCmd), // Validate blocks CheckBlock(sc_cli::CheckBlockCmd), // Export blocks ExportBlocks(sc_cli::ExportBlocksCmd), // Export the state of a given block into a chain spec ExportState(sc_cli::ExportStateCmd), // Import blocks ImportBlocks(sc_cli::ImportBlocksCmd), // Remove the entire chain PurgeChain(sc_cli::PurgeChainCmd), // Revert the chain to a previous state Revert(sc_cli::RevertCmd), // DB meta columns information ChainInfo(sc_cli::ChainInfoCmd), } ================================================ FILE: substrate/node/src/command.rs ================================================ use std::sync::Arc; use serai_runtime::Block; use sc_service::{PruningMode, PartialComponents}; use sc_cli::SubstrateCli; use crate::{ chain_spec, cli::{Cli, Subcommand}, service::{self, FullClient}, }; impl SubstrateCli for Cli { fn impl_name() -> String { "Serai Node".into() } fn impl_version() -> 
String { env!("SUBSTRATE_CLI_IMPL_VERSION").to_string() } fn description() -> String { env!("CARGO_PKG_DESCRIPTION").to_string() } fn author() -> String { env!("CARGO_PKG_AUTHORS").to_string() } fn support_url() -> String { "https://github.com/serai-dex/serai/issues/new".to_string() } fn copyright_start_year() -> i32 { 2022 } fn load_spec(&self, id: &str) -> Result, String> { match id { "dev" | "devnet" => Ok(Box::new(chain_spec::development_config())), "local" => Ok(Box::new(chain_spec::local_config())), "testnet" => Ok(Box::new(chain_spec::testnet_config())), _ => panic!("Unknown network ID"), } } } pub fn run() -> sc_cli::Result<()> { let mut cli = Cli::from_args(); match &cli.subcommand { Some(Subcommand::Key(cmd)) => cmd.run(&cli), Some(Subcommand::BuildSpec(cmd)) => { cli.create_runner(cmd)?.sync_run(|config| cmd.run(config.chain_spec, config.network)) } Some(Subcommand::CheckBlock(cmd)) => cli.create_runner(cmd)?.async_run(|config| { let PartialComponents { client, task_manager, import_queue, .. } = service::new_partial(&config)?.0; Ok((cmd.run(client, import_queue), task_manager)) }), Some(Subcommand::ExportBlocks(cmd)) => cli.create_runner(cmd)?.async_run(|config| { let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?.0; Ok((cmd.run(client, config.database), task_manager)) }), Some(Subcommand::ExportState(cmd)) => cli.create_runner(cmd)?.async_run(|config| { let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?.0; Ok((cmd.run(client, config.chain_spec), task_manager)) }), Some(Subcommand::ImportBlocks(cmd)) => cli.create_runner(cmd)?.async_run(|config| { let PartialComponents { client, task_manager, import_queue, .. 
} = service::new_partial(&config)?.0; Ok((cmd.run(client, import_queue), task_manager)) }), Some(Subcommand::PurgeChain(cmd)) => { cli.create_runner(cmd)?.sync_run(|config| cmd.run(config.database)) } Some(Subcommand::Revert(cmd)) => cli.create_runner(cmd)?.async_run(|config| { let PartialComponents { client, task_manager, backend, .. } = service::new_partial(&config)?.0; let aux_revert = Box::new(|client: Arc, backend, blocks| { sc_consensus_babe::revert(client.clone(), backend, blocks)?; sc_consensus_grandpa::revert(client, blocks)?; Ok(()) }); Ok((cmd.run(client, backend, Some(aux_revert)), task_manager)) }), Some(Subcommand::ChainInfo(cmd)) => { cli.create_runner(cmd)?.sync_run(|config| cmd.run::(&config)) } None => { cli.run.network_params.node_key_params = sc_cli::NodeKeyParams { node_key: None, node_key_file: None, node_key_type: sc_cli::arg_enums::NodeKeyType::Ed25519, unsafe_force_node_key_generation: true, }; cli.create_runner(&cli.run)?.run_node_until_exit(|mut config| async { if config.role.is_authority() { config.state_pruning = Some(PruningMode::ArchiveAll); } service::new_full(config).map_err(sc_cli::Error::Service) }) } } } ================================================ FILE: substrate/node/src/keystore.rs ================================================ use zeroize::Zeroize; use sp_core::{crypto::*, sr25519}; use sp_keystore::*; pub struct Keystore(sr25519::Pair); impl Keystore { pub fn from_env() -> Option { let mut key_hex = serai_env::var("KEY")?; if key_hex.trim().is_empty() { None?; } let mut key = hex::decode(&key_hex).expect("KEY from environment wasn't hex"); key_hex.zeroize(); assert_eq!(key.len(), 32, "KEY from environment wasn't 32 bytes"); key.extend(sp_core::blake2_256(&key)); let res = Self(sr25519::Pair::from(schnorrkel::SecretKey::from_bytes(&key).unwrap())); key.zeroize(); Some(res) } } impl sp_keystore::Keystore for Keystore { fn sr25519_public_keys(&self, _: KeyTypeId) -> Vec { vec![self.0.public()] } fn 
sr25519_generate_new(&self, _: KeyTypeId, _: Option<&str>) -> Result { panic!("asked to generate an sr25519 key"); } fn sr25519_sign( &self, _: KeyTypeId, public: &sr25519::Public, msg: &[u8], ) -> Result, Error> { if public == &self.0.public() { Ok(Some(self.0.sign(msg))) } else { Ok(None) } } fn sr25519_vrf_sign( &self, _: KeyTypeId, public: &sr25519::Public, data: &sr25519::vrf::VrfSignData, ) -> Result, Error> { if public == &self.0.public() { Ok(Some(self.0.vrf_sign(data))) } else { Ok(None) } } fn sr25519_vrf_pre_output( &self, _: KeyTypeId, public: &sr25519::Public, input: &sr25519::vrf::VrfInput, ) -> Result, Error> { if public == &self.0.public() { Ok(Some(self.0.vrf_pre_output(input))) } else { Ok(None) } } fn insert(&self, _: KeyTypeId, _: &str, _: &[u8]) -> Result<(), ()> { panic!("asked to insert a key"); } fn keys(&self, _: KeyTypeId) -> Result>, Error> { Ok(vec![self.0.public().0.to_vec()]) } fn has_keys(&self, public_keys: &[(Vec, KeyTypeId)]) -> bool { let our_key = self.0.public().0; for (public_key, _) in public_keys { if our_key != public_key.as_slice() { return false; } } true } } ================================================ FILE: substrate/node/src/main.rs ================================================ mod keystore; mod chain_spec; mod service; mod command; mod rpc; mod cli; fn main() -> sc_cli::Result<()> { command::run() } ================================================ FILE: substrate/node/src/rpc.rs ================================================ use std::{sync::Arc, collections::HashSet}; use rand_core::{RngCore, OsRng}; use sp_core::Encode; use sp_blockchain::{Error as BlockchainError, HeaderBackend, HeaderMetadata}; use sp_block_builder::BlockBuilder; use sp_api::ProvideRuntimeApi; use serai_runtime::{ primitives::{NetworkId, SubstrateAmount, PublicKey}, Nonce, Block, SeraiRuntimeApi, }; use tokio::sync::RwLock; use jsonrpsee::RpcModule; use sc_client_api::BlockBackend; use sc_transaction_pool_api::TransactionPool; pub struct 
FullDeps { pub id: String, pub client: Arc, pub pool: Arc

, pub authority_discovery: Option, } pub fn create_full< C: ProvideRuntimeApi + HeaderBackend + HeaderMetadata + BlockBackend + Send + Sync + 'static, P: TransactionPool + 'static, >( deps: FullDeps, ) -> Result, Box> where C::Api: frame_system_rpc_runtime_api::AccountNonceApi + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi + SeraiRuntimeApi + BlockBuilder, { use substrate_frame_rpc_system::{System, SystemApiServer}; use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; let mut module = RpcModule::new(()); let FullDeps { id, client, pool, authority_discovery } = deps; module.merge(System::new(client.clone(), pool).into_rpc())?; module.merge(TransactionPayment::new(client.clone()).into_rpc())?; if let Some(authority_discovery) = authority_discovery { let mut authority_discovery_module = RpcModule::new((id, client.clone(), RwLock::new(authority_discovery))); authority_discovery_module.register_async_method( "p2p_validators", |params, context, _ext| async move { let [network]: [NetworkId; 1] = params.parse()?; let (id, client, authority_discovery) = &*context; let latest_block = client.info().best_hash; let validators = client.runtime_api().validators(latest_block, network).map_err(|_| { jsonrpsee::types::error::ErrorObjectOwned::owned( -1, format!( "couldn't get validators from the latest block, which is likely a fatal bug. 
{}", "please report this at https://github.com/serai-dex/serai", ), Option::<()>::None, ) }); let validators = match validators { Ok(validators) => validators, Err(e) => return Err(e), }; // Always return the protocol's bootnodes let mut all_p2p_addresses = crate::chain_spec::bootnode_multiaddrs(id); // Additionally returns validators found over the DHT for validator in validators { let mut returned_addresses = authority_discovery .write() .await .get_addresses_by_authority_id(validator.into()) .await .unwrap_or_else(HashSet::new) .into_iter() .collect::>(); // Randomly select an address // There should be one, there may be two if their IP address changed, and more should only // occur if they have multiple proxies/an IP address changing frequently/some issue // preventing consistent self-identification // It isn't beneficial to use multiple addresses for a single peer here if !returned_addresses.is_empty() { all_p2p_addresses.push( returned_addresses .remove(usize::try_from(OsRng.next_u64() >> 32).unwrap() % returned_addresses.len()) .into(), ); } } Ok(all_p2p_addresses) }, )?; module.merge(authority_discovery_module)?; } let mut block_bin_module = RpcModule::new(client); block_bin_module.register_async_method( "chain_getBlockBin", |params, client, _ext| async move { let [block_hash]: [String; 1] = params.parse()?; let Some(block_hash) = hex::decode(&block_hash).ok().and_then(|bytes| { <[u8; 32]>::try_from(bytes.as_slice()) .map(::Hash::from) .ok() }) else { return Err(jsonrpsee::types::error::ErrorObjectOwned::owned( -1, "requested block hash wasn't a valid hash", Option::<()>::None, )); }; let Some(block) = client.block(block_hash).ok().flatten() else { return Err(jsonrpsee::types::error::ErrorObjectOwned::owned( -1, "couldn't find requested block", Option::<()>::None, )); }; Ok(hex::encode(block.block.encode())) }, )?; module.merge(block_bin_module)?; Ok(module) } ================================================ FILE: substrate/node/src/service.rs 
================================================ use std::{boxed::Box, sync::Arc}; use futures_util::stream::StreamExt; use sp_timestamp::InherentDataProvider as TimestampInherent; use sp_consensus_babe::{SlotDuration, inherents::InherentDataProvider as BabeInherent}; use sp_io::SubstrateHostFunctions; use sc_executor::{sp_wasm_interface::ExtendedHostFunctions, WasmExecutor}; use sc_network::{Event, NetworkEventStream, NetworkBackend}; use sc_service::{error::Error as ServiceError, Configuration, TaskManager, TFullClient}; use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sc_client_api::BlockBackend; use sc_telemetry::{Telemetry, TelemetryWorker}; use serai_runtime::{Block, RuntimeApi}; use sc_consensus_babe::{self, SlotProportion}; use sc_consensus_grandpa as grandpa; #[cfg(not(feature = "runtime-benchmarks"))] pub type Executor = WasmExecutor>; #[cfg(feature = "runtime-benchmarks")] pub type Executor = WasmExecutor< ExtendedHostFunctions, >; type FullBackend = sc_service::TFullBackend; pub type FullClient = TFullClient; type SelectChain = sc_consensus::LongestChain; type GrandpaBlockImport = grandpa::GrandpaBlockImport; type BabeBlockImport = sc_consensus_babe::BabeBlockImport; type PartialComponents = sc_service::PartialComponents< FullClient, FullBackend, SelectChain, sc_consensus::DefaultImportQueue, sc_transaction_pool::TransactionPoolWrapper, ( BabeBlockImport, sc_consensus_babe::BabeLink, grandpa::LinkHalf, grandpa::SharedVoterState, Option, ), >; fn create_inherent_data_providers( slot_duration: SlotDuration, ) -> (BabeInherent, TimestampInherent) { let timestamp = TimestampInherent::from_system_time(); (BabeInherent::from_timestamp_and_slot_duration(*timestamp, slot_duration), timestamp) } pub fn new_partial( config: &Configuration, ) -> Result<(PartialComponents, Arc), ServiceError> { let telemetry = config .telemetry_endpoints .clone() .filter(|x| !x.is_empty()) .map(|endpoints| -> Result<_, sc_telemetry::Error> { let worker = 
TelemetryWorker::new(16)?; let telemetry = worker.handle().new_telemetry(endpoints); Ok((worker, telemetry)) }) .transpose()?; #[allow(deprecated)] let executor = Executor::new( config.executor.wasm_method, config.executor.default_heap_pages, config.executor.max_runtime_instances, None, config.executor.runtime_cache_size, ); let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::( config, telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), executor, )?; let client = Arc::new(client); let keystore: Arc = if let Some(keystore) = crate::keystore::Keystore::from_env() { Arc::new(keystore) } else { keystore_container.keystore() }; let telemetry = telemetry.map(|(worker, telemetry)| { task_manager.spawn_handle().spawn("telemetry", None, worker.run()); telemetry }); let select_chain = sc_consensus::LongestChain::new(backend.clone()); let transaction_pool = sc_transaction_pool::Builder::new( task_manager.spawn_essential_handle(), client.clone(), config.role.is_authority().into(), ) .with_options(config.transaction_pool.clone()) .with_prometheus(config.prometheus_registry()) .build(); let transaction_pool = Arc::new(transaction_pool); let (grandpa_block_import, grandpa_link) = grandpa::block_import( client.clone(), u32::MAX, &client, select_chain.clone(), telemetry.as_ref().map(Telemetry::handle), )?; let justification_import = grandpa_block_import.clone(); let (block_import, babe_link) = sc_consensus_babe::block_import( sc_consensus_babe::configuration(&*client)?, grandpa_block_import, client.clone(), )?; let slot_duration = babe_link.config().slot_duration(); let (import_queue, babe_handle) = sc_consensus_babe::import_queue(sc_consensus_babe::ImportQueueParams { link: babe_link.clone(), block_import: block_import.clone(), justification_import: Some(Box::new(justification_import)), client: client.clone(), select_chain: select_chain.clone(), create_inherent_data_providers: move |_, ()| async move { 
Ok(create_inherent_data_providers(slot_duration)) }, spawner: &task_manager.spawn_essential_handle(), registry: config.prometheus_registry(), telemetry: telemetry.as_ref().map(Telemetry::handle), offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool.clone()), })?; // This can't be dropped, or BABE breaks // We don't have anything to do with it though // This won't grow in size, so forgetting this isn't a disastrous memleak std::mem::forget(babe_handle); Ok(( sc_service::PartialComponents { client, backend, task_manager, keystore_container, select_chain, import_queue, transaction_pool, other: (block_import, babe_link, grandpa_link, grandpa::SharedVoterState::empty(), telemetry), }, keystore, )) } pub fn new_full(mut config: Configuration) -> Result { let ( sc_service::PartialComponents { client, backend, mut task_manager, keystore_container: _, import_queue, select_chain, transaction_pool, other: (block_import, babe_link, grandpa_link, shared_voter_state, mut telemetry), }, keystore_container, ) = new_partial(&config)?; config.network.node_name = "serai".to_string(); config.network.client_version = "0.1.0".to_string(); config.network.listen_addresses = vec!["/ip4/0.0.0.0/tcp/30333".parse().unwrap(), "/ip6/::/tcp/30333".parse().unwrap()]; type N = sc_network::service::NetworkWorker::Hash>; let mut net_config = sc_network::config::FullNetworkConfiguration::<_, _, N>::new( &config.network, config.prometheus_registry().cloned(), ); let metrics = N::register_notification_metrics(config.prometheus_registry()); let grandpa_protocol_name = grandpa::protocol_standard_name(&client.block_hash(0).unwrap().unwrap(), &config.chain_spec); let (grandpa_protocol_config, grandpa_notification_service) = sc_consensus_grandpa::grandpa_peers_set_config::( grandpa_protocol_name.clone(), metrics.clone(), net_config.peer_store_handle(), ); net_config.add_notification_protocol(grandpa_protocol_config); let publish_non_global_ips = 
config.network.allow_non_globals_in_dht; let (network, system_rpc_tx, tx_handler_controller, sync_service) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, net_config, client: client.clone(), transaction_pool: transaction_pool.clone(), spawn_handle: task_manager.spawn_handle(), import_queue, block_announce_validator_builder: None, metrics, block_relay: None, warp_sync_config: None, })?; task_manager.spawn_handle().spawn("bootnodes", "bootnodes", { let network = network.clone(); let id = config.chain_spec.id().to_string(); async move { // Transforms the above Multiaddrs into MultiaddrWithPeerIds // While the PeerIds *should* be known in advance and hardcoded, that data wasn't collected in // time and this fine for a testnet let bootnodes = || async { use libp2p::{ core::{ Endpoint, transport::{PortUse, DialOpts}, }, Transport as TransportTrait, tcp::tokio::Transport, noise::Config, }; let bootnode_multiaddrs = crate::chain_spec::bootnode_multiaddrs(&id); let mut tasks = vec![]; for multiaddr in bootnode_multiaddrs { tasks.push(tokio::time::timeout( core::time::Duration::from_secs(10), tokio::task::spawn(async move { let Ok(noise) = Config::new(&sc_network::Keypair::generate_ed25519()) else { None? }; let mut transport = Transport::default() .upgrade(libp2p::core::upgrade::Version::V1) .authenticate(noise) .multiplex(libp2p::yamux::Config::default()); let Ok(transport) = transport.dial( multiaddr.clone(), DialOpts { role: Endpoint::Dialer, port_use: PortUse::Reuse }, ) else { None? }; let Ok((peer_id, _)) = transport.await else { None? 
}; Some(sc_network::config::MultiaddrWithPeerId { multiaddr: multiaddr.into(), peer_id: peer_id.into(), }) }), )); } let mut res = vec![]; for task in tasks { if let Ok(Ok(Some(bootnode))) = task.await { res.push(bootnode); } } res }; use sc_network::{NetworkStatusProvider, NetworkPeers}; loop { if let Ok(status) = network.status().await { if status.num_connected_peers < 3 { for bootnode in bootnodes().await { let _ = network.add_reserved_peer(bootnode); } } } tokio::time::sleep(core::time::Duration::from_secs(60)).await; } } }); let role = config.role; let keystore = keystore_container; if let Some(seed) = config.dev_key_seed.as_ref() { let _ = keystore.sr25519_generate_new(sp_core::crypto::key_types::AUTHORITY_DISCOVERY, Some(seed)); } let prometheus_registry = config.prometheus_registry().cloned(); // TODO: Ensure we're considered as an authority is a validator of an external network let authority_discovery = if role.is_authority() { let (worker, service) = sc_authority_discovery::new_worker_and_service_with_config( #[allow(clippy::field_reassign_with_default)] { let mut worker = sc_authority_discovery::WorkerConfig::default(); worker.publish_non_global_ips = publish_non_global_ips; worker.strict_record_validation = true; worker }, client.clone(), Arc::new(network.clone()), Box::pin(network.event_stream("authority-discovery").filter_map(|e| async move { match e { Event::Dht(e) => Some(e), _ => None, } })), sc_authority_discovery::Role::PublishAndDiscover(keystore.clone()), prometheus_registry.clone(), task_manager.spawn_handle(), ); task_manager.spawn_handle().spawn( "authority-discovery-worker", Some("networking"), worker.run(), ); Some(service) } else { None }; let rpc_builder = { let id = config.chain_spec.id().to_string(); let client = client.clone(); let pool = transaction_pool.clone(); Box::new(move |_| { crate::rpc::create_full(crate::rpc::FullDeps { id: id.clone(), client: client.clone(), pool: pool.clone(), authority_discovery: 
authority_discovery.clone(), }) .map_err(Into::into) }) }; let enable_grandpa = !config.disable_grandpa; let force_authoring = config.force_authoring; let name = config.network.node_name.clone(); sc_service::spawn_tasks(sc_service::SpawnTasksParams { config, backend, client: client.clone(), keystore: keystore.clone(), network: network.clone(), rpc_builder, transaction_pool: transaction_pool.clone(), task_manager: &mut task_manager, system_rpc_tx, tx_handler_controller, sync_service: sync_service.clone(), telemetry: telemetry.as_mut(), })?; if let sc_service::config::Role::Authority { .. } = &role { let slot_duration = babe_link.config().slot_duration(); let babe_config = sc_consensus_babe::BabeParams { keystore: keystore.clone(), client: client.clone(), select_chain, env: sc_basic_authorship::ProposerFactory::new( task_manager.spawn_handle(), client, transaction_pool.clone(), prometheus_registry.as_ref(), telemetry.as_ref().map(Telemetry::handle), ), block_import, sync_oracle: sync_service.clone(), justification_sync_link: sync_service.clone(), create_inherent_data_providers: move |_, ()| async move { Ok(create_inherent_data_providers(slot_duration)) }, force_authoring, backoff_authoring_blocks: None::<()>, babe_link, block_proposal_slot_portion: SlotProportion::new(0.5), max_block_proposal_slot_portion: None, telemetry: telemetry.as_ref().map(Telemetry::handle), }; task_manager.spawn_essential_handle().spawn_blocking( "babe-proposer", Some("block-authoring"), sc_consensus_babe::start_babe(babe_config)?, ); } if enable_grandpa { task_manager.spawn_essential_handle().spawn_blocking( "grandpa-voter", None, grandpa::run_grandpa_voter(grandpa::GrandpaParams { config: grandpa::Config { gossip_duration: std::time::Duration::from_millis(333), justification_generation_period: 512, name: Some(name), observer_enabled: false, keystore: if role.is_authority() { Some(keystore) } else { None }, local_role: role, telemetry: telemetry.as_ref().map(Telemetry::handle), 
protocol_name: grandpa_protocol_name, }, link: grandpa_link, network, sync: Arc::new(sync_service), telemetry: telemetry.as_ref().map(Telemetry::handle), voting_rule: grandpa::VotingRulesBuilder::default().build(), prometheus_registry, shared_voter_state, offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool), notification_service: grandpa_notification_service, })?, ); } Ok(task_manager) } ================================================ FILE: substrate/primitives/Cargo.toml ================================================ [package] name = "serai-primitives" version = "0.1.0" description = "Primitives for the Serai blockchain" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/primitives" authors = ["Luke Parker "] edition = "2021" rust-version = "1.74" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] zeroize = { version = "^1.5", features = ["derive"], optional = true } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] } borsh = { version = "1", default-features = false, features = ["derive", "de_strict_order"], optional = true } serde = { version = "1", default-features = false, features = ["derive", "alloc"], optional = true } sp-application-crypto = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-core = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-runtime = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-io = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-std = { git = 
"https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } frame-support = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } [dev-dependencies] rand_core = { version = "0.6", default-features = false, features = ["getrandom"] } [features] std = ["zeroize", "scale/std", "borsh?/std", "serde?/std", "sp-core/std", "sp-runtime/std", "sp-std/std", "frame-support/std"] borsh = ["dep:borsh"] serde = ["dep:serde"] default = ["std"] ================================================ FILE: substrate/primitives/LICENSE ================================================ MIT License Copyright (c) 2022-2023 Luke Parker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
================================================ FILE: substrate/primitives/src/account.rs ================================================ #[cfg(feature = "std")] use zeroize::Zeroize; #[cfg(feature = "borsh")] use borsh::{BorshSerialize, BorshDeserialize}; #[cfg(feature = "serde")] use serde::{Serialize, Deserialize}; use scale::{Encode, Decode, DecodeWithMemTracking, MaxEncodedLen}; use sp_core::sr25519::Public; pub use sp_core::sr25519::Signature; #[cfg(feature = "std")] use sp_core::{Pair as PairTrait, sr25519::Pair}; use sp_runtime::traits::{LookupError, Lookup, StaticLookup}; pub type PublicKey = Public; #[cfg(feature = "borsh")] pub fn borsh_serialize_public( public: &Public, writer: &mut W, ) -> Result<(), borsh::io::Error> { borsh::BorshSerialize::serialize(&public.0, writer) } #[cfg(feature = "borsh")] pub fn borsh_deserialize_public( reader: &mut R, ) -> Result { let public: [u8; 32] = borsh::BorshDeserialize::deserialize_reader(reader)?; Ok(public.into()) } #[cfg(feature = "borsh")] pub fn borsh_serialize_signature( signature: &Signature, writer: &mut W, ) -> Result<(), borsh::io::Error> { borsh::BorshSerialize::serialize(&signature.0, writer) } #[cfg(feature = "borsh")] pub fn borsh_deserialize_signature( reader: &mut R, ) -> Result { let signature: [u8; 64] = borsh::BorshDeserialize::deserialize_reader(reader)?; Ok(signature.into()) } // TODO: Remove this for solely Public? 
#[derive( Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen, )] #[cfg_attr(feature = "std", derive(Zeroize))] #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct SeraiAddress(pub [u8; 32]); impl SeraiAddress { pub fn new(key: [u8; 32]) -> SeraiAddress { SeraiAddress(key) } } impl From<[u8; 32]> for SeraiAddress { fn from(key: [u8; 32]) -> SeraiAddress { SeraiAddress(key) } } impl From for SeraiAddress { fn from(key: PublicKey) -> SeraiAddress { SeraiAddress(key.0) } } impl From for PublicKey { fn from(address: SeraiAddress) -> PublicKey { PublicKey::from_raw(address.0) } } #[cfg(feature = "std")] impl std::fmt::Display for SeraiAddress { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // TODO: Bech32 write!(f, "{:?}", self.0) } } #[cfg(feature = "std")] pub fn insecure_pair_from_name(name: &str) -> Pair { Pair::from_string(&format!("//{name}"), None).unwrap() } pub struct AccountLookup; impl Lookup for AccountLookup { type Source = SeraiAddress; type Target = PublicKey; fn lookup(&self, source: SeraiAddress) -> Result { Ok(PublicKey::from_raw(source.0)) } } impl StaticLookup for AccountLookup { type Source = SeraiAddress; type Target = PublicKey; fn lookup(source: SeraiAddress) -> Result { Ok(source.into()) } fn unlookup(source: PublicKey) -> SeraiAddress { source.into() } } pub const fn system_address(pallet: &'static [u8]) -> SeraiAddress { let mut address = [0; 32]; let mut set = false; // Implement a while loop since we can't use a for loop let mut i = 0; while i < pallet.len() { address[i] = pallet[i]; if address[i] != 0 { set = true; } i += 1; } // Make sure this address isn't the identity point // Doesn't do address != [0; 32] since that's not const assert!(set, "address is the identity point"); SeraiAddress(address) } ================================================ FILE: 
substrate/primitives/src/amount.rs ================================================ use core::{ ops::{Add, Sub, Mul}, fmt::Debug, }; #[cfg(feature = "std")] use zeroize::Zeroize; #[cfg(feature = "borsh")] use borsh::{BorshSerialize, BorshDeserialize}; #[cfg(feature = "serde")] use serde::{Serialize, Deserialize}; use scale::{Encode, Decode, DecodeWithMemTracking, MaxEncodedLen}; /// The type used for amounts within Substrate. // Distinct from Amount due to Substrate's requirements on this type. // While Amount could have all the necessary traits implemented, not only are they many, it'd make // Amount a large type with a variety of misc functions. // The current type's minimalism sets clear bounds on usage. pub type SubstrateAmount = u64; /// The type used for amounts. #[derive( Clone, Copy, PartialEq, Eq, PartialOrd, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen, )] #[cfg_attr(feature = "std", derive(Zeroize))] #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct Amount(pub SubstrateAmount); // TODO: these impl shouldn't panic and return error to be dealt with. // Otherwise we might have a panic that stops the network. 
impl Add for Amount {
  type Output = Amount;
  fn add(self, other: Amount) -> Amount {
    // Explicitly use checked_add so even if range checks are disabled, this is still checked
    Amount(self.0.checked_add(other.0).unwrap())
  }
}
impl Sub for Amount {
  type Output = Amount;
  fn sub(self, other: Amount) -> Amount {
    // Panics on underflow (see the TODO above on returning errors instead)
    Amount(self.0.checked_sub(other.0).unwrap())
  }
}
impl Mul for Amount {
  type Output = Amount;
  fn mul(self, other: Amount) -> Amount {
    // Panics on overflow
    Amount(self.0.checked_mul(other.0).unwrap())
  }
}
================================================ FILE: substrate/primitives/src/balance.rs ================================================
use core::ops::{Add, Sub, Mul};
#[cfg(feature = "std")]
use zeroize::Zeroize;
#[cfg(feature = "borsh")]
use borsh::{BorshSerialize, BorshDeserialize};
#[cfg(feature = "serde")]
use serde::{Serialize, Deserialize};
use scale::{Encode, Decode, DecodeWithMemTracking, MaxEncodedLen};
use crate::{Amount, Coin, ExternalCoin};
/// The type used for balances (a Coin and an associated Amount).
#[derive(
  Clone, Copy, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen,
)]
#[cfg_attr(feature = "std", derive(Zeroize))]
#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Balance {
  pub coin: Coin,
  pub amount: Amount,
}
/// The type used for external balances (an ExternalCoin and an associated Amount).
#[derive( Clone, Copy, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen, )] #[cfg_attr(feature = "std", derive(Zeroize))] #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct ExternalBalance { pub coin: ExternalCoin, pub amount: Amount, } impl From for Balance { fn from(balance: ExternalBalance) -> Self { Balance { coin: balance.coin.into(), amount: balance.amount } } } impl TryFrom for ExternalBalance { type Error = (); fn try_from(balance: Balance) -> Result { match balance.coin { Coin::Serai => Err(())?, Coin::External(coin) => Ok(ExternalBalance { coin, amount: balance.amount }), } } } // TODO: these impl either should be removed or return errors in case of overflows impl Add for Balance { type Output = Balance; fn add(self, other: Amount) -> Balance { Balance { coin: self.coin, amount: self.amount + other } } } impl Sub for Balance { type Output = Balance; fn sub(self, other: Amount) -> Balance { Balance { coin: self.coin, amount: self.amount - other } } } impl Mul for Balance { type Output = Balance; fn mul(self, other: Amount) -> Balance { Balance { coin: self.coin, amount: self.amount * other } } } ================================================ FILE: substrate/primitives/src/block.rs ================================================ #[cfg(feature = "std")] use zeroize::Zeroize; #[cfg(feature = "borsh")] use borsh::{BorshSerialize, BorshDeserialize}; #[cfg(feature = "serde")] use serde::{Serialize, Deserialize}; use scale::{Encode, Decode, DecodeWithMemTracking, MaxEncodedLen}; use sp_core::H256; /// The type used to identify block numbers. 
#[derive(
  Clone, Copy, Default, PartialEq, Eq, Hash, Debug, Encode, Decode, DecodeWithMemTracking,
  MaxEncodedLen,
)]
#[cfg_attr(feature = "std", derive(Zeroize))]
#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct BlockNumber(pub u64);
// NOTE(review): the `<…>` generic arguments on the From impls below were lost in extraction and
// have been reconstructed from the method signatures.
impl From<u64> for BlockNumber {
  fn from(number: u64) -> BlockNumber {
    BlockNumber(number)
  }
}
/// The type used to identify block hashes.
// This may not be universally compatible
// If a block exists with a hash which isn't 32-bytes, it can be hashed into a value with 32-bytes
// This would require the processor to maintain a mapping of 32-byte IDs to actual hashes, which
// would be fine
#[derive(
  Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen,
)]
#[cfg_attr(feature = "std", derive(Zeroize))]
#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct BlockHash(pub [u8; 32]);
impl AsRef<[u8]> for BlockHash {
  fn as_ref(&self) -> &[u8] {
    self.0.as_ref()
  }
}
impl From<[u8; 32]> for BlockHash {
  fn from(hash: [u8; 32]) -> BlockHash {
    BlockHash(hash)
  }
}
impl From<H256> for BlockHash {
  fn from(hash: H256) -> BlockHash {
    BlockHash(hash.into())
  }
}
// ================================================ FILE: substrate/primitives/src/constants.rs
use crate::BlockNumber;
// 1 MB
pub const BLOCK_SIZE: u32 = 1024 * 1024;
// 6 seconds
pub const TARGET_BLOCK_TIME: u64 = 6;
/// Measured in blocks.
// NOTE(review): `BlockNumber` here resolves to the `u64` alias declared in lib.rs (the explicit
// alias shadows the glob re-export from `block`), which is why u64 arithmetic works — confirm.
pub const MINUTES: BlockNumber = 60 / TARGET_BLOCK_TIME;
pub const HOURS: BlockNumber = 60 * MINUTES;
pub const DAYS: BlockNumber = 24 * HOURS;
pub const WEEKS: BlockNumber = 7 * DAYS;
// Defines a month as 30 days, which is slightly inaccurate
pub const MONTHS: BlockNumber = 30 * DAYS;
// Defines a year as 12 inaccurate months, which is 360 days literally (~1.5% off)
pub const YEARS: BlockNumber = 12 * MONTHS;
/// 6 months of blocks
pub const GENESIS_SRI_TRICKLE_FEED: u64 = 6 * MONTHS;
// 100 Million SRI
// (denominated in the smallest unit; SRI has 8 decimals, hence the 10^8 factor)
pub const GENESIS_SRI: u64 = 100_000_000 * 10_u64.pow(8);
/// This needs to be long enough for arbitrage to occur and make holding any fake price up
/// sufficiently unrealistic.
#[allow(clippy::cast_possible_truncation)]
pub const ARBITRAGE_TIME: u16 = (2 * HOURS) as u16;
/// Since we use the median price, double the window length.
///
/// We additionally +1 so there is a true median.
pub const MEDIAN_PRICE_WINDOW_LENGTH: u16 = (2 * ARBITRAGE_TIME) + 1;
/// Amount of blocks per epoch in the fast-epoch feature that is used in tests.
pub const FAST_EPOCH_DURATION: u64 = 2 * MINUTES;
/// Amount of blocks for the initial period era of the emissions under the fast-epoch feature.
pub const FAST_EPOCH_INITIAL_PERIOD: u64 = 2 * FAST_EPOCH_DURATION; ================================================ FILE: substrate/primitives/src/lib.rs ================================================ #![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(not(feature = "std"), no_std)] #[cfg(feature = "std")] use zeroize::Zeroize; #[cfg(feature = "borsh")] use borsh::{BorshSerialize, BorshDeserialize}; #[cfg(feature = "serde")] use serde::{Serialize, Deserialize}; use scale::{Encode, Decode, DecodeWithMemTracking, MaxEncodedLen}; #[cfg(test)] use sp_io::TestExternalities; #[cfg(test)] use frame_support::{pallet_prelude::*, Identity, traits::StorageInstance}; use sp_core::{ConstU32, bounded::BoundedVec}; pub use sp_application_crypto as crypto; mod amount; pub use amount::*; mod block; pub use block::*; mod networks; pub use networks::*; mod balance; pub use balance::*; mod account; pub use account::*; mod constants; pub use constants::*; pub type BlockNumber = u64; pub type Header = sp_runtime::generic::Header; #[cfg(feature = "borsh")] pub fn borsh_serialize_bounded_vec( bounded: &BoundedVec>, writer: &mut W, ) -> Result<(), borsh::io::Error> { borsh::BorshSerialize::serialize(bounded.as_slice(), writer) } #[cfg(feature = "borsh")] pub fn borsh_deserialize_bounded_vec( reader: &mut R, ) -> Result>, borsh::io::Error> { let vec: Vec = borsh::BorshDeserialize::deserialize_reader(reader)?; vec.try_into().map_err(|_| borsh::io::Error::other("bound exceeded")) } // Monero, our current longest address candidate, has a longest address of featured // 1 (enum) + 1 (flags) + 64 (two keys) = 66 // When JAMTIS arrives, it'll become 112 or potentially even 142 bytes pub const MAX_ADDRESS_LEN: u32 = 196; #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen)] #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct 
ExternalAddress(
  // NOTE(review): the BoundedVec's generic arguments were lost in extraction; reconstructed
  // from MAX_ADDRESS_LEN and the u8-slice accessors below.
  #[cfg_attr(
    feature = "borsh",
    borsh(
      serialize_with = "borsh_serialize_bounded_vec",
      deserialize_with = "borsh_deserialize_bounded_vec"
    )
  )]
  BoundedVec<u8, ConstU32<MAX_ADDRESS_LEN>>,
);
#[cfg(feature = "std")]
impl Zeroize for ExternalAddress {
  fn zeroize(&mut self) {
    self.0.as_mut().zeroize()
  }
}
impl ExternalAddress {
  /// Create a new ExternalAddress, erroring if the address exceeds MAX_ADDRESS_LEN.
  #[cfg(feature = "std")]
  pub fn new(address: Vec<u8>) -> Result<ExternalAddress, &'static str> {
    // Note: this is a plain str literal, so the braces are NOT interpolated; the error text
    // contains "{MAX_ADDRESS_LEN}" verbatim
    Ok(ExternalAddress(
      address.try_into().map_err(|_| "address length exceeds {MAX_ADDRESS_LEN}")?,
    ))
  }
  /// The raw bytes of this address.
  pub fn address(&self) -> &[u8] {
    self.0.as_ref()
  }
  /// Consume self, returning the owned address bytes.
  #[cfg(feature = "std")]
  pub fn consume(self) -> Vec<u8> {
    self.0.into_inner()
  }
}
impl AsRef<[u8]> for ExternalAddress {
  fn as_ref(&self) -> &[u8] {
    self.0.as_ref()
  }
}
// Should be enough for a Uniswap v3 call
pub const MAX_DATA_LEN: u32 = 512;
/// Arbitrary, length-bounded data.
#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen)]
#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Data(
  #[cfg_attr(
    feature = "borsh",
    borsh(
      serialize_with = "borsh_serialize_bounded_vec",
      deserialize_with = "borsh_deserialize_bounded_vec"
    )
  )]
  BoundedVec<u8, ConstU32<MAX_DATA_LEN>>,
);
#[cfg(feature = "std")]
impl Zeroize for Data {
  fn zeroize(&mut self) {
    self.0.as_mut().zeroize()
  }
}
impl Data {
  /// Create new Data, erroring if it exceeds MAX_DATA_LEN.
  #[cfg(feature = "std")]
  pub fn new(data: Vec<u8>) -> Result<Data, &'static str> {
    // As above, a plain str literal — the braces are not interpolated
    Ok(Data(data.try_into().map_err(|_| "data length exceeds {MAX_DATA_LEN}")?))
  }
  /// The raw bytes of this data.
  pub fn data(&self) -> &[u8] {
    self.0.as_ref()
  }
  /// Consume self, returning the owned data bytes.
  #[cfg(feature = "std")]
  pub fn consume(self) -> Vec<u8> {
    self.0.into_inner()
  }
}
impl AsRef<[u8]> for Data {
  fn as_ref(&self) -> &[u8] {
    self.0.as_ref()
  }
}
/// Lexicographically reverses a given byte array.
pub fn reverse_lexicographic_order(bytes: [u8; N]) -> [u8; N] { let mut res = [0u8; N]; for (i, byte) in bytes.iter().enumerate() { res[i] = !*byte; } res } #[test] fn test_reverse_lexicographic_order() { TestExternalities::default().execute_with(|| { use rand_core::{RngCore, OsRng}; struct Storage; impl StorageInstance for Storage { fn pallet_prefix() -> &'static str { "LexicographicOrder" } const STORAGE_PREFIX: &'static str = "storage"; } type Map = StorageMap; struct StorageReverse; impl StorageInstance for StorageReverse { fn pallet_prefix() -> &'static str { "LexicographicOrder" } const STORAGE_PREFIX: &'static str = "storagereverse"; } type MapReverse = StorageMap; // populate the maps let mut amounts = vec![]; for _ in 0 .. 100 { amounts.push(OsRng.next_u64()); } let mut amounts_sorted = amounts.clone(); amounts_sorted.sort(); for a in amounts { Map::set(a.to_be_bytes(), Some(())); MapReverse::set(reverse_lexicographic_order(a.to_be_bytes()), Some(())); } // retrive back and check whether they are sorted as expected let total_size = amounts_sorted.len(); let mut map_iter = Map::iter_keys(); let mut reverse_map_iter = MapReverse::iter_keys(); for i in 0 .. 
amounts_sorted.len() { let first = map_iter.next().unwrap(); let second = reverse_map_iter.next().unwrap(); assert_eq!(u64::from_be_bytes(first), amounts_sorted[i]); assert_eq!( u64::from_be_bytes(reverse_lexicographic_order(second)), amounts_sorted[total_size - (i + 1)] ); } }); } ================================================ FILE: substrate/primitives/src/networks.rs ================================================ #[cfg(feature = "std")] use zeroize::Zeroize; use scale::{Encode, EncodeLike, Decode, DecodeWithMemTracking, MaxEncodedLen}; #[cfg(feature = "borsh")] use borsh::{BorshSerialize, BorshDeserialize}; #[cfg(feature = "serde")] use serde::{Serialize, Deserialize}; use sp_core::{ConstU32, bounded::BoundedVec}; use sp_std::{vec, vec::Vec}; #[cfg(feature = "borsh")] use crate::{borsh_serialize_bounded_vec, borsh_deserialize_bounded_vec}; /// The type used to identify external networks. #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, PartialOrd, Ord)] #[cfg_attr(feature = "std", derive(Zeroize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub enum ExternalNetworkId { Bitcoin, Ethereum, Monero, } impl Encode for ExternalNetworkId { fn encode(&self) -> Vec { match self { ExternalNetworkId::Bitcoin => vec![1], ExternalNetworkId::Ethereum => vec![2], ExternalNetworkId::Monero => vec![3], } } } impl Decode for ExternalNetworkId { fn decode(input: &mut I) -> Result { let kind = input.read_byte()?; match kind { 1 => Ok(Self::Bitcoin), 2 => Ok(Self::Ethereum), 3 => Ok(Self::Monero), _ => Err(scale::Error::from("invalid format")), } } } impl DecodeWithMemTracking for ExternalNetworkId {} impl MaxEncodedLen for ExternalNetworkId { fn max_encoded_len() -> usize { 1 } } impl EncodeLike for ExternalNetworkId {} #[cfg(feature = "borsh")] impl BorshSerialize for ExternalNetworkId { fn serialize(&self, writer: &mut W) -> std::io::Result<()> { writer.write_all(&self.encode()) } } #[cfg(feature = "borsh")] impl BorshDeserialize for ExternalNetworkId { 
fn deserialize_reader(reader: &mut R) -> std::io::Result { let mut kind = [0; 1]; reader.read_exact(&mut kind)?; ExternalNetworkId::decode(&mut kind.as_slice()) .map_err(|_| std::io::Error::other("invalid format")) } } /// The type used to identify networks. #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, PartialOrd, Ord)] #[cfg_attr(feature = "std", derive(Zeroize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub enum NetworkId { Serai, External(ExternalNetworkId), } impl Encode for NetworkId { fn encode(&self) -> Vec { match self { NetworkId::Serai => vec![0], NetworkId::External(network) => network.encode(), } } } impl Decode for NetworkId { fn decode(input: &mut I) -> Result { let kind = input.read_byte()?; match kind { 0 => Ok(Self::Serai), _ => Ok(ExternalNetworkId::decode(&mut [kind].as_slice())?.into()), } } } impl DecodeWithMemTracking for NetworkId {} impl MaxEncodedLen for NetworkId { fn max_encoded_len() -> usize { 1 } } impl EncodeLike for NetworkId {} #[cfg(feature = "borsh")] impl BorshSerialize for NetworkId { fn serialize(&self, writer: &mut W) -> std::io::Result<()> { writer.write_all(&self.encode()) } } #[cfg(feature = "borsh")] impl BorshDeserialize for NetworkId { fn deserialize_reader(reader: &mut R) -> std::io::Result { let mut kind = [0; 1]; reader.read_exact(&mut kind)?; NetworkId::decode(&mut kind.as_slice()).map_err(|_| std::io::Error::other("invalid format")) } } impl ExternalNetworkId { pub fn coins(&self) -> Vec { match self { Self::Bitcoin => vec![ExternalCoin::Bitcoin], Self::Ethereum => vec![ExternalCoin::Ether, ExternalCoin::Dai], Self::Monero => vec![ExternalCoin::Monero], } } } impl NetworkId { pub fn coins(&self) -> Vec { match self { Self::Serai => vec![Coin::Serai], Self::External(network) => { network.coins().into_iter().map(core::convert::Into::into).collect() } } } } impl From for NetworkId { fn from(network: ExternalNetworkId) -> Self { NetworkId::External(network) } } impl TryFrom for 
ExternalNetworkId { type Error = (); fn try_from(network: NetworkId) -> Result { match network { NetworkId::Serai => Err(())?, NetworkId::External(n) => Ok(n), } } } pub const EXTERNAL_NETWORKS: [ExternalNetworkId; 3] = [ExternalNetworkId::Bitcoin, ExternalNetworkId::Ethereum, ExternalNetworkId::Monero]; pub const NETWORKS: [NetworkId; 4] = [ NetworkId::Serai, NetworkId::External(ExternalNetworkId::Bitcoin), NetworkId::External(ExternalNetworkId::Ethereum), NetworkId::External(ExternalNetworkId::Monero), ]; pub const EXTERNAL_COINS: [ExternalCoin; 4] = [ExternalCoin::Bitcoin, ExternalCoin::Ether, ExternalCoin::Dai, ExternalCoin::Monero]; pub const COINS: [Coin; 5] = [ Coin::Serai, Coin::External(ExternalCoin::Bitcoin), Coin::External(ExternalCoin::Ether), Coin::External(ExternalCoin::Dai), Coin::External(ExternalCoin::Monero), ]; /// The type used to identify external coins. #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] #[cfg_attr(feature = "std", derive(Zeroize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub enum ExternalCoin { Bitcoin, Ether, Dai, Monero, } impl Encode for ExternalCoin { fn encode(&self) -> Vec { match self { ExternalCoin::Bitcoin => vec![4], ExternalCoin::Ether => vec![5], ExternalCoin::Dai => vec![6], ExternalCoin::Monero => vec![7], } } } impl Decode for ExternalCoin { fn decode(input: &mut I) -> Result { let kind = input.read_byte()?; match kind { 4 => Ok(Self::Bitcoin), 5 => Ok(Self::Ether), 6 => Ok(Self::Dai), 7 => Ok(Self::Monero), _ => Err(scale::Error::from("invalid format")), } } } impl DecodeWithMemTracking for ExternalCoin {} impl MaxEncodedLen for ExternalCoin { fn max_encoded_len() -> usize { 1 } } impl EncodeLike for ExternalCoin {} #[cfg(feature = "borsh")] impl BorshSerialize for ExternalCoin { fn serialize(&self, writer: &mut W) -> std::io::Result<()> { writer.write_all(&self.encode()) } } #[cfg(feature = "borsh")] impl BorshDeserialize for ExternalCoin { fn deserialize_reader(reader: 
&mut R) -> std::io::Result<Self> {
    let mut kind = [0; 1];
    reader.read_exact(&mut kind)?;
    ExternalCoin::decode(&mut kind.as_slice()).map_err(|_| std::io::Error::other("invalid format"))
  }
}
/// The type used to identify coins.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[cfg_attr(feature = "std", derive(Zeroize))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum Coin {
  Serai,
  External(ExternalCoin),
}
// 0 is SRI; external coins delegate to their own (disjoint, 4-7) single-byte tags.
// NOTE(review): the method-level generics below were lost in extraction and have been
// reconstructed per the scale/borsh trait signatures.
impl Encode for Coin {
  fn encode(&self) -> Vec<u8> {
    match self {
      Coin::Serai => vec![0],
      Coin::External(ec) => ec.encode(),
    }
  }
}
impl Decode for Coin {
  fn decode<I: scale::Input>(input: &mut I) -> Result<Self, scale::Error> {
    let kind = input.read_byte()?;
    match kind {
      0 => Ok(Self::Serai),
      // Any non-zero tag is delegated to ExternalCoin, which rejects unknown tags
      _ => Ok(ExternalCoin::decode(&mut [kind].as_slice())?.into()),
    }
  }
}
impl DecodeWithMemTracking for Coin {}
impl MaxEncodedLen for Coin {
  fn max_encoded_len() -> usize {
    1
  }
}
impl EncodeLike for Coin {}
#[cfg(feature = "borsh")]
impl BorshSerialize for Coin {
  fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
    writer.write_all(&self.encode())
  }
}
#[cfg(feature = "borsh")]
impl BorshDeserialize for Coin {
  fn deserialize_reader<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
    let mut kind = [0; 1];
    reader.read_exact(&mut kind)?;
    Coin::decode(&mut kind.as_slice()).map_err(|_| std::io::Error::other("invalid format"))
  }
}
impl From<ExternalCoin> for Coin {
  fn from(coin: ExternalCoin) -> Self {
    Coin::External(coin)
  }
}
impl TryFrom<Coin> for ExternalCoin {
  type Error = ();
  /// Fails (with `()`) for the native coin, SRI.
  fn try_from(coin: Coin) -> Result<Self, Self::Error> {
    match coin {
      Coin::Serai => Err(())?,
      Coin::External(c) => Ok(c),
    }
  }
}
impl ExternalCoin {
  /// The network this coin exists on.
  pub fn network(&self) -> ExternalNetworkId {
    match self {
      ExternalCoin::Bitcoin => ExternalNetworkId::Bitcoin,
      ExternalCoin::Ether | ExternalCoin::Dai => ExternalNetworkId::Ethereum,
      ExternalCoin::Monero => ExternalNetworkId::Monero,
    }
  }
  /// The human-readable name of this coin.
  pub fn name(&self) -> &'static str {
    match self {
      ExternalCoin::Bitcoin => "Bitcoin",
      ExternalCoin::Ether => "Ether",
      ExternalCoin::Dai => "Dai Stablecoin",
      ExternalCoin::Monero => "Monero",
    }
  }
  /// The ticker symbol of this coin.
  pub fn symbol(&self) -> &'static str {
    match self {
      ExternalCoin::Bitcoin => "BTC",
      ExternalCoin::Ether => "ETH",
      ExternalCoin::Dai => "DAI",
      ExternalCoin::Monero => "XMR",
    }
  }
  /// The amount of decimals tracked on Serai for this coin.
  pub fn decimals(&self) -> u32 {
    match self {
      // Ether and DAI have 18 decimals, yet we only track 8 in order to fit them within u64s
      ExternalCoin::Bitcoin | ExternalCoin::Ether | ExternalCoin::Dai => 8,
      ExternalCoin::Monero => 12,
    }
  }
}
impl Coin {
  /// The native coin, SRI.
  pub fn native() -> Coin {
    Coin::Serai
  }
  pub fn network(&self) -> NetworkId {
    match self {
      Coin::Serai => NetworkId::Serai,
      Coin::External(c) => c.network().into(),
    }
  }
  pub fn name(&self) -> &'static str {
    match self {
      Coin::Serai => "Serai",
      Coin::External(c) => c.name(),
    }
  }
  pub fn symbol(&self) -> &'static str {
    match self {
      Coin::Serai => "SRI",
      Coin::External(c) => c.symbol(),
    }
  }
  pub fn decimals(&self) -> u32 {
    match self {
      Coin::Serai => 8,
      Coin::External(c) => c.decimals(),
    }
  }
  /// Whether this is the native coin, SRI.
  pub fn is_native(&self) -> bool {
    matches!(self, Coin::Serai)
  }
}
// Max of 8 coins per network
// Since Serai isn't interested in listing tokens, as on-chain DEXs will almost certainly have
// more liquidity, the only reason we'd have so many coins from a network is if there's no DEX
// on-chain
// There's probably no chain with so many *worthwhile* coins and no on-chain DEX
// This could probably be just 4, yet 8 is a hedge for the unforeseen
// If necessary, this can be increased with a fork
pub const MAX_COINS_PER_NETWORK: u32 = 8;
/// Network definition.
#[derive(Clone, PartialEq, Eq, Debug)]
#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Network {
  // NOTE(review): the BoundedVec's generic arguments were lost in extraction; reconstructed
  // from MAX_COINS_PER_NETWORK and the Coin accessors below.
  #[cfg_attr(
    feature = "borsh",
    borsh(
      serialize_with = "borsh_serialize_bounded_vec",
      deserialize_with = "borsh_deserialize_bounded_vec"
    )
  )]
  coins: BoundedVec<Coin, ConstU32<MAX_COINS_PER_NETWORK>>,
}
#[cfg(feature = "std")]
impl Zeroize for Network {
  fn zeroize(&mut self) {
    // Zeroize each coin, then clear the vec itself
    for coin in self.coins.as_mut() {
      coin.zeroize();
    }
    self.coins.truncate(0);
  }
}
impl Network {
  /// Create a new Network, erroring if no coins are provided, if the coins span multiple
  /// networks, or if the coin count exceeds MAX_COINS_PER_NETWORK.
  #[cfg(feature = "std")]
  pub fn new(coins: Vec<Coin>) -> Result<Network, &'static str> {
    if coins.is_empty() {
      Err("no coins provided")?;
    }
    // All coins must be on the same network as the first
    let network = coins[0].network();
    for coin in coins.iter().skip(1) {
      if coin.network() != network {
        Err("coins have different networks")?;
      }
    }
    Ok(Network {
      // Note: plain str literal; the braces are not interpolated
      coins: coins.try_into().map_err(|_| "coins length exceeds {MAX_COINS_PER_NETWORK}")?,
    })
  }
  pub fn coins(&self) -> &[Coin] {
    &self.coins
  }
}
================================================ FILE: substrate/runtime/Cargo.toml ================================================
[package]
name = "serai-runtime"
version = "0.1.0"
description = "Serai network node runtime, built over Substrate"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/substrate/runtime"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"
rust-version = "1.74"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[package.metadata.cargo-machete]
ignored = ["scale"]

[lints]
workspace = true

[dependencies]
hashbrown = { version = "0.14", default-features = false, features = ["ahash", "inline-more"] }

scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] }

sp-core = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false }
sp-std = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b",
default-features = false } sp-offchain = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-version = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-inherents = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-session = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-consensus-babe = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-consensus-grandpa = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-authority-discovery = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-transaction-pool = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-block-builder = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-runtime = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-api = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } frame-system = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } frame-support = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", 
default-features = false } frame-executive = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } frame-benchmarking = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false, optional = true } serai-primitives = { path = "../primitives", default-features = false } serai-abi = { path = "../abi", default-features = false, features = ["serde"] } pallet-timestamp = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } pallet-authorship = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } pallet-transaction-payment = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } coins-pallet = { package = "serai-coins-pallet", path = "../coins/pallet", default-features = false } dex-pallet = { package = "serai-dex-pallet", path = "../dex/pallet", default-features = false } validator-sets-pallet = { package = "serai-validator-sets-pallet", path = "../validator-sets/pallet", default-features = false } genesis-liquidity-pallet = { package = "serai-genesis-liquidity-pallet", path = "../genesis-liquidity/pallet", default-features = false } emissions-pallet = { package = "serai-emissions-pallet", path = "../emissions/pallet", default-features = false } economic-security-pallet = { package = "serai-economic-security-pallet", path = "../economic-security/pallet", default-features = false } in-instructions-pallet = { package = "serai-in-instructions-pallet", path = "../in-instructions/pallet", default-features = false } signals-pallet = { package = "serai-signals-pallet", path = "../signals/pallet", default-features = false } pallet-session = { git = 
"https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } pallet-babe = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } pallet-grandpa = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } frame-system-rpc-runtime-api = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } [build-dependencies] substrate-wasm-builder = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b" } [features] std = [ "scale/std", "sp-core/std", "sp-std/std", "sp-offchain/std", "sp-version/std", "sp-inherents/std", "sp-session/std", "sp-consensus-babe/std", "sp-consensus-grandpa/std", "sp-authority-discovery/std", "sp-transaction-pool/std", "sp-block-builder/std", "sp-runtime/std", "sp-api/std", "frame-system/std", "frame-support/std", "frame-executive/std", "serai-primitives/std", "serai-abi/std", "serai-abi/serde", "pallet-timestamp/std", "pallet-authorship/std", "pallet-transaction-payment/std", "coins-pallet/std", "dex-pallet/std", "validator-sets-pallet/std", "genesis-liquidity-pallet/std", "emissions-pallet/std", "economic-security-pallet/std", "in-instructions-pallet/std", "signals-pallet/std", "pallet-session/std", "pallet-babe/std", "pallet-grandpa/std", "frame-system-rpc-runtime-api/std", "pallet-transaction-payment-rpc-runtime-api/std", ] fast-epoch = [ "genesis-liquidity-pallet/fast-epoch", "emissions-pallet/fast-epoch", ] runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "frame-system/runtime-benchmarks", 
"frame-support/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-babe/runtime-benchmarks", "pallet-grandpa/runtime-benchmarks", ] default = ["std"] ================================================ FILE: substrate/runtime/LICENSE ================================================ AGPL-3.0-only license Copyright (c) 2022-2023 Luke Parker This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License Version 3 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see . ================================================ FILE: substrate/runtime/build.rs ================================================ fn main() { #[cfg(feature = "std")] substrate_wasm_builder::WasmBuilder::build_using_defaults(); } ================================================ FILE: substrate/runtime/src/abi.rs ================================================ use core::marker::PhantomData; use scale::{Encode, Decode}; use serai_abi::Call; use crate::{ Vec, primitives::{PublicKey, SeraiAddress}, timestamp, coins, dex, genesis_liquidity, validator_sets::{self, MembershipProof}, in_instructions, signals, babe, grandpa, RuntimeCall, }; impl From for RuntimeCall { fn from(call: Call) -> RuntimeCall { match call { Call::Timestamp(serai_abi::timestamp::Call::set { now }) => { RuntimeCall::Timestamp(timestamp::Call::set { now }) } Call::Coins(coins) => match coins { serai_abi::coins::Call::transfer { to, balance } => { RuntimeCall::Coins(coins::Call::transfer { to: to.into(), balance }) } serai_abi::coins::Call::burn { balance } => { 
RuntimeCall::Coins(coins::Call::burn { balance }) } serai_abi::coins::Call::burn_with_instruction { instruction } => { RuntimeCall::Coins(coins::Call::burn_with_instruction { instruction }) } }, Call::LiquidityTokens(lt) => match lt { serai_abi::liquidity_tokens::Call::transfer { to, balance } => { RuntimeCall::LiquidityTokens(coins::Call::transfer { to: to.into(), balance }) } serai_abi::liquidity_tokens::Call::burn { balance } => { RuntimeCall::LiquidityTokens(coins::Call::burn { balance }) } }, Call::Dex(dex) => match dex { serai_abi::dex::Call::add_liquidity { coin, coin_desired, sri_desired, coin_min, sri_min, mint_to, } => RuntimeCall::Dex(dex::Call::add_liquidity { coin, coin_desired, sri_desired, coin_min, sri_min, mint_to: mint_to.into(), }), serai_abi::dex::Call::remove_liquidity { coin, lp_token_burn, coin_min_receive, sri_min_receive, withdraw_to, } => RuntimeCall::Dex(dex::Call::remove_liquidity { coin, lp_token_burn, coin_min_receive, sri_min_receive, withdraw_to: withdraw_to.into(), }), serai_abi::dex::Call::swap_exact_tokens_for_tokens { path, amount_in, amount_out_min, send_to, } => RuntimeCall::Dex(dex::Call::swap_exact_tokens_for_tokens { path, amount_in, amount_out_min, send_to: send_to.into(), }), serai_abi::dex::Call::swap_tokens_for_exact_tokens { path, amount_out, amount_in_max, send_to, } => RuntimeCall::Dex(dex::Call::swap_tokens_for_exact_tokens { path, amount_out, amount_in_max, send_to: send_to.into(), }), }, Call::ValidatorSets(vs) => match vs { serai_abi::validator_sets::Call::set_keys { network, removed_participants, key_pair, signature, } => RuntimeCall::ValidatorSets(validator_sets::Call::set_keys { network, removed_participants: <_>::try_from( removed_participants.into_iter().map(PublicKey::from).collect::>(), ) .unwrap(), key_pair, signature, }), serai_abi::validator_sets::Call::report_slashes { network, slashes, signature } => { RuntimeCall::ValidatorSets(validator_sets::Call::report_slashes { network, slashes: <_>::try_from( 
slashes .into_iter() .map(|(addr, slash)| (PublicKey::from(addr), slash)) .collect::>(), ) .unwrap(), signature, }) } serai_abi::validator_sets::Call::allocate { network, amount } => { RuntimeCall::ValidatorSets(validator_sets::Call::allocate { network, amount }) } serai_abi::validator_sets::Call::deallocate { network, amount } => { RuntimeCall::ValidatorSets(validator_sets::Call::deallocate { network, amount }) } serai_abi::validator_sets::Call::claim_deallocation { network, session } => { RuntimeCall::ValidatorSets(validator_sets::Call::claim_deallocation { network, session }) } }, Call::GenesisLiquidity(gl) => match gl { serai_abi::genesis_liquidity::Call::remove_coin_liquidity { balance } => { RuntimeCall::GenesisLiquidity(genesis_liquidity::Call::remove_coin_liquidity { balance }) } serai_abi::genesis_liquidity::Call::oraclize_values { values, signature } => { RuntimeCall::GenesisLiquidity(genesis_liquidity::Call::oraclize_values { values, signature, }) } }, Call::InInstructions(ii) => match ii { serai_abi::in_instructions::Call::execute_batch { batch } => { RuntimeCall::InInstructions(in_instructions::Call::execute_batch { batch }) } }, Call::Signals(signals) => match signals { serai_abi::signals::Call::register_retirement_signal { in_favor_of } => { RuntimeCall::Signals(signals::Call::register_retirement_signal { in_favor_of }) } serai_abi::signals::Call::revoke_retirement_signal { retirement_signal_id } => { RuntimeCall::Signals(signals::Call::revoke_retirement_signal { retirement_signal_id }) } serai_abi::signals::Call::favor { signal_id, for_network } => { RuntimeCall::Signals(signals::Call::favor { signal_id, for_network }) } serai_abi::signals::Call::revoke_favor { signal_id, for_network } => { RuntimeCall::Signals(signals::Call::revoke_favor { signal_id, for_network }) } serai_abi::signals::Call::stand_against { signal_id, for_network } => { RuntimeCall::Signals(signals::Call::stand_against { signal_id, for_network }) } }, Call::Babe(babe) => match 
babe { serai_abi::babe::Call::report_equivocation(report) => { RuntimeCall::Babe(babe::Call::report_equivocation { // TODO: Find a better way to go from Proof<[u8; 32]> to Proof equivocation_proof: <_>::decode(&mut report.equivocation_proof.encode().as_slice()) .unwrap(), key_owner_proof: MembershipProof(report.key_owner_proof.into(), PhantomData), }) } serai_abi::babe::Call::report_equivocation_unsigned(report) => { RuntimeCall::Babe(babe::Call::report_equivocation_unsigned { // TODO: Find a better way to go from Proof<[u8; 32]> to Proof equivocation_proof: <_>::decode(&mut report.equivocation_proof.encode().as_slice()) .unwrap(), key_owner_proof: MembershipProof(report.key_owner_proof.into(), PhantomData), }) } }, Call::Grandpa(grandpa) => match grandpa { serai_abi::grandpa::Call::report_equivocation(report) => { RuntimeCall::Grandpa(grandpa::Call::report_equivocation { // TODO: Find a better way to go from Proof<[u8; 32]> to Proof equivocation_proof: <_>::decode(&mut report.equivocation_proof.encode().as_slice()) .unwrap(), key_owner_proof: MembershipProof(report.key_owner_proof.into(), PhantomData), }) } serai_abi::grandpa::Call::report_equivocation_unsigned(report) => { RuntimeCall::Grandpa(grandpa::Call::report_equivocation_unsigned { // TODO: Find a better way to go from Proof<[u8; 32]> to Proof equivocation_proof: <_>::decode(&mut report.equivocation_proof.encode().as_slice()) .unwrap(), key_owner_proof: MembershipProof(report.key_owner_proof.into(), PhantomData), }) } }, } } } impl TryInto for RuntimeCall { type Error = (); fn try_into(self) -> Result { Ok(match self { RuntimeCall::System(_) => Err(())?, RuntimeCall::Timestamp(timestamp::Call::set { now }) => { Call::Timestamp(serai_abi::timestamp::Call::set { now }) } RuntimeCall::Coins(call) => Call::Coins(match call { coins::Call::transfer { to, balance } => { serai_abi::coins::Call::transfer { to: to.into(), balance } } coins::Call::burn { balance } => serai_abi::coins::Call::burn { balance }, 
coins::Call::burn_with_instruction { instruction } => { serai_abi::coins::Call::burn_with_instruction { instruction } } }), RuntimeCall::LiquidityTokens(call) => Call::LiquidityTokens(match call { coins::Call::transfer { to, balance } => { serai_abi::liquidity_tokens::Call::transfer { to: to.into(), balance } } coins::Call::burn { balance } => serai_abi::liquidity_tokens::Call::burn { balance }, _ => Err(())?, }), RuntimeCall::Dex(call) => Call::Dex(match call { dex::Call::add_liquidity { coin, coin_desired, sri_desired, coin_min, sri_min, mint_to, } => serai_abi::dex::Call::add_liquidity { coin, coin_desired, sri_desired, coin_min, sri_min, mint_to: mint_to.into(), }, dex::Call::remove_liquidity { coin, lp_token_burn, coin_min_receive, sri_min_receive, withdraw_to, } => serai_abi::dex::Call::remove_liquidity { coin, lp_token_burn, coin_min_receive, sri_min_receive, withdraw_to: withdraw_to.into(), }, dex::Call::swap_exact_tokens_for_tokens { path, amount_in, amount_out_min, send_to } => { serai_abi::dex::Call::swap_exact_tokens_for_tokens { path, amount_in, amount_out_min, send_to: send_to.into(), } } dex::Call::swap_tokens_for_exact_tokens { path, amount_out, amount_in_max, send_to } => { serai_abi::dex::Call::swap_tokens_for_exact_tokens { path, amount_out, amount_in_max, send_to: send_to.into(), } } }), RuntimeCall::GenesisLiquidity(call) => Call::GenesisLiquidity(match call { genesis_liquidity::Call::remove_coin_liquidity { balance } => { serai_abi::genesis_liquidity::Call::remove_coin_liquidity { balance } } genesis_liquidity::Call::oraclize_values { values, signature } => { serai_abi::genesis_liquidity::Call::oraclize_values { values, signature } } }), RuntimeCall::ValidatorSets(call) => Call::ValidatorSets(match call { validator_sets::Call::set_keys { network, removed_participants, key_pair, signature } => { serai_abi::validator_sets::Call::set_keys { network, removed_participants: <_>::try_from( 
removed_participants.into_iter().map(SeraiAddress::from).collect::>(), ) .unwrap(), key_pair, signature, } } validator_sets::Call::report_slashes { network, slashes, signature } => { serai_abi::validator_sets::Call::report_slashes { network, slashes: <_>::try_from( slashes .into_iter() .map(|(addr, slash)| (SeraiAddress::from(addr), slash)) .collect::>(), ) .unwrap(), signature, } } validator_sets::Call::allocate { network, amount } => { serai_abi::validator_sets::Call::allocate { network, amount } } validator_sets::Call::deallocate { network, amount } => { serai_abi::validator_sets::Call::deallocate { network, amount } } validator_sets::Call::claim_deallocation { network, session } => { serai_abi::validator_sets::Call::claim_deallocation { network, session } } }), RuntimeCall::InInstructions(call) => Call::InInstructions(match call { in_instructions::Call::execute_batch { batch } => { serai_abi::in_instructions::Call::execute_batch { batch } } }), RuntimeCall::Signals(call) => Call::Signals(match call { signals::Call::register_retirement_signal { in_favor_of } => { serai_abi::signals::Call::register_retirement_signal { in_favor_of } } signals::Call::revoke_retirement_signal { retirement_signal_id } => { serai_abi::signals::Call::revoke_retirement_signal { retirement_signal_id } } signals::Call::favor { signal_id, for_network } => { serai_abi::signals::Call::favor { signal_id, for_network } } signals::Call::revoke_favor { signal_id, for_network } => { serai_abi::signals::Call::revoke_favor { signal_id, for_network } } signals::Call::stand_against { signal_id, for_network } => { serai_abi::signals::Call::stand_against { signal_id, for_network } } }), RuntimeCall::Babe(call) => Call::Babe(match call { babe::Call::report_equivocation { equivocation_proof, key_owner_proof } => { serai_abi::babe::Call::report_equivocation(serai_abi::babe::ReportEquivocation { // TODO: Find a better way to go from Proof to Proof<[u8; 32]> equivocation_proof: <_>::decode(&mut 
equivocation_proof.encode().as_slice()).unwrap(), key_owner_proof: key_owner_proof.0.into(), }) } babe::Call::report_equivocation_unsigned { equivocation_proof, key_owner_proof } => { serai_abi::babe::Call::report_equivocation_unsigned(serai_abi::babe::ReportEquivocation { // TODO: Find a better way to go from Proof to Proof<[u8; 32]> equivocation_proof: <_>::decode(&mut equivocation_proof.encode().as_slice()).unwrap(), key_owner_proof: key_owner_proof.0.into(), }) } _ => Err(())?, }), RuntimeCall::Grandpa(call) => Call::Grandpa(match call { grandpa::Call::report_equivocation { equivocation_proof, key_owner_proof } => { serai_abi::grandpa::Call::report_equivocation(serai_abi::grandpa::ReportEquivocation { // TODO: Find a better way to go from Proof to Proof<[u8; 32]> equivocation_proof: <_>::decode(&mut equivocation_proof.encode().as_slice()).unwrap(), key_owner_proof: key_owner_proof.0.into(), }) } grandpa::Call::report_equivocation_unsigned { equivocation_proof, key_owner_proof } => { serai_abi::grandpa::Call::report_equivocation_unsigned( serai_abi::grandpa::ReportEquivocation { // TODO: Find a better way to go from Proof to Proof<[u8; 32]> equivocation_proof: <_>::decode(&mut equivocation_proof.encode().as_slice()).unwrap(), key_owner_proof: key_owner_proof.0.into(), }, ) } _ => Err(())?, }), }) } } ================================================ FILE: substrate/runtime/src/lib.rs ================================================ #![allow(deprecated)] #![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(not(feature = "std"), no_std)] #![recursion_limit = "256"] #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); use core::marker::PhantomData; // Re-export all components pub use serai_primitives as primitives; pub use primitives::{BlockNumber, Header}; pub use frame_system as system; pub use frame_support as support; pub use pallet_timestamp as timestamp; pub use pallet_transaction_payment as 
transaction_payment; pub use coins_pallet as coins; pub use dex_pallet as dex; pub use validator_sets_pallet as validator_sets; pub use in_instructions_pallet as in_instructions; pub use signals_pallet as signals; pub use pallet_babe as babe; pub use pallet_grandpa as grandpa; pub use genesis_liquidity_pallet as genesis_liquidity; pub use emissions_pallet as emissions; pub use economic_security_pallet as economic_security; use sp_std::prelude::*; use sp_version::RuntimeVersion; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, KeyTypeId, traits::{Convert, BlakeTwo256, Block as BlockT}, transaction_validity::{TransactionSource, TransactionValidity}, BoundedVec, Perbill, ApplyExtrinsicResult, }; #[allow(unused_imports)] use primitives::{ NetworkId, PublicKey, AccountLookup, SubstrateAmount, Coin, EXTERNAL_NETWORKS, MEDIAN_PRICE_WINDOW_LENGTH, HOURS, DAYS, MINUTES, TARGET_BLOCK_TIME, BLOCK_SIZE, FAST_EPOCH_DURATION, }; use support::{ traits::{ConstU8, ConstU16, ConstU32, ConstU64, Contains}, weights::{ constants::{RocksDbWeight, WEIGHT_REF_TIME_PER_SECOND}, IdentityFee, Weight, }, parameter_types, construct_runtime, }; use validator_sets::MembershipProof; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use babe::AuthorityId as BabeId; use grandpa::AuthorityId as GrandpaId; mod abi; /// Nonce of a transaction in the chain, for a given account. pub type Nonce = u32; /// A hash of some data used by the chain. pub type Hash = sp_core::H256; pub type SignedExtra = ( system::CheckNonZeroSender, system::CheckSpecVersion, system::CheckTxVersion, system::CheckGenesis, system::CheckEra, system::CheckNonce, system::CheckWeight, transaction_payment::ChargeTransactionPayment, ); pub type Transaction = serai_abi::tx::Transaction; pub type Block = generic::Block; pub type BlockId = generic::BlockId; pub mod opaque { use super::*; impl_opaque_keys! 
{ pub struct SessionKeys { pub babe: Babe, pub grandpa: Grandpa, } } } #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("serai"), impl_name: create_runtime_str!("core"), spec_version: 1, impl_version: 1, authoring_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, system_version: 1, }; pub const PRIMARY_PROBABILITY: (u64, u64) = (1, 4); pub const BABE_GENESIS_EPOCH_CONFIG: sp_consensus_babe::BabeEpochConfiguration = sp_consensus_babe::BabeEpochConfiguration { c: PRIMARY_PROBABILITY, allowed_slots: sp_consensus_babe::AllowedSlots::PrimaryAndSecondaryPlainSlots, }; const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); parameter_types! { pub const BlockHashCount: BlockNumber = 2400; pub const Version: RuntimeVersion = VERSION; // 1 MB block size limit pub BlockLength: system::limits::BlockLength = system::limits::BlockLength::max_with_normal_ratio(BLOCK_SIZE, NORMAL_DISPATCH_RATIO); pub BlockWeights: system::limits::BlockWeights = system::limits::BlockWeights::with_sensible_defaults( Weight::from_parts(2u64 * WEIGHT_REF_TIME_PER_SECOND, u64::MAX), NORMAL_DISPATCH_RATIO, ); } pub struct CallFilter; impl Contains for CallFilter { fn contains(call: &RuntimeCall) -> bool { // If the call is defined in our ABI, it's allowed let call: Result = call.clone().try_into(); call.is_ok() } } impl system::Config for Runtime { type BaseCallFilter = CallFilter; type BlockWeights = BlockWeights; type BlockLength = BlockLength; type AccountId = PublicKey; type RuntimeCall = RuntimeCall; type Lookup = AccountLookup; type Hash = Hash; type Hashing = BlakeTwo256; type Nonce = Nonce; type Block = Block; type RuntimeOrigin = RuntimeOrigin; type RuntimeEvent = RuntimeEvent; type BlockHashCount = BlockHashCount; type DbWeight = RocksDbWeight; type Version = Version; type PalletInfo = PalletInfo; type OnNewAccount = (); type OnKilledAccount = (); type OnSetCode = (); type AccountData = (); type 
SystemWeightInfo = (); type MaxConsumers = support::traits::ConstU32<16>; type RuntimeTask = (); type ExtensionsWeightInfo = (); // TODO type SingleBlockMigrations = (); type MultiBlockMigrator = (); type PreInherents = (); type PostInherents = (); type PostTransactions = (); } impl timestamp::Config for Runtime { type Moment = u64; type OnTimestampSet = Babe; type MinimumPeriod = ConstU64<{ (TARGET_BLOCK_TIME * 1000) / 2 }>; type WeightInfo = (); } impl transaction_payment::Config for Runtime { type RuntimeEvent = RuntimeEvent; type OnChargeTransaction = Coins; type OperationalFeeMultiplier = ConstU8<5>; type WeightToFee = IdentityFee; type LengthToFee = IdentityFee; type FeeMultiplierUpdate = (); type WeightInfo = (); } impl coins::Config for Runtime { type AllowMint = ValidatorSets; } impl coins::Config for Runtime { type AllowMint = (); } impl dex::Config for Runtime { type LPFee = ConstU32<3>; // 0.3% type MintMinLiquidity = ConstU64<10000>; type MaxSwapPathLength = ConstU32<3>; // coin1 -> SRI -> coin2 type MedianPriceWindowLength = ConstU16<{ MEDIAN_PRICE_WINDOW_LENGTH }>; type WeightInfo = dex::weights::SubstrateWeight; } impl validator_sets::Config for Runtime { type ShouldEndSession = Babe; } pub struct IdentityValidatorIdOf; impl Convert> for IdentityValidatorIdOf { fn convert(key: PublicKey) -> Option { Some(key) } } impl signals::Config for Runtime { // 1 week #[allow(clippy::cast_possible_truncation)] type RetirementValidityDuration = ConstU32<{ (7 * 24 * 60 * 60) / (TARGET_BLOCK_TIME as u32) }>; // 2 weeks #[allow(clippy::cast_possible_truncation)] type RetirementLockInDuration = ConstU32<{ (2 * 7 * 24 * 60 * 60) / (TARGET_BLOCK_TIME as u32) }>; } impl in_instructions::Config for Runtime {} impl genesis_liquidity::Config for Runtime {} impl emissions::Config for Runtime {} impl economic_security::Config for Runtime {} // for validating equivocation evidences. 
// The following runtime construction doesn't actually implement the pallet as doing so is // unnecessary // TODO: Replace the requirement on Config for a requirement on FindAuthor directly impl pallet_authorship::Config for Runtime { type FindAuthor = ValidatorSets; type EventHandler = (); } // Maximum number of authorities per session. pub type MaxAuthorities = ConstU32<{ validator_sets::primitives::MAX_KEY_SHARES_PER_SET }>; /// Longevity of an offence report. pub type ReportLongevity = ::EpochDuration; impl babe::Config for Runtime { #[cfg(feature = "fast-epoch")] type EpochDuration = ConstU64<{ FAST_EPOCH_DURATION }>; #[cfg(not(feature = "fast-epoch"))] type EpochDuration = ConstU64<{ 4 * 7 * DAYS }>; type ExpectedBlockTime = ConstU64<{ TARGET_BLOCK_TIME * 1000 }>; type EpochChangeTrigger = babe::ExternalTrigger; type DisabledValidators = ValidatorSets; type WeightInfo = (); type MaxAuthorities = MaxAuthorities; type MaxNominators = ConstU32<1>; type KeyOwnerProof = MembershipProof; type EquivocationReportSystem = babe::EquivocationReportSystem; } impl grandpa::Config for Runtime { type RuntimeEvent = RuntimeEvent; type WeightInfo = (); type MaxAuthorities = MaxAuthorities; type MaxNominators = ConstU32<1>; type MaxSetIdSessionEntries = ConstU64<0>; type KeyOwnerProof = MembershipProof; type EquivocationReportSystem = grandpa::EquivocationReportSystem; } #[doc(hidden)] pub struct GetCurrentSessionForSubstrate; impl pallet_session::GetCurrentSessionForSubstrate for GetCurrentSessionForSubstrate { fn get() -> u32 { validator_sets::Pallet::::latest_decided_session(NetworkId::Serai).unwrap().0 - 1 } } impl pallet_session::Config for Runtime { type Session = GetCurrentSessionForSubstrate; } pub type Executive = frame_executive::Executive< Runtime, Block, system::ChainContext, Runtime, AllPalletsWithSystem, >; construct_runtime!( pub enum Runtime { System: system, Timestamp: timestamp, TransactionPayment: transaction_payment, Coins: coins, LiquidityTokens: 
coins::::{Pallet, Call, Storage, Event}, Dex: dex, ValidatorSets: validator_sets, GenesisLiquidity: genesis_liquidity, Emissions: emissions, EconomicSecurity: economic_security, InInstructions: in_instructions, Signals: signals, Babe: babe, Grandpa: grandpa, } ); #[cfg(feature = "runtime-benchmarks")] #[macro_use] extern crate frame_benchmarking; #[cfg(feature = "runtime-benchmarks")] mod benches { define_benchmarks!( [frame_benchmarking, BaselineBench::] [system, SystemBench::] [pallet_timestamp, Timestamp] [balances, Balances] [babe, Babe] [grandpa, Grandpa] ); } sp_api::decl_runtime_apis! { #[api_version(1)] pub trait SeraiRuntimeApi { fn validators(network_id: NetworkId) -> Vec; } #[api_version(1)] pub trait GenesisApi { fn build(genesis: RuntimeGenesisConfig); } } sp_api::impl_runtime_apis! { impl sp_api::Core for Runtime { fn version() -> RuntimeVersion { VERSION } fn execute_block(block: Block) { for tx in &block.extrinsics { if let Some(signer) = tx.signer() { let signer = PublicKey::from(signer.0); let mut info = frame_system::Account::::get(signer); if info.providers == 0 { info.providers = 1; frame_system::Account::::set(signer, info); } } } Executive::execute_block(block); } fn initialize_block(header: &Header) -> sp_runtime::ExtrinsicInclusionMode { Executive::initialize_block(header); sp_runtime::ExtrinsicInclusionMode::AllExtrinsics } } impl sp_block_builder::BlockBuilder for Runtime { fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { if let Some(signer) = extrinsic.signer() { let signer = PublicKey::from(signer.0); let mut info = frame_system::Account::::get(signer); if info.providers == 0 { info.providers = 1; frame_system::Account::::set(signer, info); } } Executive::apply_extrinsic(extrinsic) } fn finalize_block() -> Header { Executive::finalize_block() } fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { data.create_extrinsics() } fn check_inherents( block: Block, data: sp_inherents::InherentData, ) 
-> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { fn validate_transaction( source: TransactionSource, tx: ::Extrinsic, block_hash: ::Hash, ) -> TransactionValidity { if let Some(signer) = tx.signer() { let signer = PublicKey::from(signer.0); let mut info = frame_system::Account::::get(signer); if info.providers == 0 { info.providers = 1; frame_system::Account::::set(signer, info); } } Executive::validate_transaction(source, tx, block_hash) } } impl sp_offchain::OffchainWorkerApi for Runtime { fn offchain_worker(header: &Header) { Executive::offchain_worker(header) } } impl sp_session::SessionKeys for Runtime { fn generate_session_keys(seed: Option>) -> Vec { opaque::SessionKeys::generate(seed) } fn decode_session_keys( encoded: Vec, ) -> Option, KeyTypeId)>> { opaque::SessionKeys::decode_into_raw_public_keys(&encoded) } } impl sp_consensus_babe::BabeApi for Runtime { fn configuration() -> sp_consensus_babe::BabeConfiguration { use support::traits::Get; let epoch_config = Babe::epoch_config().unwrap_or(BABE_GENESIS_EPOCH_CONFIG); sp_consensus_babe::BabeConfiguration { slot_duration: Babe::slot_duration(), epoch_length: ::EpochDuration::get(), c: epoch_config.c, authorities: Babe::authorities().to_vec(), randomness: Babe::randomness(), allowed_slots: epoch_config.allowed_slots, } } fn current_epoch_start() -> sp_consensus_babe::Slot { Babe::current_epoch_start() } fn current_epoch() -> sp_consensus_babe::Epoch { Babe::current_epoch() } fn next_epoch() -> sp_consensus_babe::Epoch { Babe::next_epoch() } // This refers to a key being 'owned' by an authority in a system with multiple keys per // validator // Since we do not have such an infrastructure, we do not need this fn generate_key_ownership_proof( _slot: sp_consensus_babe::Slot, _authority_id: BabeId, ) -> Option { Some(sp_consensus_babe::OpaqueKeyOwnershipProof::new(vec![])) } fn 
submit_report_equivocation_unsigned_extrinsic( equivocation_proof: sp_consensus_babe::EquivocationProof

, _: sp_consensus_babe::OpaqueKeyOwnershipProof, ) -> Option<()> { let proof = MembershipProof(equivocation_proof.offender.clone().into(), PhantomData); Babe::submit_unsigned_equivocation_report(equivocation_proof, proof) } } impl sp_consensus_grandpa::GrandpaApi for Runtime { fn grandpa_authorities() -> sp_consensus_grandpa::AuthorityList { Grandpa::grandpa_authorities() } fn current_set_id() -> sp_consensus_grandpa::SetId { Grandpa::current_set_id() } fn generate_key_ownership_proof( _set_id: sp_consensus_grandpa::SetId, _authority_id: GrandpaId, ) -> Option { Some(sp_consensus_grandpa::OpaqueKeyOwnershipProof::new(vec![])) } fn submit_report_equivocation_unsigned_extrinsic( equivocation_proof: sp_consensus_grandpa::EquivocationProof<::Hash, u64>, _: sp_consensus_grandpa::OpaqueKeyOwnershipProof, ) -> Option<()> { let proof = MembershipProof(equivocation_proof.offender().clone().into(), PhantomData); Grandpa::submit_unsigned_equivocation_report(equivocation_proof, proof) } } impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { fn account_nonce(account: PublicKey) -> Nonce { System::account_nonce(account) } } impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi< Block, SubstrateAmount > for Runtime { fn query_info( uxt: ::Extrinsic, len: u32, ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo { TransactionPayment::query_info(uxt, len) } fn query_fee_details( uxt: ::Extrinsic, len: u32, ) -> transaction_payment::FeeDetails { TransactionPayment::query_fee_details(uxt, len) } fn query_weight_to_fee(weight: Weight) -> SubstrateAmount { TransactionPayment::weight_to_fee(weight) } fn query_length_to_fee(length: u32) -> SubstrateAmount { TransactionPayment::length_to_fee(length) } } impl sp_authority_discovery::AuthorityDiscoveryApi for Runtime { fn authorities() -> Vec { // Converts to `[u8; 32]` so it can be hashed let serai_validators = Babe::authorities() .into_iter() .map(|(id, _)| id.into_inner().0) .collect::>(); let 
mut all = serai_validators; for network in EXTERNAL_NETWORKS { // Returning the latest-decided, not latest and active, means the active set // may fail to peer find if there isn't sufficient overlap. If a large amount reboot, // forcing some validators to successfully peer find in order for the threshold to become // online again, this may cause a liveness failure. // // This is assumed not to matter in real life, yet an interesting note. let participants = ValidatorSets::participants_for_latest_decided_set(NetworkId::from(network)) .map_or(vec![], BoundedVec::into_inner); for (participant, _) in participants { all.insert(participant.0); } } all.into_iter().map(|id| AuthorityDiscoveryId::from(PublicKey::from_raw(id))).collect() } } impl crate::SeraiRuntimeApi for Runtime { fn validators(network_id: NetworkId) -> Vec { if network_id == NetworkId::Serai { Babe::authorities() .into_iter() .map(|(id, _)| id.into_inner()) .collect() } else { ValidatorSets::participants_for_latest_decided_set(network_id) .map_or( vec![], |vec| vec.into_inner().into_iter().map(|(validator, _)| validator).collect() ) } } } impl crate::GenesisApi for Runtime { fn build(genesis: RuntimeGenesisConfig) { ::build(&genesis) } } impl dex::DexApi for Runtime { fn quote_price_exact_tokens_for_tokens( coin1: Coin, coin2: Coin, amount: SubstrateAmount, include_fee: bool ) -> Option { Dex::quote_price_exact_tokens_for_tokens(coin1, coin2, amount, include_fee) } fn quote_price_tokens_for_exact_tokens( coin1: Coin, coin2: Coin, amount: SubstrateAmount, include_fee: bool ) -> Option { Dex::quote_price_tokens_for_exact_tokens(coin1, coin2, amount, include_fee) } fn get_reserves(coin1: Coin, coin2: Coin) -> Option<(SubstrateAmount, SubstrateAmount)> { Dex::get_reserves(&coin1, &coin2).ok() } } } impl frame_system::offchain::CreateTransactionBase for Runtime where RuntimeCall: From, { type Extrinsic = ::Extrinsic; type RuntimeCall = RuntimeCall; } impl frame_system::offchain::CreateBare for Runtime where 
RuntimeCall: From, { fn create_bare(call: RuntimeCall) -> ::Extrinsic { <::Extrinsic as frame_support::traits::InherentBuilder>::new_inherent(call) } } ================================================ FILE: substrate/signals/pallet/Cargo.toml ================================================ [package] name = "serai-signals-pallet" version = "0.1.0" description = "Signals pallet" license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/signals/pallet" authors = ["Luke Parker "] edition = "2021" rust-version = "1.74" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [package.metadata.cargo-machete] ignored = ["scale"] [lints] workspace = true [dependencies] scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] } sp-core = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-io = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } frame-system = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } frame-support = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } serai-primitives = { path = "../../primitives", default-features = false } serai-signals-primitives = { path = "../primitives", default-features = false } validator-sets-pallet = { package = "serai-validator-sets-pallet", path = "../../validator-sets/pallet", default-features = false } in-instructions-pallet = { package = "serai-in-instructions-pallet", path = "../../in-instructions/pallet", default-features = false } [features] std = [ "scale/std", "sp-core/std", "sp-io/std", "frame-system/std", "frame-support/std", "serai-primitives/std", 
"serai-signals-primitives/std", "validator-sets-pallet/std", "in-instructions-pallet/std", ] runtime-benchmarks = [ "frame-system/runtime-benchmarks", "frame-support/runtime-benchmarks", ] # TODO try-runtime = [] default = ["std"] ================================================ FILE: substrate/signals/pallet/LICENSE ================================================ AGPL-3.0-only license Copyright (c) 2023 Luke Parker This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License Version 3 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see . ================================================ FILE: substrate/signals/pallet/src/lib.rs ================================================ #![cfg_attr(not(feature = "std"), no_std)] #[allow( deprecated, unreachable_patterns, clippy::let_unit_value, clippy::cast_possible_truncation, clippy::ignored_unit_patterns )] // TODO #[frame_support::pallet] pub mod pallet { use sp_core::sr25519::Public; use sp_io::hashing::blake2_256; use frame_system::pallet_prelude::*; // False positive #[allow(unused)] use frame_support::{pallet_prelude::*, sp_runtime}; use serai_primitives::*; use serai_signals_primitives::SignalId; use validator_sets_pallet::{primitives::ValidatorSet, Config as VsConfig, Pallet as VsPallet}; use in_instructions_pallet::{Config as IiConfig, Pallet as InInstructions}; #[pallet::config] pub trait Config: frame_system::Config + VsConfig + IiConfig { type RetirementValidityDuration: Get; type RetirementLockInDuration: Get; } #[pallet::genesis_config] #[derive(Debug)] pub struct GenesisConfig { _config: PhantomData, } 
impl Default for GenesisConfig { fn default() -> Self { GenesisConfig { _config: PhantomData } } } #[pallet::genesis_build] impl BuildGenesisConfig for GenesisConfig { fn build(&self) { // Assert the validity duration is less than the lock-in duration so lock-in periods // automatically invalidate other retirement signals assert!(T::RetirementValidityDuration::get() < T::RetirementLockInDuration::get()); } } #[pallet::pallet] pub struct Pallet(PhantomData); #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen)] pub struct RegisteredRetirementSignal { in_favor_of: [u8; 32], registrant: T::AccountId, registered_at: BlockNumberFor, } impl RegisteredRetirementSignal { fn id(&self) -> [u8; 32] { let mut preimage = b"Signal".to_vec(); preimage.extend(&self.encode()); blake2_256(&preimage) } } #[pallet::storage] type RegisteredRetirementSignals = StorageMap<_, Blake2_128Concat, [u8; 32], RegisteredRetirementSignal, OptionQuery>; #[pallet::storage] pub type Favors = StorageDoubleMap< _, Blake2_128Concat, (SignalId, NetworkId), Blake2_128Concat, T::AccountId, (), OptionQuery, >; #[pallet::storage] pub type SetsInFavor = StorageMap<_, Blake2_128Concat, (SignalId, ValidatorSet), (), OptionQuery>; #[pallet::storage] pub type LockedInRetirement = StorageValue<_, ([u8; 32], BlockNumberFor), OptionQuery>; #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { RetirementSignalRegistered { signal_id: [u8; 32], in_favor_of: [u8; 32], registrant: T::AccountId, }, RetirementSignalRevoked { signal_id: [u8; 32], }, SignalFavored { signal_id: SignalId, by: T::AccountId, for_network: NetworkId, }, SetInFavor { signal_id: SignalId, set: ValidatorSet, }, RetirementSignalLockedIn { signal_id: [u8; 32], }, SetNoLongerInFavor { signal_id: SignalId, set: ValidatorSet, }, FavorRevoked { signal_id: SignalId, by: T::AccountId, for_network: NetworkId, }, AgainstSignal { signal_id: SignalId, who: T::AccountId, for_network: 
NetworkId, }, } #[pallet::error] pub enum Error { RetirementSignalLockedIn, RetirementSignalAlreadyRegistered, NotRetirementSignalRegistrant, NonExistentRetirementSignal, ExpiredRetirementSignal, NotValidator, RevokingNonExistentFavor, } // 80% threshold // TODO: Use 34% for halting a set (not 80%) const REQUIREMENT_NUMERATOR: u64 = 4; const REQUIREMENT_DIVISOR: u64 = 5; impl Pallet { // Returns true if this network's current set is in favor of the signal. // // Must only be called for networks which have a set decided. fn tally_for_network(signal_id: SignalId, network: NetworkId) -> bool { let this_network_session = VsPallet::::latest_decided_session(network).unwrap(); let this_set = ValidatorSet { network, session: this_network_session }; // This is a bounded O(n) (which is still acceptable) due to the infeasibility of caching // here // TODO: Make caching feasible? Do a first-pass with cache then actual pass before // execution? let mut iter = Favors::::iter_prefix_values((signal_id, network)); let mut needed_favor = (VsPallet::::total_allocated_stake(network).unwrap().0 * REQUIREMENT_NUMERATOR) .div_ceil(REQUIREMENT_DIVISOR); while iter.next().is_some() && (needed_favor != 0) { let item_key = iter.last_raw_key(); // `.len() - 32` is safe because AccountId is bound to being Public, which is 32 bytes let account = T::AccountId::decode(&mut &item_key[(item_key.len() - 32) ..]).unwrap(); if VsPallet::::in_latest_decided_set(network, account) { // This call uses the current allocation, not the allocation at the time of set // decision // This is deemed safe due to the validator-set pallet's deallocation scheduling // unwrap is safe due to being in the latest decided set needed_favor = needed_favor.saturating_sub(VsPallet::::allocation((network, account)).unwrap().0); } } if needed_favor == 0 { // Set the set as in favor until someone triggers a re-tally // // Since a re-tally is an extra step we can't assume will occur, this effectively means a // network in favor 
across any point in its Session is in favor for its entire Session // While a malicious actor could increase their stake, favor a signal, then deallocate, // this is largely prevented by deallocation scheduling // // At any given point, only just under 50% of a set can be immediately deallocated // (if each validator has just under two key shares, they can deallocate the entire amount // above a single key share) // // This means that if a signal has a 67% adoption threshold, and someone executes this // attack, they still have a majority of the allocated stake (though less of a majority // than desired) // // With the 80% threshold, removing 39.9% creates a 40.1% to 20% ratio, which is still // the BFT threshold of 67% if !SetsInFavor::::contains_key((signal_id, this_set)) { SetsInFavor::::set((signal_id, this_set), Some(())); Self::deposit_event(Event::SetInFavor { signal_id, set: this_set }); } true } else { if SetsInFavor::::contains_key((signal_id, this_set)) { // This should no longer be under the current tally SetsInFavor::::remove((signal_id, this_set)); Self::deposit_event(Event::SetNoLongerInFavor { signal_id, set: this_set }); } false } } fn tally_for_all_networks(signal_id: SignalId) -> bool { let mut total_in_favor_stake = 0; let mut total_allocated_stake = 0; for network in serai_primitives::NETWORKS { let Some(latest_decided_session) = VsPallet::::latest_decided_session(network) else { continue; }; // If it has a session, it should have a total allocated stake value let network_stake = VsPallet::::total_allocated_stake(network).unwrap(); if SetsInFavor::::contains_key(( signal_id, ValidatorSet { network, session: latest_decided_session }, )) { total_in_favor_stake += network_stake.0; } total_allocated_stake += network_stake.0; } total_in_favor_stake >= (total_allocated_stake * REQUIREMENT_NUMERATOR).div_ceil(REQUIREMENT_DIVISOR) } fn revoke_favor_internal( account: T::AccountId, signal_id: SignalId, for_network: NetworkId, ) -> DispatchResult { if 
!Favors::::contains_key((signal_id, for_network), account) { Err::<(), _>(Error::::RevokingNonExistentFavor)?; } Favors::::remove((signal_id, for_network), account); Self::deposit_event(Event::::FavorRevoked { signal_id, by: account, for_network }); // tally_for_network assumes the network is active, which is implied by having prior set a // favor for it // Technically, this tally may make the network in favor and justify re-tallying for all // networks // Its assumed not to Self::tally_for_network(signal_id, for_network); Ok(()) } } #[pallet::call] impl Pallet { /// Register a retirement signal, declaring the consensus protocol this signal is in favor of. /// /// Retirement signals are registered so that the proposer, presumably a developer, can revoke /// the signal if there's a fault discovered. #[pallet::call_index(0)] #[pallet::weight(0)] // TODO pub fn register_retirement_signal( origin: OriginFor, in_favor_of: [u8; 32], ) -> DispatchResult { // Don't allow retirement signals to be registered once a retirement has been locked in if LockedInRetirement::::exists() { Err::<(), _>(Error::::RetirementSignalLockedIn)?; } let account = ensure_signed(origin)?; // Bind the signal ID to the proposer // This prevents a malicious actor from frontrunning a proposal, causing them to be the // registrant, just to cancel it later let signal = RegisteredRetirementSignal { in_favor_of, registrant: account, registered_at: frame_system::Pallet::::block_number(), }; let signal_id = signal.id(); if RegisteredRetirementSignals::::get(signal_id).is_some() { Err::<(), _>(Error::::RetirementSignalAlreadyRegistered)?; } Self::deposit_event(Event::::RetirementSignalRegistered { signal_id, in_favor_of, registrant: account, }); RegisteredRetirementSignals::::set(signal_id, Some(signal)); Ok(()) } #[pallet::call_index(1)] #[pallet::weight(0)] // TODO pub fn revoke_retirement_signal( origin: OriginFor, retirement_signal_id: [u8; 32], ) -> DispatchResult { let account = 
ensure_signed(origin)?; let Some(registered_signal) = RegisteredRetirementSignals::::get(retirement_signal_id) else { return Err::<(), _>(Error::::NonExistentRetirementSignal.into()); }; if account != registered_signal.registrant { Err::<(), _>(Error::::NotRetirementSignalRegistrant)?; } RegisteredRetirementSignals::::remove(retirement_signal_id); // If this signal was locked in, remove it // This lets a post-lock-in discovered fault be prevented from going live without // intervention by all validators if LockedInRetirement::::get().map(|(signal_id, _block_number)| signal_id) == Some(retirement_signal_id) { LockedInRetirement::::kill(); } Self::deposit_event(Event::::RetirementSignalRevoked { signal_id: retirement_signal_id }); Ok(()) } #[pallet::call_index(2)] #[pallet::weight(0)] // TODO pub fn favor( origin: OriginFor, signal_id: SignalId, for_network: NetworkId, ) -> DispatchResult { let account = ensure_signed(origin)?; // If this is a retirement signal, perform the relevant checks if let SignalId::Retirement(signal_id) = signal_id { // Make sure a retirement hasn't already been locked in if LockedInRetirement::::exists() { Err::<(), _>(Error::::RetirementSignalLockedIn)?; } // Make sure this is a registered retirement // We don't have to do this for a `Halt` signal as `Halt` doesn't have the registration // process let Some(registered_signal) = RegisteredRetirementSignals::::get(signal_id) else { return Err::<(), _>(Error::::NonExistentRetirementSignal.into()); }; // Check the signal isn't out of date // This isn't truly necessary since we only track votes from the most recent validator // sets, ensuring modern relevancy // The reason to still have it is because locking in a dated runtime may cause a corrupt // blockchain and lead to a failure in system integrity // `Halt`, which doesn't have this check, at worst causes temporary downtime if (registered_signal.registered_at + T::RetirementValidityDuration::get().into()) < 
frame_system::Pallet::::block_number() { Err::<(), _>(Error::::ExpiredRetirementSignal)?; } } // Check the signer is a validator // Technically, in the case of Serai, this will check they're planned to be in the next set, // not that they are in the current set // This is a practical requirement due to the lack of tracking historical allocations, and // fine for the purposes here if !VsPallet::::in_latest_decided_set(for_network, account) { Err::<(), _>(Error::::NotValidator)?; } // Set them as in-favor // Doesn't error if they already voted in order to let any validator trigger a re-tally if !Favors::::contains_key((signal_id, for_network), account) { Favors::::set((signal_id, for_network), account, Some(())); Self::deposit_event(Event::SignalFavored { signal_id, by: account, for_network }); } // Check if the network is in favor // tally_for_network expects the network to be active, which is implied by being in the // latest decided set let network_in_favor = Self::tally_for_network(signal_id, for_network); // If this network is in favor, check if enough networks are // We could optimize this by only running the following code when the network is *newly* in // favor // Re-running the following code ensures that if networks' allocated stakes change relative // to each other, any new votes will cause a re-tally if network_in_favor { // If enough are, lock in the signal if Self::tally_for_all_networks(signal_id) { match signal_id { SignalId::Retirement(signal_id) => { LockedInRetirement::::set(Some(( signal_id, frame_system::Pallet::::block_number() + T::RetirementLockInDuration::get().into(), ))); Self::deposit_event(Event::RetirementSignalLockedIn { signal_id }); } SignalId::Halt(network) => { InInstructions::::halt(network)?; } } } } Ok(()) } /// Revoke favor into an abstaining position. 
#[pallet::call_index(3)] #[pallet::weight(0)] // TODO pub fn revoke_favor( origin: OriginFor, signal_id: SignalId, for_network: NetworkId, ) -> DispatchResult { if matches!(&signal_id, SignalId::Retirement(_)) && LockedInRetirement::::exists() { Err::<(), _>(Error::::RetirementSignalLockedIn)?; } // Doesn't check the signal exists due to later checking the favor exists // While the signal may have been revoked, making this pointless, it's not worth the storage // read on every call to check // Since revoke will re-tally, this does technically mean a network will become in-favor of a // revoked signal. Since revoke won't re-tally for all networks/lock-in, this is also fine Self::revoke_favor_internal(ensure_signed(origin)?, signal_id, for_network) } /// Emit an event standing against the signal. /// /// If the origin is currently in favor of the signal, their favor will be revoked. #[pallet::call_index(4)] #[pallet::weight(0)] // TODO pub fn stand_against( origin: OriginFor, signal_id: SignalId, for_network: NetworkId, ) -> DispatchResult { if LockedInRetirement::::exists() { Err::<(), _>(Error::::RetirementSignalLockedIn)?; } let account = ensure_signed(origin)?; // If currently in favor, revoke the favor if Favors::::contains_key((signal_id, for_network), account) { Self::revoke_favor_internal(account, signal_id, for_network)?; } else { // Check this Signal exists (which would've been implied by Favors for it existing) if let SignalId::Retirement(signal_id) = signal_id { if RegisteredRetirementSignals::::get(signal_id).is_none() { Err::<(), _>(Error::::NonExistentRetirementSignal)?; } } } // Emit an event that we're against the signal // No actual effects happen besides this Self::deposit_event(Event::::AgainstSignal { signal_id, who: account, for_network }); Ok(()) } } #[pallet::hooks] impl Hooks> for Pallet { fn on_initialize(current_number: BlockNumberFor) -> Weight { // If this is the block at which a locked-in signal has been set for long enough, panic // 
This will prevent this block from executing and halt the chain if let Some((signal, block_number)) = LockedInRetirement::::get() { if block_number == current_number { panic!( "locked-in signal {} has been set for too long", sp_core::hexdisplay::HexDisplay::from(&signal), ); } } Weight::zero() // TODO } } } pub use pallet::*; ================================================ FILE: substrate/signals/primitives/Cargo.toml ================================================ [package] name = "serai-signals-primitives" version = "0.1.0" description = "Signals primitives" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/signals/primitives" authors = ["Luke Parker "] edition = "2021" rust-version = "1.74" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] zeroize = { version = "^1.5", features = ["derive"], optional = true } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] } borsh = { version = "1", default-features = false, features = ["derive", "de_strict_order"], optional = true } serde = { version = "1", default-features = false, features = ["derive", "alloc"], optional = true } serai-primitives = { path = "../../primitives", version = "0.1", default-features = false } [features] std = [ "zeroize", "scale/std", "borsh?/std", "serde?/std", "serai-primitives/std", ] borsh = ["dep:borsh"] serde = ["dep:serde"] default = ["std"] ================================================ FILE: substrate/signals/primitives/LICENSE ================================================ MIT License Copyright (c) 2023 Luke Parker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 
copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: substrate/signals/primitives/src/lib.rs ================================================ #![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(not(feature = "std"), no_std)] #![expect(clippy::cast_possible_truncation)] use scale::{Encode, Decode, DecodeWithMemTracking, MaxEncodedLen}; use serai_primitives::ExternalNetworkId; #[derive( Clone, Copy, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen, )] #[cfg_attr(feature = "std", derive(zeroize::Zeroize))] #[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize, borsh::BorshDeserialize))] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum SignalId { Retirement([u8; 32]), Halt(ExternalNetworkId), } ================================================ FILE: substrate/validator-sets/pallet/Cargo.toml ================================================ [package] name = "serai-validator-sets-pallet" version = "0.1.0" description = "Validator sets pallet" license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/validator-sets/pallet" authors = ["Luke Parker "] edition = "2021" rust-version = "1.74" [package.metadata.docs.rs] all-features 
= true rustdoc-args = ["--cfg", "docsrs"] [package.metadata.cargo-machete] ignored = ["scale"] [lints] workspace = true [dependencies] hashbrown = { version = "0.14", default-features = false, features = ["ahash", "inline-more"] } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] } sp-core = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-io = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-std = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-application-crypto = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-runtime = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-session = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-staking = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } frame-system = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } frame-support = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } pallet-session = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } pallet-babe = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } 
pallet-grandpa = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } serai-primitives = { path = "../../primitives", default-features = false } validator-sets-primitives = { package = "serai-validator-sets-primitives", path = "../primitives", default-features = false } coins-pallet = { package = "serai-coins-pallet", path = "../../coins/pallet", default-features = false } dex-pallet = { package = "serai-dex-pallet", path = "../../dex/pallet", default-features = false } [features] std = [ "scale/std", "sp-core/std", "sp-io/std", "sp-std/std", "sp-application-crypto/std", "sp-session/std", "sp-runtime/std", "sp-staking/std", "frame-system/std", "frame-support/std", "pallet-session/std", "pallet-babe/std", "pallet-grandpa/std", "serai-primitives/std", "validator-sets-primitives/std", "coins-pallet/std", "dex-pallet/std", ] # TODO try-runtime = [] runtime-benchmarks = [ "frame-system/runtime-benchmarks", "frame-support/runtime-benchmarks", ] default = ["std"] ================================================ FILE: substrate/validator-sets/pallet/LICENSE ================================================ AGPL-3.0-only license Copyright (c) 2022-2023 Luke Parker This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License Version 3 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see . 
================================================ FILE: substrate/validator-sets/pallet/src/lib.rs ================================================ #![cfg_attr(not(feature = "std"), no_std)] use core::marker::PhantomData; use scale::{Encode, Decode, DecodeWithMemTracking}; use sp_std::{vec, vec::Vec}; use sp_core::sr25519::{Public, Signature}; use sp_application_crypto::RuntimePublic; use sp_session::{GetSessionNumber, GetValidatorCount}; use pallet_session::ShouldEndSession; use sp_runtime::{KeyTypeId, ConsensusEngineId, traits::IsMember}; use sp_staking::offence::{ReportOffence, Offence, OffenceError}; use frame_system::{pallet_prelude::*, RawOrigin}; use frame_support::{ pallet_prelude::*, sp_runtime::SaturatedConversion, traits::{DisabledValidators, KeyOwnerProofSystem, FindAuthor, OneSessionHandler}, BoundedVec, WeakBoundedVec, StoragePrefixedMap, }; use serai_primitives::*; pub use validator_sets_primitives as primitives; use primitives::*; use coins_pallet::{Pallet as Coins, AllowMint}; use dex_pallet::Pallet as Dex; use pallet_babe::{ Pallet as Babe, AuthorityId as BabeAuthorityId, EquivocationOffence as BabeEquivocationOffence, }; use pallet_grandpa::{ Pallet as Grandpa, AuthorityId as GrandpaAuthorityId, EquivocationOffence as GrandpaEquivocationOffence, }; #[derive(Debug, Encode, Decode, DecodeWithMemTracking, PartialEq, Eq, Clone)] pub struct MembershipProof(pub Public, pub PhantomData); impl GetSessionNumber for MembershipProof { fn session(&self) -> u32 { let current = Pallet::::session(NetworkId::Serai).unwrap().0; if Babe::::is_member(&BabeAuthorityId::from(self.0)) { current } else { // if it isn't in the current session, it should have been in the previous one. 
current - 1 } } } impl GetValidatorCount for MembershipProof { // We only implement and this interface to satisfy trait requirements // Although this might return the wrong count if the offender was in the previous set, we don't // rely on it and Substrate only relies on it to offer economic calculations we also don't rely // on fn validator_count(&self) -> u32 { u32::try_from(Babe::::authorities().len()).unwrap() } } #[allow( deprecated, unreachable_patterns, clippy::let_unit_value, clippy::cast_possible_truncation, clippy::ignored_unit_patterns )] // TODO #[frame_support::pallet] pub mod pallet { use super::*; #[pallet::config] pub trait Config: frame_system::Config + coins_pallet::Config + dex_pallet::Config + pallet_session::Config + pallet_babe::Config + pallet_grandpa::Config { type ShouldEndSession: ShouldEndSession>; } #[pallet::genesis_config] #[derive(Clone, Debug)] pub struct GenesisConfig { /// Networks to spawn Serai with, and the stake requirement per key share. /// /// Every participant at genesis will automatically be assumed to have this much stake. /// This stake cannot be withdrawn however as there's no actual stake behind it. pub networks: Vec<(NetworkId, Amount)>, /// List of participants to place in the initial validator sets. pub participants: Vec, } impl Default for GenesisConfig { fn default() -> Self { GenesisConfig { networks: Default::default(), participants: Default::default() } } } #[pallet::pallet] pub struct Pallet(PhantomData); /// The current session for a network. // Uses Identity for the lookup to avoid a hash of a severely limited fixed key-space. #[pallet::storage] #[pallet::getter(fn session)] pub type CurrentSession = StorageMap<_, Identity, NetworkId, Session, OptionQuery>; impl Pallet { pub fn latest_decided_session(network: NetworkId) -> Option { let session = Self::session(network); // we already decided about the next session for serai. 
if network == NetworkId::Serai { return session.map(|s| Session(s.0 + 1)); } session } } /// The allocation required per key share. // Uses Identity for the lookup to avoid a hash of a severely limited fixed key-space. #[pallet::storage] #[pallet::getter(fn allocation_per_key_share)] pub type AllocationPerKeyShare = StorageMap<_, Identity, NetworkId, Amount, OptionQuery>; /// The validators selected to be in-set (and their key shares), regardless of if removed. /// /// This method allows iterating over all validators and their stake. #[pallet::storage] #[pallet::getter(fn participants_for_latest_decided_set)] pub(crate) type Participants = StorageMap< _, Identity, NetworkId, BoundedVec<(Public, u64), ConstU32<{ MAX_KEY_SHARES_PER_SET }>>, OptionQuery, >; /// The validators selected to be in-set, regardless of if removed. /// /// This method allows quickly checking for presence in-set and looking up a validator's key /// shares. // Uses Identity for NetworkId to avoid a hash of a severely limited fixed key-space. #[pallet::storage] pub(crate) type InSet = StorageDoubleMap<_, Identity, NetworkId, Blake2_128Concat, Public, u64, OptionQuery>; impl Pallet { // This exists as InSet, for Serai, is the validators set for the next session, *not* the // current set's validators #[inline] fn in_active_serai_set(account: Public) -> bool { // TODO: is_member is internally O(n). Update Babe to use an O(1) storage lookup? Babe::::is_member(&BabeAuthorityId::from(account)) } /// Returns true if the account is included in an active set. /// /// This will still include participants which were removed from the DKG. pub fn in_active_set(network: NetworkId, account: Public) -> bool { if network == NetworkId::Serai { Self::in_active_serai_set(account) } else { InSet::::contains_key(network, account) } } /// Returns true if the account has been definitively included in an active or upcoming set. /// /// This will still include participants which were removed from the DKG. 
pub fn in_set(network: NetworkId, account: Public) -> bool { if InSet::::contains_key(network, account) { return true; } if network == NetworkId::Serai { return Self::in_active_serai_set(account); } false } /// Returns true if the account is present in the latest decided set. /// /// This is useful when working with `allocation` and `total_allocated_stake`, which return the /// latest information. pub fn in_latest_decided_set(network: NetworkId, account: Public) -> bool { InSet::::contains_key(network, account) } } /// The total stake allocated to this network by the active set of validators. #[pallet::storage] #[pallet::getter(fn total_allocated_stake)] pub type TotalAllocatedStake = StorageMap<_, Identity, NetworkId, Amount, OptionQuery>; /// The current amount allocated to a validator set by a validator. #[pallet::storage] #[pallet::getter(fn allocation)] pub type Allocations = StorageMap<_, Blake2_128Concat, (NetworkId, Public), Amount, OptionQuery>; /// A sorted view of the current allocations premised on the underlying DB itself being sorted. /* This uses Identity so we can take advantage of the DB's lexicographic ordering to iterate over the key space from highest-to-lowest allocated. This does remove the protection using a hash algorithm here offers against spam attacks (by flooding the DB with layers, increasing lookup time and merkle proof sizes, not that we use merkle proofs as Polkadot does). Since amounts are represented with just 8 bytes, only 16 nibbles are presents. This caps the potential depth caused by spam at 16 layers (as the underlying DB operates on nibbles). While there is an entire 32-byte public key after this, a Blake hash of the key is inserted after the amount to prevent the key from also being used to cause layer spam. There's also a minimum stake requirement, which further reduces the potential for spam. 
*/ #[pallet::storage] type SortedAllocations = StorageMap<_, Identity, (NetworkId, [u8; 8], [u8; 16], Public), (), OptionQuery>; impl Pallet { #[inline] fn sorted_allocation_key( network: NetworkId, key: Public, amount: Amount, ) -> (NetworkId, [u8; 8], [u8; 16], Public) { let amount = reverse_lexicographic_order(amount.0.to_be_bytes()); let hash = sp_io::hashing::blake2_128(&(network, amount, key).encode()); (network, amount, hash, key) } fn recover_amount_from_sorted_allocation_key(key: &[u8]) -> Amount { let distance_from_end = 8 + 16 + 32; let start_pos = key.len() - distance_from_end; let mut raw: [u8; 8] = key[start_pos .. (start_pos + 8)].try_into().unwrap(); for byte in &mut raw { *byte = !*byte; } Amount(u64::from_be_bytes(raw)) } fn recover_key_from_sorted_allocation_key(key: &[u8]) -> Public { let key: [u8; 32] = key[(key.len() - 32) ..].try_into().unwrap(); key.into() } // Returns if this validator already had an allocation set. fn set_allocation(network: NetworkId, key: Public, amount: Amount) -> bool { let prior = Allocations::::take((network, key)); if let Some(amount) = prior { SortedAllocations::::remove(Self::sorted_allocation_key(network, key, amount)); } if amount.0 != 0 { Allocations::::set((network, key), Some(amount)); SortedAllocations::::set(Self::sorted_allocation_key(network, key, amount), Some(())); } prior.is_some() } } // Doesn't use PrefixIterator as we need to yield the keys *and* values // PrefixIterator only yields the values struct SortedAllocationsIter { _t: PhantomData, prefix: Vec, last: Vec, allocation_per_key_share: Amount, } impl SortedAllocationsIter { fn new(network: NetworkId) -> Self { let mut prefix = SortedAllocations::::final_prefix().to_vec(); prefix.extend(&network.encode()); Self { _t: PhantomData, prefix: prefix.clone(), last: prefix, allocation_per_key_share: Pallet::::allocation_per_key_share(network).expect( "SortedAllocationsIter iterating over a network without a set allocation per key share", ), } } } impl 
Iterator for SortedAllocationsIter { type Item = (Public, Amount); fn next(&mut self) -> Option { let next = sp_io::storage::next_key(&self.last)?; if !next.starts_with(&self.prefix) { None?; } let key = Pallet::::recover_key_from_sorted_allocation_key(&next); let amount = Pallet::::recover_amount_from_sorted_allocation_key(&next); // We may have validators present, with less than the minimum allocation, due to block // rewards if amount.0 < self.allocation_per_key_share.0 { None?; } self.last = next; Some((key, amount)) } } /// Pending deallocations, keyed by the Session they become unlocked on. #[pallet::storage] type PendingDeallocations = StorageDoubleMap< _, Blake2_128Concat, (NetworkId, Public), Identity, Session, Amount, OptionQuery, >; /// The generated key pair for a given validator set instance. #[pallet::storage] #[pallet::getter(fn keys)] pub type Keys = StorageMap<_, Twox64Concat, ExternalValidatorSet, KeyPair, OptionQuery>; /// The key for validator sets which can (and still need to) publish their slash reports. #[pallet::storage] pub type PendingSlashReport = StorageMap<_, Identity, ExternalNetworkId, Public, OptionQuery>; /// Disabled validators. #[pallet::storage] pub type SeraiDisabledIndices = StorageMap<_, Identity, u32, Public, OptionQuery>; /// Mapping from session to its starting block number. 
#[pallet::storage] #[pallet::getter(fn session_begin_block)] pub type SessionBeginBlock = StorageDoubleMap<_, Identity, NetworkId, Identity, Session, u64, ValueQuery>; #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { NewSet { set: ValidatorSet, }, ParticipantRemoved { set: ValidatorSet, removed: T::AccountId, }, KeyGen { set: ExternalValidatorSet, key_pair: KeyPair, }, AcceptedHandover { set: ValidatorSet, }, SetRetired { set: ValidatorSet, }, AllocationIncreased { validator: T::AccountId, network: NetworkId, amount: Amount, }, AllocationDecreased { validator: T::AccountId, network: NetworkId, amount: Amount, delayed_until: Option, }, DeallocationClaimed { validator: T::AccountId, network: NetworkId, session: Session, }, } impl Pallet { fn new_set(network: NetworkId) { // TODO: prevent new set if it doesn't have enough stake for economic security. // Update CurrentSession let session = { let new_session = CurrentSession::::get(network).map_or(Session(0), |session| Session(session.0 + 1)); CurrentSession::::set(network, Some(new_session)); new_session }; // Clear the current InSet assert_eq!( InSet::::clear_prefix(network, MAX_KEY_SHARES_PER_SET, None).maybe_cursor, None ); let allocation_per_key_share = Self::allocation_per_key_share(network).unwrap().0; let mut participants = vec![]; { let mut iter = SortedAllocationsIter::::new(network); let mut key_shares = 0; while key_shares < u64::from(MAX_KEY_SHARES_PER_SET) { let Some((key, amount)) = iter.next() else { break }; let these_key_shares = (amount.0 / allocation_per_key_share).min(u64::from(MAX_KEY_SHARES_PER_SET)); participants.push((key, these_key_shares)); key_shares += these_key_shares; } amortize_excess_key_shares(&mut participants); } for (key, shares) in &participants { InSet::::set(network, key, Some(*shares)); } let set = ValidatorSet { network, session }; Pallet::::deposit_event(Event::NewSet { set }); Participants::::set(network, 
Some(participants.try_into().unwrap())); SessionBeginBlock::::set( network, session, >::block_number().saturated_into::(), ); } } #[pallet::error] pub enum Error { /// Validator Set doesn't exist. NonExistentValidatorSet, /// Not enough allocation to obtain a key share in the set. InsufficientAllocation, /// Trying to deallocate more than allocated. NotEnoughAllocated, /// Allocation would cause the validator set to no longer achieve fault tolerance. AllocationWouldRemoveFaultTolerance, /// Allocation would cause the validator set to never be able to achieve fault tolerance. AllocationWouldPreventFaultTolerance, /// Deallocation would remove the participant from the set, despite the validator not /// specifying so. DeallocationWouldRemoveParticipant, /// Deallocation would cause the validator set to no longer achieve fault tolerance. DeallocationWouldRemoveFaultTolerance, /// Deallocation to be claimed doesn't exist. NonExistentDeallocation, /// Validator Set already generated keys. AlreadyGeneratedKeys, /// An invalid MuSig signature was provided. BadSignature, /// Validator wasn't registered or active. NonExistentValidator, /// Deallocation would take the stake below what is required. 
DeallocationWouldRemoveEconomicSecurity, } #[pallet::hooks] impl Hooks> for Pallet { fn on_initialize(n: BlockNumberFor) -> Weight { if T::ShouldEndSession::should_end_session(n) { Self::rotate_session(); // TODO: set the proper weights T::BlockWeights::get().max_block } else { Weight::zero() } } } #[pallet::genesis_build] impl BuildGenesisConfig for GenesisConfig { fn build(&self) { for (id, stake) in self.networks.clone() { AllocationPerKeyShare::::set(id, Some(stake)); for participant in self.participants.clone() { if Pallet::::set_allocation(id, participant, stake) { panic!("participants contained duplicates"); } } Pallet::::new_set(id); } } } impl Pallet { fn account() -> T::AccountId { system_address(b"ValidatorSets").into() } // is_bft returns if the network is able to survive any single node becoming byzantine. fn is_bft(network: NetworkId) -> bool { let allocation_per_key_share = AllocationPerKeyShare::::get(network).unwrap().0; let mut validators_len = 0; let mut top = None; let mut key_shares = 0; for (_, amount) in SortedAllocationsIter::::new(network) { validators_len += 1; key_shares += amount.0 / allocation_per_key_share; if top.is_none() { top = Some(key_shares); } if key_shares > u64::from(MAX_KEY_SHARES_PER_SET) { break; } } let Some(top) = top else { return false }; // key_shares may be over MAX_KEY_SHARES_PER_SET, which will cause a round robin reduction of // each validator's key shares until their sum is MAX_KEY_SHARES_PER_SET // post_amortization_key_shares_for_top_validator yields what the top validator's key shares // would be after such a reduction, letting us evaluate this correctly let top = post_amortization_key_shares_for_top_validator(validators_len, top, key_shares); (top * 3) < key_shares.min(MAX_KEY_SHARES_PER_SET.into()) } fn increase_allocation( network: NetworkId, account: T::AccountId, amount: Amount, block_reward: bool, ) -> DispatchResult { let old_allocation = Self::allocation((network, account)).unwrap_or(Amount(0)).0; let 
new_allocation = old_allocation + amount.0; let allocation_per_key_share = Self::allocation_per_key_share(network).unwrap().0; // If this is a block reward, we always allow it to be allocated if (new_allocation < allocation_per_key_share) && (!block_reward) { Err(Error::::InsufficientAllocation)?; } let increased_key_shares = (old_allocation / allocation_per_key_share) < (new_allocation / allocation_per_key_share); // Check if the net exhibited the ability to handle any single node becoming byzantine let mut was_bft = None; if increased_key_shares { was_bft = Some(Self::is_bft(network)); } // Increase the allocation now Self::set_allocation(network, account, Amount(new_allocation)); Self::deposit_event(Event::AllocationIncreased { validator: account, network, amount }); // Error if the net no longer can handle any single node becoming byzantine if let Some(was_bft) = was_bft { if was_bft && (!Self::is_bft(network)) { Err(Error::::AllocationWouldRemoveFaultTolerance)?; } } // The above is_bft calls are only used to check a BFT net doesn't become non-BFT // Check here if this call would prevent a non-BFT net from *ever* becoming BFT if (new_allocation / allocation_per_key_share) >= (MAX_KEY_SHARES_PER_SET / 3).into() { Err(Error::::AllocationWouldPreventFaultTolerance)?; } // If they're in the current set, and the current set has completed its handover (so its // currently being tracked by TotalAllocatedStake), update the TotalAllocatedStake if let Some(session) = Self::session(network) { if InSet::::contains_key(network, account) && Self::handover_completed(network, session) { TotalAllocatedStake::::set( network, Some(Amount(TotalAllocatedStake::::get(network).unwrap_or(Amount(0)).0 + amount.0)), ); } } Ok(()) } fn session_to_unlock_on_for_current_set(network: NetworkId) -> Option { let mut to_unlock_on = Self::session(network)?; // Move to the next session, as deallocating currently in-use stake is obviously invalid to_unlock_on.0 += 1; if network == 
NetworkId::Serai { // Since the next Serai set will already have been decided, we can only deallocate one // session later to_unlock_on.0 += 1; } // Increase the session by one, creating a cooldown period to_unlock_on.0 += 1; Some(to_unlock_on) } /// Decreases a validator's allocation to a set. /// /// Errors if the capacity provided by this allocation is in use. /// /// Errors if a partial decrease of allocation which puts the remaining allocation below the /// minimum requirement. /// /// The capacity prior provided by the allocation is immediately removed, in order to ensure it /// doesn't become used (preventing deallocation). /// /// Returns if the amount is immediately eligible for deallocation. fn decrease_allocation( network: NetworkId, account: T::AccountId, amount: Amount, ) -> Result { // Check it's safe to decrease this set's stake by this amount if let NetworkId::External(n) = network { let new_total_staked = Self::total_allocated_stake(NetworkId::from(n)) .unwrap() .0 .checked_sub(amount.0) .ok_or(Error::::NotEnoughAllocated)?; let required_stake = Self::required_stake_for_network(n); if new_total_staked < required_stake { Err(Error::::DeallocationWouldRemoveEconomicSecurity)?; } } let old_allocation = Self::allocation((network, account)).ok_or(Error::::NonExistentValidator)?.0; let new_allocation = old_allocation.checked_sub(amount.0).ok_or(Error::::NotEnoughAllocated)?; // If we're not removing the entire allocation, yet the allocation is no longer at or above // the threshold for a key share, error let allocation_per_key_share = Self::allocation_per_key_share(network).unwrap().0; if (new_allocation != 0) && (new_allocation < allocation_per_key_share) { Err(Error::::DeallocationWouldRemoveParticipant)?; } let decreased_key_shares = (old_allocation / allocation_per_key_share) > (new_allocation / allocation_per_key_share); // If this decreases the validator's key shares, error if the new set is unable to handle // byzantine faults let mut was_bft = 
None; if decreased_key_shares { was_bft = Some(Self::is_bft(network)); } // Decrease the allocation now // Since we don't also update TotalAllocatedStake here, TotalAllocatedStake may be greater // than the sum of all allocations, according to the Allocations StorageMap // This is intentional as this allocation has only been queued for deallocation at this time Self::set_allocation(network, account, Amount(new_allocation)); if let Some(was_bft) = was_bft { if was_bft && (!Self::is_bft(network)) { Err(Error::::DeallocationWouldRemoveFaultTolerance)?; } } // If we're not in-set, allow immediate deallocation if !Self::in_set(network, account) { Self::deposit_event(Event::AllocationDecreased { validator: account, network, amount, delayed_until: None, }); return Ok(true); } // Set it to PendingDeallocations, letting it be released upon a future session // This unwrap should be fine as this account is active, meaning a session has occurred let to_unlock_on = Self::session_to_unlock_on_for_current_set(network).unwrap(); let existing = PendingDeallocations::::get((network, account), to_unlock_on).unwrap_or(Amount(0)); PendingDeallocations::::set( (network, account), to_unlock_on, Some(Amount(existing.0 + amount.0)), ); Self::deposit_event(Event::AllocationDecreased { validator: account, network, amount, delayed_until: Some(to_unlock_on), }); Ok(false) } // Checks if this session has completed the handover from the prior session. 
fn handover_completed(network: NetworkId, session: Session) -> bool { let Some(current_session) = Self::session(network) else { return false }; // If the session we've been queried about is old, it must have completed its handover if current_session.0 > session.0 { return true; } // If the session we've been queried about has yet to start, it can't have completed its // handover if current_session.0 < session.0 { return false; } let NetworkId::External(n) = network else { // Handover is automatically complete for Serai as it doesn't have a handover protocol return true; }; // The current session must have set keys for its handover to be completed if !Keys::::contains_key(ExternalValidatorSet { network: n, session }) { return false; } // This must be the first session (which has set keys) OR the prior session must have been // retired (signified by its keys no longer being present) (session.0 == 0) || (!Keys::::contains_key(ExternalValidatorSet { network: n, session: Session(session.0 - 1), })) } fn new_session() { for network in serai_primitives::NETWORKS { // If this network hasn't started sessions yet, don't start one now let Some(current_session) = Self::session(network) else { continue }; // Only spawn a new set if: // - This is Serai, as we need to rotate Serai upon a new session (per Babe) // - The current set was actually established with a completed handover protocol if (network == NetworkId::Serai) || Self::handover_completed(network, current_session) { Pallet::::new_set(network); // let the Dex know session is rotated. 
Dex::::on_new_session(network); } } } fn set_total_allocated_stake(network: NetworkId) { let participants = Participants::::get(network) .expect("setting TotalAllocatedStake for a network without participants"); let total_stake = participants.iter().fold(0, |acc, (addr, _)| { acc + Allocations::::get((network, addr)).unwrap_or(Amount(0)).0 }); TotalAllocatedStake::::set(network, Some(Amount(total_stake))); } // TODO: This is called retire_set, yet just starts retiring the set // Update the nomenclature within this function pub fn retire_set(set: ValidatorSet) { // Serai doesn't set keys and network slashes are handled by BABE/GRANDPA if let NetworkId::External(n) = set.network { // If the prior prior set didn't report, emit they're retired now if PendingSlashReport::::get(n).is_some() { Self::deposit_event(Event::SetRetired { set: ValidatorSet { network: set.network, session: Session(set.session.0 - 1) }, }); } // This overwrites the prior value as the prior to-report set's stake presumably just // unlocked, making their report unenforceable let keys = Keys::::take(ExternalValidatorSet { network: n, session: set.session }).unwrap(); PendingSlashReport::::set(n, Some(keys.0)); } else { // emit the event for serai network Self::deposit_event(Event::SetRetired { set }); } // We're retiring this set because the set after it accepted the handover Self::deposit_event(Event::AcceptedHandover { set: ValidatorSet { network: set.network, session: Session(set.session.0 + 1) }, }); // Update the total allocated stake to be for the current set Self::set_total_allocated_stake(set.network); } /// Take the amount deallocatable. /// /// `session` refers to the Session the stake becomes deallocatable on. fn take_deallocatable_amount( network: NetworkId, session: Session, key: Public, ) -> Option { // Check this Session has properly started, completing the handover from the prior session. 
if !Self::handover_completed(network, session) { return None; } PendingDeallocations::::take((network, key), session) } fn rotate_session() { // next serai validators that is in the queue. let now_validators = Participants::::get(NetworkId::Serai) .expect("no Serai participants upon rotate_session"); let prior_serai_session = Self::session(NetworkId::Serai).unwrap(); // TODO: T::SessionHandler::on_before_session_ending() was here. // end the current serai session. Self::retire_set(ValidatorSet { network: NetworkId::Serai, session: prior_serai_session }); // make a new session and get the next validator set. Self::new_session(); // Update Babe and Grandpa let session = prior_serai_session.0 + 1; let next_validators = Participants::::get(NetworkId::Serai).unwrap(); Babe::::enact_epoch_change( WeakBoundedVec::force_from( now_validators.iter().copied().map(|(id, w)| (BabeAuthorityId::from(id), w)).collect(), None, ), WeakBoundedVec::force_from( next_validators.iter().copied().map(|(id, w)| (BabeAuthorityId::from(id), w)).collect(), None, ), Some(session), ); fn grandpa_map(i: &(Public, u64)) -> (&Public, GrandpaAuthorityId) { (&i.0, i.0.into()) } Grandpa::::on_new_session( true, now_validators.iter().map(grandpa_map), next_validators.iter().map(grandpa_map), ); // Clear SeraiDisabledIndices, only preserving keys still present in the new session // First drain so we don't mutate as we iterate let mut disabled = vec![]; for (_, validator) in SeraiDisabledIndices::::drain() { disabled.push(validator); } for disabled in disabled { Self::disable_serai_validator(disabled); } } /// Returns the required stake in terms SRI for a given `Balance`. 
pub fn required_stake(balance: &ExternalBalance) -> SubstrateAmount { use dex_pallet::HigherPrecisionBalance; // This is inclusive to an increase in accuracy let sri_per_coin = Dex::::security_oracle_value(balance.coin).unwrap_or(Amount(0)); // See dex-pallet for the reasoning on these let coin_decimals = balance.coin.decimals().max(5); let accuracy_increase = HigherPrecisionBalance::from(SubstrateAmount::pow(10, coin_decimals)); let total_coin_value = u64::try_from( HigherPrecisionBalance::from(balance.amount.0) * HigherPrecisionBalance::from(sri_per_coin.0) / accuracy_increase, ) .unwrap_or(u64::MAX); // required stake formula (COIN_VALUE * 1.5) + margin(20%) let required_stake = total_coin_value.saturating_mul(3).saturating_div(2); required_stake.saturating_add(total_coin_value.saturating_div(5)) } /// Returns the current total required stake for a given `network`. pub fn required_stake_for_network(network: ExternalNetworkId) -> SubstrateAmount { let mut total_required = 0; for coin in network.coins() { let supply = Coins::::supply(Coin::from(coin)); total_required += Self::required_stake(&ExternalBalance { coin, amount: Amount(supply) }); } total_required } pub fn distribute_block_rewards( network: NetworkId, account: T::AccountId, amount: Amount, ) -> DispatchResult { // TODO: Should this call be part of the `increase_allocation` since we have to have it // before each call to it? 
Coins::::transfer_internal( account, Self::account(), Balance { coin: Coin::Serai, amount }, )?; Self::increase_allocation(network, account, amount, true) } fn can_slash_serai_validator(validator: Public) -> bool { // Checks if they're active or actively deallocating (letting us still slash them) // We could check if they're upcoming/still allocating, yet that'd mean the equivocation is // invalid (as they aren't actively signing anything) or severely dated // It's not an edge case worth being comprehensive to due to the complexity of being so Babe::::is_member(&BabeAuthorityId::from(validator)) || PendingDeallocations::::iter_prefix((NetworkId::Serai, validator)).next().is_some() } fn slash_serai_validator(validator: Public) { let network = NetworkId::Serai; let mut allocation = Self::allocation((network, validator)).unwrap_or(Amount(0)); // reduce the current allocation to 0. Self::set_allocation(network, validator, Amount(0)); // Take the pending deallocation from the current session allocation.0 += PendingDeallocations::::take( (network, validator), Self::session_to_unlock_on_for_current_set(network).unwrap(), ) .unwrap_or(Amount(0)) .0; // Reduce the TotalAllocatedStake for the network, if in set // TotalAllocatedStake is the sum of allocations and pending deallocations from the current // session, since pending deallocations can still be slashed and therefore still contribute // to economic security, hence the allocation calculations above being above and the ones // below being below if InSet::::contains_key(NetworkId::Serai, validator) { let current_staked = Self::total_allocated_stake(network).unwrap(); TotalAllocatedStake::::set(network, Some(current_staked - allocation)); } // Clear any other pending deallocations. 
for (_, pending) in PendingDeallocations::::drain_prefix((network, validator)) { allocation.0 += pending.0; } // burn the allocation from the stake account Coins::::burn( RawOrigin::Signed(Self::account()).into(), Balance { coin: Coin::Serai, amount: allocation }, ) .unwrap(); } /// Disable a Serai validator, preventing them from further authoring blocks. /// /// Returns true if the validator-to-disable was actually a validator. /// Returns false if they weren't. fn disable_serai_validator(validator: Public) -> bool { if let Some(index) = Babe::::authorities().into_iter().position(|(id, _)| id.into_inner() == validator) { SeraiDisabledIndices::::set(u32::try_from(index).unwrap(), Some(validator)); let session = Self::session(NetworkId::Serai).unwrap(); Self::deposit_event(Event::ParticipantRemoved { set: ValidatorSet { network: NetworkId::Serai, session }, removed: validator, }); true } else { false } } } #[pallet::call] impl Pallet { #[pallet::call_index(0)] #[pallet::weight(0)] // TODO pub fn set_keys( origin: OriginFor, network: ExternalNetworkId, removed_participants: BoundedVec>, key_pair: KeyPair, signature: Signature, ) -> DispatchResult { ensure_none(origin)?; // signature isn't checked as this is an unsigned transaction, and validate_unsigned // (called by pre_dispatch) checks it let _ = signature; let session = Self::session(NetworkId::from(network)).unwrap(); let set = ExternalValidatorSet { network, session }; Keys::::set(set, Some(key_pair.clone())); // If this is the first ever set for this network, set TotalAllocatedStake now // We generally set TotalAllocatedStake when the prior set retires, and the new set is fully // active and liable. Since this is the first set, there is no prior set to wait to retire if session == Session(0) { Self::set_total_allocated_stake(NetworkId::from(network)); } // This does not remove from TotalAllocatedStake or InSet in order to: // 1) Not decrease the stake present in this set. 
This means removed participants are // still liable for the economic security of the external network. This prevents // a decided set, which is economically secure, from falling below the threshold. // 2) Not allow parties removed to immediately deallocate, per commentary on deallocation // scheduling (https://github.com/serai-dex/serai/issues/394). for removed in removed_participants { Self::deposit_event(Event::ParticipantRemoved { set: set.into(), removed }); } Self::deposit_event(Event::KeyGen { set, key_pair }); Ok(()) } #[pallet::call_index(1)] #[pallet::weight(0)] // TODO pub fn report_slashes( origin: OriginFor, network: ExternalNetworkId, slashes: BoundedVec<(Public, u32), ConstU32<{ MAX_KEY_SHARES_PER_SET / 3 }>>, signature: Signature, ) -> DispatchResult { ensure_none(origin)?; // signature isn't checked as this is an unsigned transaction, and validate_unsigned // (called by pre_dispatch) checks it let _ = signature; // TODO: Handle slashes let _ = slashes; // Emit set retireed Pallet::::deposit_event(Event::SetRetired { set: ValidatorSet { network: network.into(), session: Session(Self::session(NetworkId::from(network)).unwrap().0 - 1), }, }); Ok(()) } #[pallet::call_index(2)] #[pallet::weight(0)] // TODO pub fn allocate(origin: OriginFor, network: NetworkId, amount: Amount) -> DispatchResult { let validator = ensure_signed(origin)?; Coins::::transfer_internal( validator, Self::account(), Balance { coin: Coin::Serai, amount }, )?; Self::increase_allocation(network, validator, amount, false) } #[pallet::call_index(3)] #[pallet::weight(0)] // TODO pub fn deallocate(origin: OriginFor, network: NetworkId, amount: Amount) -> DispatchResult { let account = ensure_signed(origin)?; let can_immediately_deallocate = Self::decrease_allocation(network, account, amount)?; if can_immediately_deallocate { Coins::::transfer_internal( Self::account(), account, Balance { coin: Coin::Serai, amount }, )?; } Ok(()) } #[pallet::call_index(4)] #[pallet::weight((0, 
DispatchClass::Operational))] // TODO pub fn claim_deallocation( origin: OriginFor, network: NetworkId, session: Session, ) -> DispatchResult { let account = ensure_signed(origin)?; let Some(amount) = Self::take_deallocatable_amount(network, session, account) else { Err(Error::::NonExistentDeallocation)? }; Coins::::transfer_internal( Self::account(), account, Balance { coin: Coin::Serai, amount }, )?; Self::deposit_event(Event::DeallocationClaimed { validator: account, network, session }); Ok(()) } } #[pallet::validate_unsigned] impl ValidateUnsigned for Pallet { type Call = Call; fn validate_unsigned(_: TransactionSource, call: &Self::Call) -> TransactionValidity { // Match to be exhaustive match call { Call::set_keys { network, ref removed_participants, ref key_pair, ref signature } => { let network = *network; // Confirm this set has a session let Some(current_session) = Self::session(NetworkId::from(network)) else { Err(InvalidTransaction::Custom(1))? }; let set = ExternalValidatorSet { network, session: current_session }; // Confirm it has yet to set keys if Keys::::get(set).is_some() { Err(InvalidTransaction::Stale)?; } // This is a needed precondition as this uses storage variables for the latest decided // session on this assumption assert_eq!(Pallet::::latest_decided_session(network.into()), Some(current_session)); // This does not slash the removed participants as that'll be done at the end of the // set's lifetime let mut removed = hashbrown::HashSet::new(); for participant in removed_participants { // Confirm this wasn't duplicated if removed.contains(&participant.0) { Err(InvalidTransaction::Custom(2))?; } removed.insert(participant.0); } let participants = Participants::::get(NetworkId::from(network)) .expect("session existed without participants"); let mut all_key_shares = 0; let mut signers = vec![]; let mut signing_key_shares = 0; for participant in participants { let participant = participant.0; let shares = InSet::::get(NetworkId::from(network), 
participant) .expect("participant from Participants wasn't InSet"); all_key_shares += shares; if removed.contains(&participant.0) { continue; } signers.push(participant); signing_key_shares += shares; } { let f = all_key_shares - signing_key_shares; if signing_key_shares < ((2 * f) + 1) { Err(InvalidTransaction::Custom(3))?; } } // Verify the signature with the MuSig key of the signers // We theoretically don't need set_keys_message to bind to removed_participants, as the // key we're signing with effectively already does so, yet there's no reason not to if !musig_key(set.into(), &signers) .verify(&set_keys_message(&set, removed_participants, key_pair), signature) { Err(InvalidTransaction::BadProof)?; } ValidTransaction::with_tag_prefix("ValidatorSets") .and_provides((0, set)) .longevity(u64::MAX) .propagate(true) .build() } Call::report_slashes { network, ref slashes, ref signature } => { let network = *network; let Some(key) = PendingSlashReport::::take(network) else { // Assumed already published Err(InvalidTransaction::Stale)? }; // There must have been a previous session is PendingSlashReport is populated let set = ExternalValidatorSet { network, session: Session(Self::session(NetworkId::from(network)).unwrap().0 - 1), }; if !key.verify(&report_slashes_message(&set, slashes), signature) { Err(InvalidTransaction::BadProof)?; } ValidTransaction::with_tag_prefix("ValidatorSets") .and_provides((1, set)) .longevity(MAX_KEY_SHARES_PER_SET.into()) .propagate(true) .build() } Call::allocate { .. } | Call::deallocate { .. } | Call::claim_deallocation { .. } => { Err(InvalidTransaction::Call)? 
} Call::__Ignore(_, _) => unreachable!(), } } // Explicitly provide a pre-dispatch which calls validate_unsigned fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { Self::validate_unsigned(TransactionSource::InBlock, call).map(|_| ()).map_err(Into::into) } } impl AllowMint for Pallet { fn is_allowed(balance: &ExternalBalance) -> bool { // get the required stake let current_required = Self::required_stake_for_network(balance.coin.network()); let new_required = current_required + Self::required_stake(balance); // get the total stake for the network & compare. let staked = Self::total_allocated_stake(NetworkId::from(balance.coin.network())).unwrap_or(Amount(0)); staked.0 >= new_required } } #[rustfmt::skip] impl + From> KeyOwnerProofSystem<(KeyTypeId, V)> for Pallet { type Proof = MembershipProof; type IdentificationTuple = Public; fn prove(key: (KeyTypeId, V)) -> Option { Some(MembershipProof(key.1.into(), PhantomData)) } fn check_proof(key: (KeyTypeId, V), proof: Self::Proof) -> Option { let validator = key.1.into(); // check the offender and the proof offender are the same. if validator != proof.0 { return None; } // check validator is valid if !Self::can_slash_serai_validator(validator) { return None; } Some(validator) } } impl ReportOffence> for Pallet { /// Report an `offence` and reward given `reporters`. fn report_offence( _: Vec, offence: BabeEquivocationOffence, ) -> Result<(), OffenceError> { // slash the offender let offender = offence.offender; Self::slash_serai_validator(offender); // disable it Self::disable_serai_validator(offender); Ok(()) } fn is_known_offence( offenders: &[Public], _: & as Offence>::TimeSlot, ) -> bool { for offender in offenders { // It's not a known offence if we can still slash them if Self::can_slash_serai_validator(*offender) { return false; } } true } } impl ReportOffence> for Pallet { /// Report an `offence` and reward given `reporters`. 
fn report_offence( _: Vec, offence: GrandpaEquivocationOffence, ) -> Result<(), OffenceError> { // slash the offender let offender = offence.offender; Self::slash_serai_validator(offender); // disable it Self::disable_serai_validator(offender); Ok(()) } fn is_known_offence( offenders: &[Public], _slot: & as Offence>::TimeSlot, ) -> bool { for offender in offenders { if Self::can_slash_serai_validator(*offender) { return false; } } true } } impl FindAuthor for Pallet { fn find_author<'a, I>(digests: I) -> Option where I: 'a + IntoIterator, { let i = Babe::::find_author(digests)?; Some(Babe::::authorities()[i as usize].0.clone().into()) } } impl DisabledValidators for Pallet { fn is_disabled(index: u32) -> bool { SeraiDisabledIndices::::get(index).is_some() } fn disabled_validators() -> Vec { // TODO: Use a storage iterator here let mut res = vec![]; for i in 0 .. MAX_KEY_SHARES_PER_SET { let i = i.into(); if Self::is_disabled(i) { res.push(i); } } res } } } pub use pallet::*; ================================================ FILE: substrate/validator-sets/primitives/Cargo.toml ================================================ [package] name = "serai-validator-sets-primitives" version = "0.1.0" description = "Primitives for validator sets" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/validator-sets/primitives" authors = ["Luke Parker "] edition = "2021" rust-version = "1.74" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] zeroize = { version = "^1.5", features = ["derive"], optional = true } dalek-ff-group = { path = "../../../crypto/dalek-ff-group", default-features = false, features = ["alloc"] } ciphersuite = { path = "../../../crypto/ciphersuite", version = "0.4", default-features = false, features = ["alloc"] } dkg-musig = { path = "../../../crypto/dkg/musig", default-features = false } borsh = { version = "1", default-features = false, features = 
["derive", "de_strict_order"], optional = true } serde = { version = "1", default-features = false, features = ["derive", "alloc"], optional = true } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive", "max-encoded-len"] } sp-core = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } sp-std = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false } serai-primitives = { path = "../../primitives", default-features = false } [features] std = ["zeroize", "ciphersuite/std", "dkg-musig/std", "borsh?/std", "serde?/std", "scale/std", "sp-core/std", "sp-std/std", "serai-primitives/std"] borsh = ["dep:borsh", "serai-primitives/borsh"] serde = ["dep:serde", "serai-primitives/serde"] default = ["std"] ================================================ FILE: substrate/validator-sets/primitives/LICENSE ================================================ MIT License Copyright (c) 2022-2023 Luke Parker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: substrate/validator-sets/primitives/src/lib.rs ================================================ #![cfg_attr(not(feature = "std"), no_std)] #[cfg(feature = "std")] use zeroize::Zeroize; use dalek_ff_group::Ristretto; use ciphersuite::{group::GroupEncoding, Ciphersuite}; use scale::{Encode, Decode, DecodeWithMemTracking, MaxEncodedLen}; #[cfg(feature = "borsh")] use borsh::{BorshSerialize, BorshDeserialize}; #[cfg(feature = "serde")] use serde::{Serialize, Deserialize}; use sp_core::{ConstU32, sr25519::Public, bounded::BoundedVec}; #[cfg(not(feature = "std"))] use sp_std::vec::Vec; use serai_primitives::{ExternalNetworkId, NetworkId}; /// The maximum amount of key shares per set. pub const MAX_KEY_SHARES_PER_SET: u32 = 150; // Support keys up to 96 bytes (BLS12-381 G2). pub const MAX_KEY_LEN: u32 = 96; /// The type used to identify a specific session of validators. #[derive( Clone, Copy, PartialEq, Eq, Hash, Default, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen, )] #[cfg_attr(feature = "std", derive(Zeroize))] #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct Session(pub u32); /// The type used to identify a specific validator set during a specific session. 
#[derive(
  Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen,
)]
#[cfg_attr(feature = "std", derive(Zeroize))]
#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct ValidatorSet {
  pub session: Session,
  pub network: NetworkId,
}

/// The type used to identify a specific validator set during a specific session.
#[derive(
  Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen,
)]
#[cfg_attr(feature = "std", derive(Zeroize))]
#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct ExternalValidatorSet {
  pub session: Session,
  pub network: ExternalNetworkId,
}

// Widening an external set into a generic set is infallible, as every external network ID is a
// valid network ID.
impl From<ExternalValidatorSet> for ValidatorSet {
  fn from(set: ExternalValidatorSet) -> Self {
    ValidatorSet { session: set.session, network: set.network.into() }
  }
}

// Narrowing fails solely for the Serai network itself, which has no external validator set.
impl TryFrom<ValidatorSet> for ExternalValidatorSet {
  type Error = ();
  fn try_from(set: ValidatorSet) -> Result<ExternalValidatorSet, ()> {
    match set.network {
      NetworkId::Serai => Err(())?,
      NetworkId::External(network) => Ok(ExternalValidatorSet { session: set.session, network }),
    }
  }
}

// NOTE(review): the generic arguments on the following aliases were stripped by extraction and
// have been reconstructed from MAX_KEY_LEN's documented purpose — confirm against upstream.
type MaxKeyLen = ConstU32<MAX_KEY_LEN>;
/// The type representing a Key from an external network.
pub type ExternalKey = BoundedVec<u8, MaxKeyLen>;

/// The key pair for a validator set.
///
/// This is their Ristretto key, used for signing Batches, and their key on the external network.
#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, DecodeWithMemTracking, MaxEncodedLen)]
#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct KeyPair(
  #[cfg_attr(
    feature = "borsh",
    borsh(
      serialize_with = "serai_primitives::borsh_serialize_public",
      deserialize_with = "serai_primitives::borsh_deserialize_public"
    )
  )]
  pub Public,
  #[cfg_attr(
    feature = "borsh",
    borsh(
      serialize_with = "serai_primitives::borsh_serialize_bounded_vec",
      deserialize_with = "serai_primitives::borsh_deserialize_bounded_vec"
    )
  )]
  pub ExternalKey,
);

#[cfg(feature = "std")]
impl Zeroize for KeyPair {
  fn zeroize(&mut self) {
    // Zeroize both the Ristretto public key's bytes and the external key's bytes
    self.0 .0.zeroize();
    self.1.as_mut().zeroize();
  }
}

/// The MuSig context for a validator set.
pub fn musig_context(set: ValidatorSet) -> [u8; 32] {
  let mut context = [0; 32];
  const DST: &[u8] = b"ValidatorSets-musig_key";
  context[.. DST.len()].copy_from_slice(DST);
  // The DST plus the SCALE-encoded set must fit within the 32-byte context
  let set = set.encode();
  context[DST.len() .. (DST.len() + set.len())].copy_from_slice(&set);
  context
}

/// The MuSig public key for a validator set.
///
/// This function panics on invalid input.
pub fn musig_key(set: ValidatorSet, set_keys: &[Public]) -> Public {
  let mut keys = Vec::new();
  for key in set_keys {
    // NOTE(review): the `<Ristretto as Ciphersuite>` qualifier was stripped by extraction and
    // has been reconstructed — confirm against upstream
    keys.push(
      <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut key.0.as_ref())
        .expect("invalid participant"),
    );
  }
  dkg_musig::musig_key_vartime::<Ristretto>(musig_context(set), &keys).unwrap().to_bytes().into()
}

/// The message for the set_keys signature.
pub fn set_keys_message(
  set: &ExternalValidatorSet,
  removed_participants: &[Public],
  key_pair: &KeyPair,
) -> Vec<u8> {
  (b"ValidatorSets-set_keys", set, removed_participants, key_pair).encode()
}

/// The message for the report_slashes signature.
pub fn report_slashes_message(set: &ExternalValidatorSet, slashes: &[(Public, u32)]) -> Vec<u8> {
  (b"ValidatorSets-report_slashes", set, slashes).encode()
}

/// For a set of validators whose key shares may exceed the maximum, reduce until they equal the
/// maximum.
/// /// Reduction occurs by reducing each validator in a reverse round-robin. pub fn amortize_excess_key_shares(validators: &mut [(Public, u64)]) { let total_key_shares = validators.iter().map(|(_, shares)| shares).sum::(); for i in 0 .. usize::try_from(total_key_shares.saturating_sub(u64::from(MAX_KEY_SHARES_PER_SET))) .unwrap() { validators[validators.len() - ((i % validators.len()) + 1)].1 -= 1; } } /// Returns the post-amortization key shares for the top validator. /// /// Panics when `validators == 0`. pub fn post_amortization_key_shares_for_top_validator( validators: usize, top: u64, key_shares: u64, ) -> u64 { top - (key_shares.saturating_sub(MAX_KEY_SHARES_PER_SET.into()) / u64::try_from(validators).unwrap()) } ================================================ FILE: tests/coordinator/Cargo.toml ================================================ [package] name = "serai-coordinator-tests" version = "0.1.0" description = "Tests for Serai's Coordinator" license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/tests/coordinator" authors = ["Luke Parker "] keywords = [] edition = "2021" publish = false [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] hex = "0.4" async-trait = "0.1" zeroize = { version = "1", default-features = false } rand_core = { version = "0.6", default-features = false } blake2 = "0.10" dalek-ff-group = { path = "../../crypto/dalek-ff-group", default-features = false } ciphersuite = { path = "../../crypto/ciphersuite", default-features = false } ciphersuite-kp256 = { path = "../../crypto/ciphersuite/kp256", default-features = false } schnorrkel = "0.11" dkg = { path = "../../crypto/dkg", default-features = false } messages = { package = "serai-processor-messages", path = "../../processor/messages" } scale = { package = "parity-scale-codec", version = "3" } serai-client = { path = "../../substrate/client", features = ["serai"] } 
serai-message-queue = { path = "../../message-queue" } borsh = { version = "1", features = ["de_strict_order"] } tokio = { version = "1", features = ["time"] } dockertest = "0.5" serai-docker-tests = { path = "../docker" } serai-message-queue-tests = { path = "../message-queue" } ================================================ FILE: tests/coordinator/LICENSE ================================================ AGPL-3.0-only license Copyright (c) 2023 Luke Parker This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License Version 3 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see . ================================================ FILE: tests/coordinator/src/lib.rs ================================================ #![allow(clippy::needless_pass_by_ref_mut)] // False positives use std::{ sync::{OnceLock, Arc}, time::Duration, }; use tokio::{ task::AbortHandle, sync::{Mutex as AsyncMutex, mpsc}, }; use rand_core::{RngCore, OsRng}; use zeroize::Zeroizing; use dalek_ff_group::Ristretto; use ciphersuite::{ group::{ff::PrimeField, GroupEncoding}, Ciphersuite, }; use serai_client::primitives::ExternalNetworkId; use messages::{ coordinator::{SubstrateSignableId, SubstrateSignId, cosign_block_msg}, CoordinatorMessage, ProcessorMessage, }; use serai_message_queue::{Service, Metadata, client::MessageQueue}; use serai_client::Serai; use dockertest::{PullPolicy, Image, TestBodySpecification, DockerOperations}; #[cfg(test)] mod tests; pub fn coordinator_instance( name: &str, message_queue_key: ::F, ) -> TestBodySpecification { 
serai_docker_tests::build("coordinator".to_string()); TestBodySpecification::with_image( Image::with_repository("serai-dev-coordinator").pull_policy(PullPolicy::Never), ) .replace_env( [ ("MESSAGE_QUEUE_KEY".to_string(), hex::encode(message_queue_key.to_repr())), ("DB_PATH".to_string(), "./coordinator-db".to_string()), ("SERAI_KEY".to_string(), { use serai_client::primitives::insecure_pair_from_name; hex::encode(&insecure_pair_from_name(name).as_ref().secret.to_bytes()[.. 32]) }), ( "RUST_LOG".to_string(), "serai_coordinator=trace,".to_string() + "tributary_chain=trace," + "tendermint=trace", ), ] .into(), ) } pub fn serai_composition(name: &str, fast_epoch: bool) -> TestBodySpecification { (if fast_epoch { serai_docker_tests::build("serai-fast-epoch".to_string()); TestBodySpecification::with_image( Image::with_repository("serai-dev-serai-fast-epoch").pull_policy(PullPolicy::Never), ) } else { serai_docker_tests::build("serai".to_string()); TestBodySpecification::with_image( Image::with_repository("serai-dev-serai").pull_policy(PullPolicy::Never), ) }) .replace_env( [("SERAI_NAME".to_string(), name.to_lowercase()), ("KEY".to_string(), " ".to_string())].into(), ) .set_publish_all_ports(true) } fn is_cosign_message(msg: &CoordinatorMessage) -> bool { matches!( msg, CoordinatorMessage::Coordinator( messages::coordinator::CoordinatorMessage::CosignSubstrateBlock { .. } ) ) || matches!( msg, CoordinatorMessage::Coordinator( messages::coordinator::CoordinatorMessage::SubstratePreprocesses { id: SubstrateSignId { id: SubstrateSignableId::CosigningSubstrateBlock(_), .. }, .. } ), ) || matches!( msg, CoordinatorMessage::Coordinator(messages::coordinator::CoordinatorMessage::SubstrateShares { id: SubstrateSignId { id: SubstrateSignableId::CosigningSubstrateBlock(_), .. }, .. 
}), ) } #[derive(Clone, PartialEq, Eq, Debug)] pub struct Handles { pub(crate) serai: String, pub(crate) message_queue: String, } pub struct Processor { network: ExternalNetworkId, serai_rpc: String, #[allow(unused)] handles: Handles, msgs: mpsc::UnboundedReceiver, queue_for_sending: MessageQueue, abort_handle: Option>, substrate_key: Arc::F>>>>, } impl Drop for Processor { fn drop(&mut self) { if let Some(abort_handle) = self.abort_handle.take() { abort_handle.abort(); }; } } impl Processor { pub async fn new( raw_i: u8, network: ExternalNetworkId, ops: &DockerOperations, handles: Handles, processor_key: ::F, ) -> Processor { let message_queue_rpc = ops.handle(&handles.message_queue).host_port(2287).unwrap(); let message_queue_rpc = format!("{}:{}", message_queue_rpc.0, message_queue_rpc.1); // Sleep until the Substrate RPC starts let serai_rpc = ops.handle(&handles.serai).host_port(9944).unwrap(); let serai_rpc = format!("http://{}:{}", serai_rpc.0, serai_rpc.1); // Bound execution to 60 seconds for _ in 0 .. 
60 { tokio::time::sleep(Duration::from_secs(1)).await; let Ok(client) = Serai::new(serai_rpc.clone()).await else { continue }; if client.latest_finalized_block_hash().await.is_err() { continue; } break; } // The Serai RPC may or may not be started // Assume it is and continue, so if it's a few seconds late, it's still within tolerance // Create the queue let mut queue = ( 0, Arc::new(MessageQueue::new( Service::Processor(network), message_queue_rpc.clone(), Zeroizing::new(processor_key), )), ); let (msg_send, msg_recv) = mpsc::unbounded_channel(); let substrate_key = Arc::new(AsyncMutex::new(None)); let mut res = Processor { network, serai_rpc, handles, queue_for_sending: MessageQueue::new( Service::Processor(network), message_queue_rpc, Zeroizing::new(processor_key), ), msgs: msg_recv, abort_handle: None, substrate_key: substrate_key.clone(), }; // Spawn a task to handle cosigns and forward messages as appropriate let abort_handle = tokio::spawn({ async move { loop { // Get new messages let (next_recv_id, queue) = &mut queue; let msg = queue.next(Service::Coordinator).await; assert_eq!(msg.from, Service::Coordinator); assert_eq!(msg.id, *next_recv_id); queue.ack(Service::Coordinator, msg.id).await; *next_recv_id += 1; let msg_msg = borsh::from_slice(&msg.msg).unwrap(); // Remove any BatchReattempts clogging the pipe // TODO: Set up a wrapper around serai-client so we aren't throwing this away yet // leave it for the tests if matches!( msg_msg, messages::CoordinatorMessage::Coordinator( messages::coordinator::CoordinatorMessage::BatchReattempt { .. 
} ) ) { continue; } if !is_cosign_message(&msg_msg) { msg_send.send(msg_msg).unwrap(); continue; } let msg = msg_msg; let send_message = |msg: ProcessorMessage| async move { queue .queue( Metadata { from: Service::Processor(network), to: Service::Coordinator, intent: msg.intent(), }, borsh::to_vec(&msg).unwrap(), ) .await; }; struct CurrentCosign { block_number: u64, block: [u8; 32], } static CURRENT_COSIGN: OnceLock>> = OnceLock::new(); let mut current_cosign = CURRENT_COSIGN.get_or_init(|| AsyncMutex::new(None)).lock().await; match msg { // If this is a CosignSubstrateBlock, reset the CurrentCosign // While technically, each processor should individually track the current cosign, // this is fine for current testing purposes CoordinatorMessage::Coordinator( messages::coordinator::CoordinatorMessage::CosignSubstrateBlock { id, block_number }, ) => { let SubstrateSignId { id: SubstrateSignableId::CosigningSubstrateBlock(block), .. } = id else { panic!("CosignSubstrateBlock didn't have CosigningSubstrateBlock ID") }; let new_cosign = CurrentCosign { block_number, block }; if current_cosign.is_none() || (current_cosign.as_ref().unwrap().block != block) { *current_cosign = Some(new_cosign); } send_message( messages::coordinator::ProcessorMessage::CosignPreprocess { id: id.clone(), preprocesses: vec![[raw_i; 64]], } .into(), ) .await; } CoordinatorMessage::Coordinator( messages::coordinator::CoordinatorMessage::SubstratePreprocesses { id, .. }, ) => { // TODO: Assert the ID matches CURRENT_COSIGN // TODO: Verify the received preprocesses send_message( messages::coordinator::ProcessorMessage::SubstrateShare { id, shares: vec![[raw_i; 32]], } .into(), ) .await; } CoordinatorMessage::Coordinator( messages::coordinator::CoordinatorMessage::SubstrateShares { .. 
}, ) => { // TODO: Assert the ID matches CURRENT_COSIGN // TODO: Verify the shares let block_number = current_cosign.as_ref().unwrap().block_number; let block = current_cosign.as_ref().unwrap().block; let substrate_key = substrate_key.lock().await.clone().unwrap(); // Expand to a key pair as Schnorrkel expects // It's the private key + 32-bytes of entropy for nonces + the public key let mut schnorrkel_key_pair = [0; 96]; schnorrkel_key_pair[.. 32].copy_from_slice(&substrate_key.to_repr()); OsRng.fill_bytes(&mut schnorrkel_key_pair[32 .. 64]); schnorrkel_key_pair[64 ..].copy_from_slice( &(::generator() * *substrate_key).to_bytes(), ); let signature = schnorrkel::keys::Keypair::from_bytes(&schnorrkel_key_pair) .unwrap() .sign_simple(b"substrate", &cosign_block_msg(block_number, block)) .to_bytes(); send_message( messages::coordinator::ProcessorMessage::CosignedBlock { block_number, block, signature: signature.to_vec(), } .into(), ) .await; } _ => panic!("unexpected message passed is_cosign_message"), } } } }) .abort_handle(); res.abort_handle = Some(Arc::new(abort_handle)); res } pub async fn serai(&self) -> Serai { Serai::new(self.serai_rpc.clone()).await.unwrap() } /// Send a message to the coordinator as a processor. pub async fn send_message(&mut self, msg: impl Into) { let msg: ProcessorMessage = msg.into(); self .queue_for_sending .queue( Metadata { from: Service::Processor(self.network), to: Service::Coordinator, intent: msg.intent(), }, borsh::to_vec(&msg).unwrap(), ) .await; } /// Receive a message from the coordinator as a processor. 
pub async fn recv_message(&mut self) -> CoordinatorMessage { // Set a timeout of 20 minutes to allow effectively any protocol to occur without a fear of // an arbitrary timeout cutting it short tokio::time::timeout(Duration::from_secs(20 * 60), self.msgs.recv()).await.unwrap().unwrap() } pub async fn set_substrate_key( &mut self, substrate_key: Zeroizing<::F>, ) { *self.substrate_key.lock().await = Some(substrate_key); } } ================================================ FILE: tests/coordinator/src/tests/batch.rs ================================================ use std::{ time::Duration, collections::{HashSet, HashMap}, }; use zeroize::Zeroizing; use rand_core::{RngCore, OsRng}; use blake2::{ digest::{consts::U32, Digest}, Blake2b, }; use dalek_ff_group::Ristretto; use ciphersuite::{group::GroupEncoding, Ciphersuite}; use ciphersuite_kp256::Secp256k1; use dkg::Participant; use scale::Encode; use serai_client::{ primitives::BlockHash, in_instructions::{ primitives::{Batch, SignedBatch, batch_message}, InInstructionsEvent, }, validator_sets::primitives::Session, }; use messages::{ coordinator::{SubstrateSignableId, SubstrateSignId}, SubstrateContext, CoordinatorMessage, }; use crate::{*, tests::*}; pub async fn batch( processors: &mut [Processor], processor_is: &[u8], session: Session, substrate_key: &Zeroizing<::F>, batch: Batch, ) -> u64 { let id = SubstrateSignId { session, id: SubstrateSignableId::Batch(batch.id), attempt: 0 }; for processor in &mut *processors { processor .send_message(messages::substrate::ProcessorMessage::Batch { batch: batch.clone() }) .await; } // Select a random participant to exclude, so we know for sure who *is* participating assert_eq!(COORDINATORS - THRESHOLD, 1); let excluded_signer = usize::try_from(OsRng.next_u64() % u64::try_from(processors.len()).unwrap()).unwrap(); for (i, processor) in processors.iter_mut().enumerate() { if i == excluded_signer { continue; } processor 
.send_message(messages::coordinator::ProcessorMessage::BatchPreprocess { id: id.clone(), block: batch.block, preprocesses: vec![[processor_is[i]; 64]], }) .await; } // Before this Batch is signed, the Tributary will agree this block occurred, adding an extra // step of latency wait_for_tributary().await; wait_for_tributary().await; // Send from the excluded signer so they don't stay stuck processors[excluded_signer] .send_message(messages::coordinator::ProcessorMessage::BatchPreprocess { id: id.clone(), block: batch.block, preprocesses: vec![[processor_is[excluded_signer]; 64]], }) .await; // Read from a known signer to find out who was selected to sign let known_signer = (excluded_signer + 1) % COORDINATORS; let first_preprocesses = processors[known_signer].recv_message().await; let participants = match first_preprocesses { CoordinatorMessage::Coordinator( messages::coordinator::CoordinatorMessage::SubstratePreprocesses { id: this_id, preprocesses, }, ) => { assert_eq!(&id, &this_id); assert_eq!(preprocesses.len(), THRESHOLD - 1); let known_signer_i = Participant::new(u16::from(processor_is[known_signer])).unwrap(); assert!(!preprocesses.contains_key(&known_signer_i)); let mut participants = preprocesses.keys().copied().collect::>(); for (p, preprocess) in preprocesses { assert_eq!(preprocess, [u8::try_from(u16::from(p)).unwrap(); 64]); } participants.insert(known_signer_i); participants } other => panic!("coordinator didn't send back SubstratePreprocesses: {other:?}"), }; for i in participants.clone() { if u16::from(i) == u16::from(processor_is[known_signer]) { continue; } let processor = &mut processors[processor_is.iter().position(|p_i| u16::from(*p_i) == u16::from(i)).unwrap()]; let mut preprocesses = participants .clone() .into_iter() .map(|i| (i, [u8::try_from(u16::from(i)).unwrap(); 64])) .collect::>(); preprocesses.remove(&i); assert_eq!( processor.recv_message().await, CoordinatorMessage::Coordinator( 
messages::coordinator::CoordinatorMessage::SubstratePreprocesses { id: id.clone(), preprocesses } ) ); } for i in participants.clone() { let processor = &mut processors[processor_is.iter().position(|p_i| u16::from(*p_i) == u16::from(i)).unwrap()]; processor .send_message(messages::coordinator::ProcessorMessage::SubstrateShare { id: id.clone(), shares: vec![[u8::try_from(u16::from(i)).unwrap(); 32]], }) .await; } wait_for_tributary().await; for i in participants.clone() { let processor = &mut processors[processor_is.iter().position(|p_i| u16::from(*p_i) == u16::from(i)).unwrap()]; let mut shares = participants .clone() .into_iter() .map(|i| (i, [u8::try_from(u16::from(i)).unwrap(); 32])) .collect::>(); shares.remove(&i); assert_eq!( processor.recv_message().await, CoordinatorMessage::Coordinator(messages::coordinator::CoordinatorMessage::SubstrateShares { id: id.clone(), shares, }) ); } // Expand to a key pair as Schnorrkel expects // It's the private key + 32-bytes of entropy for nonces + the public key let mut schnorrkel_key_pair = [0; 96]; schnorrkel_key_pair[.. 32].copy_from_slice(&substrate_key.to_repr()); OsRng.fill_bytes(&mut schnorrkel_key_pair[32 .. 64]); schnorrkel_key_pair[64 ..] .copy_from_slice(&(::generator() * **substrate_key).to_bytes()); let signature = schnorrkel::keys::Keypair::from_bytes(&schnorrkel_key_pair) .unwrap() .sign_simple(b"substrate", &batch_message(&batch)) .to_bytes() .into(); let batch = SignedBatch { batch, signature }; let serai = processors[0].serai().await; let mut last_serai_block = serai.latest_finalized_block().await.unwrap().number(); for (i, processor) in processors.iter_mut().enumerate() { if i == excluded_signer { continue; } processor .send_message(messages::substrate::ProcessorMessage::SignedBatch { batch: batch.clone() }) .await; } // Verify the Batch was published to Substrate 'outer: for _ in 0 .. 
20 { tokio::time::sleep(Duration::from_secs(6)).await; if std::env::var("GITHUB_CI") == Ok("true".to_string()) { tokio::time::sleep(Duration::from_secs(6)).await; } while last_serai_block <= serai.latest_finalized_block().await.unwrap().number() { let batch_events = serai .as_of(serai.finalized_block_by_number(last_serai_block).await.unwrap().unwrap().hash()) .in_instructions() .batch_events() .await .unwrap(); if !batch_events.is_empty() { assert_eq!(batch_events.len(), 1); assert_eq!( batch_events[0], InInstructionsEvent::Batch { network: batch.batch.network, id: batch.batch.id, block: batch.batch.block, instructions_hash: Blake2b::::digest(batch.batch.instructions.encode()).into(), } ); break 'outer; } last_serai_block += 1; } } // Verify the coordinator sends SubstrateBlock to all processors let last_block = serai.finalized_block_by_number(last_serai_block).await.unwrap().unwrap(); for processor in &mut *processors { // Handle a potential re-attempt message in the pipeline let mut received = processor.recv_message().await; if matches!( received, messages::CoordinatorMessage::Coordinator( messages::coordinator::CoordinatorMessage::BatchReattempt { .. 
} ) ) { received = processor.recv_message().await } assert_eq!( received, messages::CoordinatorMessage::Substrate( messages::substrate::CoordinatorMessage::SubstrateBlock { context: SubstrateContext { serai_time: last_block.time().unwrap() / 1000, network_latest_finalized_block: batch.batch.block, }, block: last_serai_block, burns: vec![], batches: vec![batch.batch.id], } ) ); // Send the ack as expected processor .send_message(messages::ProcessorMessage::Coordinator( messages::coordinator::ProcessorMessage::SubstrateBlockAck { block: last_serai_block, plans: vec![], }, )) .await; } last_block.number() } #[tokio::test] async fn batch_test() { new_test( |mut processors: Vec| async move { // pop the last participant since genesis keygen has only 4 participants processors.pop().unwrap(); assert_eq!(processors.len(), COORDINATORS); let (processor_is, substrate_key, _) = key_gen::(&mut processors, Session(0)).await; batch( &mut processors, &processor_is, Session(0), &substrate_key, Batch { network: ExternalNetworkId::Bitcoin, id: 0, block: BlockHash([0x22; 32]), instructions: vec![], }, ) .await; }, false, ) .await; } ================================================ FILE: tests/coordinator/src/tests/key_gen.rs ================================================ use std::{ time::{Duration, SystemTime}, collections::HashMap, }; use zeroize::Zeroizing; use rand_core::OsRng; use dalek_ff_group::Ristretto; use ciphersuite::{ group::{ff::Field, GroupEncoding}, Ciphersuite, }; use ciphersuite_kp256::Secp256k1; use dkg::ThresholdParams; use serai_client::validator_sets::primitives::{ExternalValidatorSet, KeyPair, Session}; use messages::{key_gen::KeyGenId, CoordinatorMessage}; use crate::tests::*; pub async fn key_gen( processors: &mut [Processor], session: Session, ) -> (Vec, Zeroizing<::F>, Zeroizing) { let coordinators = processors.len(); let mut participant_is = vec![]; let set = ExternalValidatorSet { session, network: ExternalNetworkId::Bitcoin }; let id = KeyGenId { 
session: set.session, attempt: 0 }; for (i, processor) in processors.iter_mut().enumerate() { let msg = processor.recv_message().await; match &msg { CoordinatorMessage::KeyGen(messages::key_gen::CoordinatorMessage::GenerateKey { params, .. }) => { participant_is.push(params.i()); } _ => panic!("unexpected message: {msg:?}"), } assert_eq!( msg, CoordinatorMessage::KeyGen(messages::key_gen::CoordinatorMessage::GenerateKey { id, params: ThresholdParams::new( u16::try_from(((coordinators * 2) / 3) + 1).unwrap(), u16::try_from(coordinators).unwrap(), participant_is[i], ) .unwrap(), shares: 1, }) ); processor .send_message(messages::key_gen::ProcessorMessage::Commitments { id, commitments: vec![vec![u8::try_from(u16::from(participant_is[i])).unwrap()]], }) .await; } wait_for_tributary().await; for (i, processor) in processors.iter_mut().enumerate() { let mut commitments = (0 .. u8::try_from(coordinators).unwrap()) .map(|l| { ( participant_is[usize::from(l)], vec![u8::try_from(u16::from(participant_is[usize::from(l)])).unwrap()], ) }) .collect::>(); commitments.remove(&participant_is[i]); assert_eq!( processor.recv_message().await, CoordinatorMessage::KeyGen(messages::key_gen::CoordinatorMessage::Commitments { id, commitments, }) ); // Recipient it's for -> (Sender i, Recipient i) let mut shares = (0 .. 
u8::try_from(coordinators).unwrap()) .map(|l| { ( participant_is[usize::from(l)], vec![ u8::try_from(u16::from(participant_is[i])).unwrap(), u8::try_from(u16::from(participant_is[usize::from(l)])).unwrap(), ], ) }) .collect::>(); shares.remove(&participant_is[i]); processor .send_message(messages::key_gen::ProcessorMessage::Shares { id, shares: vec![shares] }) .await; } let substrate_priv_key = Zeroizing::new(::F::random(&mut OsRng)); let substrate_key = (::generator() * *substrate_priv_key).to_bytes(); let network_priv_key = Zeroizing::new(C::F::random(&mut OsRng)); let network_key = (C::generator() * *network_priv_key).to_bytes().as_ref().to_vec(); let serai = processors[0].serai().await; let mut last_serai_block = serai.latest_finalized_block().await.unwrap().number(); wait_for_tributary().await; for (i, processor) in processors.iter_mut().enumerate() { let i = participant_is[i]; assert_eq!( processor.recv_message().await, CoordinatorMessage::KeyGen(messages::key_gen::CoordinatorMessage::Shares { id, shares: { let mut shares = (0 .. u8::try_from(coordinators).unwrap()) .map(|l| { ( participant_is[usize::from(l)], vec![ u8::try_from(u16::from(participant_is[usize::from(l)])).unwrap(), u8::try_from(u16::from(i)).unwrap(), ], ) }) .collect::>(); shares.remove(&i); vec![shares] }, }) ); processor .send_message(messages::key_gen::ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key: network_key.clone(), }) .await; } // Sleeps for longer since we need to wait for a Substrate block as well 'outer: for _ in 0 .. 
20 { tokio::time::sleep(Duration::from_secs(6)).await; if std::env::var("GITHUB_CI") == Ok("true".to_string()) { tokio::time::sleep(Duration::from_secs(6)).await; } while last_serai_block <= serai.latest_finalized_block().await.unwrap().number() { if !serai .as_of(serai.finalized_block_by_number(last_serai_block).await.unwrap().unwrap().hash()) .validator_sets() .key_gen_events() .await .unwrap() .is_empty() { break 'outer; } last_serai_block += 1; } } let mut message = None; for processor in &mut *processors { let msg = processor.recv_message().await; if message.is_none() { match msg { CoordinatorMessage::Substrate( messages::substrate::CoordinatorMessage::ConfirmKeyPair { context, session, ref key_pair, }, ) => { assert!( SystemTime::now() .duration_since(SystemTime::UNIX_EPOCH) .unwrap() .as_secs() .abs_diff(context.serai_time) < (60 * 60 * 3) // 3 hours, which should exceed the length of any test we run ); assert_eq!(context.network_latest_finalized_block.0, [0; 32]); assert_eq!(set.session, session); assert_eq!(key_pair.0 .0, substrate_key); assert_eq!(&key_pair.1, &network_key); } _ => panic!("coordinator didn't respond with ConfirmKeyPair. 
msg: {msg:?}"), } message = Some(msg); } else { assert_eq!(message, Some(msg)); } } assert_eq!( serai .as_of(serai.finalized_block_by_number(last_serai_block).await.unwrap().unwrap().hash()) .validator_sets() .keys(set) .await .unwrap() .unwrap(), KeyPair(substrate_key.into(), network_key.try_into().unwrap()) ); for processor in &mut *processors { processor.set_substrate_key(substrate_priv_key.clone()).await; } ( participant_is.into_iter().map(|i| u8::try_from(u16::from(i)).unwrap()).collect(), substrate_priv_key, network_priv_key, ) } #[tokio::test] async fn key_gen_test() { new_test( |mut processors: Vec| async move { // pop the last participant since genesis keygen has only 4 participants processors.pop().unwrap(); assert_eq!(processors.len(), COORDINATORS); key_gen::(&mut processors, Session(0)).await; }, false, ) .await; } ================================================ FILE: tests/coordinator/src/tests/mod.rs ================================================ use core::future::Future; use std::{sync::OnceLock, collections::HashMap}; use tokio::sync::Mutex; use dockertest::{ LogAction, LogPolicy, LogSource, LogOptions, StartPolicy, TestBodySpecification, DockerOperations, DockerTest, }; use serai_docker_tests::fresh_logs_folder; use crate::*; mod key_gen; pub use key_gen::key_gen; mod batch; pub use batch::batch; mod sign; #[allow(unused_imports)] pub use sign::sign; mod rotation; pub(crate) const COORDINATORS: usize = 4; pub(crate) const THRESHOLD: usize = ((COORDINATORS * 2) / 3) + 1; // Provide a unique ID and ensures only one invocation occurs at a time. 
static UNIQUE_ID: OnceLock> = OnceLock::new(); #[async_trait::async_trait] pub(crate) trait TestBody: 'static + Send + Sync { async fn body(&self, processors: Vec); } #[async_trait::async_trait] impl) -> F> TestBody for TB { async fn body(&self, processors: Vec) { (self)(processors).await; } } pub(crate) async fn new_test(test_body: impl TestBody, fast_epoch: bool) { let mut unique_id_lock = UNIQUE_ID.get_or_init(|| Mutex::new(0)).lock().await; let mut coordinators = vec![]; let mut test = DockerTest::new().with_network(dockertest::Network::Isolated); let mut coordinator_compositions = vec![]; // Spawn one extra coordinator which isn't in-set #[allow(clippy::range_plus_one)] for i in 0 .. (COORDINATORS + 1) { let name = match i { 0 => "Alice", 1 => "Bob", 2 => "Charlie", 3 => "Dave", 4 => "Eve", 5 => "Ferdie", _ => panic!("needed a 7th name for a serai node"), }; let serai_composition = serai_composition(name, fast_epoch); let (processor_key, message_queue_keys, message_queue_composition) = serai_message_queue_tests::instance(); let coordinator_composition = coordinator_instance(name, processor_key); // Give every item in this stack a unique ID // Uses a Mutex as we can't generate a 8-byte random ID without hitting hostname length limits let (first, unique_id) = { let first = *unique_id_lock == 0; let unique_id = *unique_id_lock; *unique_id_lock += 1; (first, unique_id) }; let logs_path = fresh_logs_folder(first, "coordinator"); let mut compositions = vec![]; let mut handles = HashMap::new(); for (name, composition) in [ ("serai_node", serai_composition), ("message_queue", message_queue_composition), ("coordinator", coordinator_composition), ] { let handle = format!("coordinator-{name}-{unique_id}"); compositions.push( composition .set_start_policy(StartPolicy::Strict) .set_handle(handle.clone()) .set_log_options(Some(LogOptions { action: if std::env::var("GITHUB_CI") == Ok("true".to_string()) { LogAction::Forward } else { LogAction::ForwardToFile { path: 
logs_path.clone() } }, policy: LogPolicy::Always, source: LogSource::Both, })), ); handles.insert(name, handle); } let processor_key = message_queue_keys[&ExternalNetworkId::Bitcoin]; coordinators.push(( Handles { serai: handles.remove("serai_node").unwrap(), message_queue: handles.remove("message_queue").unwrap(), }, processor_key, )); coordinator_compositions.push(compositions.pop().unwrap()); for composition in compositions { test.provide_container(composition); } } struct Context { pending_coordinator_compositions: Mutex>, handles_and_keys: Vec<(Handles, ::F)>, test_body: Box, } static CONTEXT: OnceLock>> = OnceLock::new(); *CONTEXT.get_or_init(|| Mutex::new(None)).lock().await = Some(Context { pending_coordinator_compositions: Mutex::new(coordinator_compositions), handles_and_keys: coordinators, test_body: Box::new(test_body), }); // The DockerOperations from the first invocation, containing the Message Queue servers and the // Serai nodes. static OUTER_OPS: OnceLock>> = OnceLock::new(); // Reset OUTER_OPS *OUTER_OPS.get_or_init(|| Mutex::new(None)).lock().await = None; // Spawns a coordinator, if one has yet to be spawned, or else runs the test. 
async fn spawn_coordinator_or_run_test(inner_ops: DockerOperations) { // If the outer operations have yet to be set, these *are* the outer operations let outer_ops = OUTER_OPS.get().unwrap(); if outer_ops.lock().await.is_none() { *outer_ops.lock().await = Some(inner_ops); } let context_lock = CONTEXT.get().unwrap().lock().await; let Context { pending_coordinator_compositions, handles_and_keys: coordinators, test_body } = context_lock.as_ref().unwrap(); // Check if there is a coordinator left let maybe_coordinator = { let mut remaining = pending_coordinator_compositions.lock().await; let maybe_coordinator = if !remaining.is_empty() { let handles = coordinators[coordinators.len() - remaining.len()].0.clone(); let composition = remaining.remove(0); Some((composition, handles)) } else { None }; drop(remaining); maybe_coordinator }; if let Some((mut composition, handles)) = maybe_coordinator { let network = { let outer_ops = outer_ops.lock().await; let outer_ops = outer_ops.as_ref().unwrap(); // Spawn it by building another DockerTest which recursively calls this function // TODO: Spawn this outside of DockerTest so we can remove the recursion let serai_container = outer_ops.handle(&handles.serai); composition.modify_env("SERAI_HOSTNAME", serai_container.ip()); let message_queue_container = outer_ops.handle(&handles.message_queue); composition.modify_env("MESSAGE_QUEUE_RPC", message_queue_container.ip()); format!("container:{}", serai_container.name()) }; let mut test = DockerTest::new().with_network(dockertest::Network::External(network)); test.provide_container(composition); drop(context_lock); fn recurse(ops: DockerOperations) -> core::pin::Pin>> { Box::pin(spawn_coordinator_or_run_test(ops)) } test.run_async(recurse).await; } else { let outer_ops = outer_ops.lock().await.take().unwrap(); // Wait for the Serai node to boot, and for the Tendermint chain to get past the first block // TODO: Replace this with a Coordinator RPC we can query 
tokio::time::sleep(Duration::from_secs(60)).await; // Connect to the Message Queues as the processor let mut processors: Vec = vec![]; for (i, (handles, key)) in coordinators.iter().enumerate() { processors.push( Processor::new( i.try_into().unwrap(), ExternalNetworkId::Bitcoin, &outer_ops, handles.clone(), *key, ) .await, ); } test_body.body(processors).await; } } test.run_async(spawn_coordinator_or_run_test).await; } // TODO: Don't use a pessimistic sleep // Use an RPC to enaluate if a condition was met, with the following time being a timeout // https://github.com/serai-dex/serai/issues/340 pub(crate) async fn wait_for_tributary() { tokio::time::sleep(Duration::from_secs(15)).await; if std::env::var("GITHUB_CI") == Ok("true".to_string()) { tokio::time::sleep(Duration::from_secs(6)).await; } } ================================================ FILE: tests/coordinator/src/tests/rotation.rs ================================================ use tokio::time::{sleep, Duration}; use ciphersuite_kp256::Secp256k1; use serai_client::{ primitives::{insecure_pair_from_name, NetworkId}, validator_sets::{ self, primitives::{Session, ValidatorSet}, ValidatorSetsEvent, }, Amount, Pair, Transaction, }; use crate::{*, tests::*}; // TODO: This is duplicated with serai-client's tests async fn publish_tx(serai: &Serai, tx: &Transaction) -> [u8; 32] { let mut latest = serai .block(serai.latest_finalized_block_hash().await.unwrap()) .await .unwrap() .unwrap() .number(); serai.publish(tx).await.unwrap(); // Get the block it was included in // TODO: Add an RPC method for this/check the guarantee on the subscription let mut ticks = 0; loop { latest += 1; let block = { let mut block; while { block = serai.finalized_block_by_number(latest).await.unwrap(); block.is_none() } { sleep(Duration::from_secs(1)).await; ticks += 1; if ticks > 60 { panic!("60 seconds without inclusion in a finalized block"); } } block.unwrap() }; for transaction in &block.transactions { if transaction == tx { return 
block.hash(); } } } } #[allow(dead_code)] async fn allocate_stake( serai: &Serai, network: NetworkId, amount: Amount, pair: &Pair, nonce: u32, ) -> [u8; 32] { // get the call let tx = serai.sign(pair, validator_sets::SeraiValidatorSets::allocate(network, amount), nonce, 0); publish_tx(serai, &tx).await } #[allow(dead_code)] async fn deallocate_stake( serai: &Serai, network: NetworkId, amount: Amount, pair: &Pair, nonce: u32, ) -> [u8; 32] { // get the call let tx = serai.sign(pair, validator_sets::SeraiValidatorSets::deallocate(network, amount), nonce, 0); publish_tx(serai, &tx).await } async fn get_session(serai: &Serai, network: NetworkId) -> Session { serai .as_of_latest_finalized_block() .await .unwrap() .validator_sets() .session(network) .await .unwrap() .unwrap() } async fn wait_till_session_1(serai: &Serai, network: NetworkId) { let mut current_session = get_session(serai, network).await; while current_session.0 < 1 { sleep(Duration::from_secs(6)).await; current_session = get_session(serai, network).await; } } async fn most_recent_new_set_event(serai: &Serai, network: NetworkId) -> ValidatorSetsEvent { let mut current_block = serai.latest_finalized_block().await.unwrap(); loop { let events = serai.as_of(current_block.hash()).validator_sets().new_set_events().await.unwrap(); for event in events { match event { ValidatorSetsEvent::NewSet { set } => { if set.network == network { return event; } } _ => panic!("new_set_events gave non-NewSet event: {event:?}"), } } current_block = serai.block(current_block.header.parent_hash.0).await.unwrap().unwrap(); } } #[tokio::test] async fn set_rotation_test() { new_test( |mut processors: Vec| async move { // exclude the last processor from keygen since we will add him later let mut excluded = processors.pop().unwrap(); assert_eq!(processors.len(), COORDINATORS); // excluded participant let pair5 = insecure_pair_from_name("Eve"); let network = ExternalNetworkId::Bitcoin; let amount = Amount(1_000_000 * 10_u64.pow(8)); let 
serai = processors[0].serai().await; // allocate now for the last participant so that it is guaranteed to be included into session // 1 set. This doesn't affect the genesis set at all since that is a predetermined set. allocate_stake(&serai, network.into(), amount, &pair5, 0).await; // genesis keygen let _ = key_gen::(&mut processors, Session(0)).await; // Even the excluded processor should receive the key pair confirmation match excluded.recv_message().await { CoordinatorMessage::Substrate( messages::substrate::CoordinatorMessage::ConfirmKeyPair { session, .. }, ) => assert_eq!(session, Session(0)), _ => panic!("excluded got message other than ConfirmKeyPair"), } // wait until next session to see the effect on coordinator wait_till_session_1(&serai, network.into()).await; // Ensure the new validator was included in the new set assert_eq!( most_recent_new_set_event(&serai, network.into()).await, ValidatorSetsEvent::NewSet { set: ValidatorSet { session: Session(1), network: network.into() } }, ); // add the last participant & do the keygen processors.push(excluded); let _ = key_gen::(&mut processors, Session(1)).await; }, true, ) .await; } ================================================ FILE: tests/coordinator/src/tests/sign.rs ================================================ use std::{ time::Duration, collections::{HashSet, HashMap}, }; use rand_core::{RngCore, OsRng}; use ciphersuite_kp256::Secp256k1; use dkg::Participant; use serai_client::{ coins::{ primitives::{OutInstruction, OutInstructionWithBalance}, CoinsEvent, }, in_instructions::primitives::{Batch, InInstruction, InInstructionWithBalance}, primitives::{ insecure_pair_from_name, Amount, Balance, BlockHash, Coin, ExternalAddress, ExternalBalance, ExternalCoin, SeraiAddress, }, validator_sets::primitives::Session, PairTrait, SeraiCoins, }; use messages::{coordinator::PlanMeta, sign::SignId, SubstrateContext, CoordinatorMessage}; use crate::tests::*; pub async fn sign( processors: &mut [Processor], 
processor_is: &[u8], session: Session, plan_id: [u8; 32], ) { let id = SignId { session, id: plan_id, attempt: 0 }; // Select a random participant to exclude, so we know for sure who *is* participating assert_eq!(COORDINATORS - THRESHOLD, 1); let excluded_signer = usize::try_from(OsRng.next_u64() % u64::try_from(processors.len()).unwrap()).unwrap(); for (i, processor) in processors.iter_mut().enumerate() { if i == excluded_signer { continue; } processor .send_message(messages::sign::ProcessorMessage::Preprocess { id: id.clone(), preprocesses: vec![vec![processor_is[i]; 128]], }) .await; } // Before this plan is signed, the Tributary will agree the triggering Substrate block occurred, // adding an extra step of latency wait_for_tributary().await; wait_for_tributary().await; // Send from the excluded signer so they don't stay stuck processors[excluded_signer] .send_message(messages::sign::ProcessorMessage::Preprocess { id: id.clone(), preprocesses: vec![vec![processor_is[excluded_signer]; 128]], }) .await; // Read from a known signer to find out who was selected to sign let known_signer = (excluded_signer + 1) % COORDINATORS; let participants = match processors[known_signer].recv_message().await { CoordinatorMessage::Sign(messages::sign::CoordinatorMessage::Preprocesses { id: this_id, preprocesses, }) => { assert_eq!(&id, &this_id); assert_eq!(preprocesses.len(), THRESHOLD - 1); let known_signer_i = Participant::new(u16::from(processor_is[known_signer])).unwrap(); assert!(!preprocesses.contains_key(&known_signer_i)); let mut participants = preprocesses.keys().copied().collect::>(); for (p, preprocess) in preprocesses { assert_eq!(preprocess, vec![u8::try_from(u16::from(p)).unwrap(); 128]); } participants.insert(known_signer_i); participants } _ => panic!("coordinator didn't send back Preprocesses"), }; for i in participants.clone() { if u16::from(i) == u16::from(processor_is[known_signer]) { continue; } let processor = &mut 
processors[processor_is.iter().position(|p_i| u16::from(*p_i) == u16::from(i)).unwrap()]; let mut preprocesses = participants .clone() .into_iter() .map(|i| (i, vec![u8::try_from(u16::from(i)).unwrap(); 128])) .collect::>(); preprocesses.remove(&i); assert_eq!( processor.recv_message().await, CoordinatorMessage::Sign(messages::sign::CoordinatorMessage::Preprocesses { id: id.clone(), preprocesses }) ); } for i in participants.clone() { let processor = &mut processors[processor_is.iter().position(|p_i| u16::from(*p_i) == u16::from(i)).unwrap()]; processor .send_message(messages::sign::ProcessorMessage::Share { id: id.clone(), shares: vec![vec![u8::try_from(u16::from(i)).unwrap(); 32]], }) .await; } wait_for_tributary().await; for i in participants.clone() { let processor = &mut processors[processor_is.iter().position(|p_i| u16::from(*p_i) == u16::from(i)).unwrap()]; let mut shares = participants .clone() .into_iter() .map(|i| (i, vec![u8::try_from(u16::from(i)).unwrap(); 32])) .collect::>(); shares.remove(&i); assert_eq!( processor.recv_message().await, CoordinatorMessage::Sign(messages::sign::CoordinatorMessage::Shares { id: id.clone(), shares, }) ); } // Send Completed for i in participants.clone() { let processor = &mut processors[processor_is.iter().position(|p_i| u16::from(*p_i) == u16::from(i)).unwrap()]; processor .send_message(messages::sign::ProcessorMessage::Completed { session, id: id.id, tx: b"signed_tx".to_vec(), }) .await; } wait_for_tributary().await; // Make sure every processor gets Completed for processor in processors { assert_eq!( processor.recv_message().await, CoordinatorMessage::Sign(messages::sign::CoordinatorMessage::Completed { session, id: id.id, tx: b"signed_tx".to_vec() }) ); } } #[tokio::test] async fn sign_test() { new_test( |mut processors: Vec| async move { // pop the last participant since genesis keygen has only 4 participant. 
processors.pop().unwrap(); assert_eq!(processors.len(), COORDINATORS); let (participant_is, substrate_key, _) = key_gen::(&mut processors, Session(0)).await; // 'Send' external coins into Serai let serai = processors[0].serai().await; let (serai_pair, serai_addr) = { let mut name = [0; 4]; OsRng.fill_bytes(&mut name); let pair = insecure_pair_from_name(&hex::encode(name)); let address = SeraiAddress::from(pair.public()); // Fund the new account to pay for fees let balance = Balance { coin: Coin::Serai, amount: Amount(1_000_000_000) }; serai .publish(&serai.sign( &insecure_pair_from_name("Ferdie"), SeraiCoins::transfer(address, balance), 0, Default::default(), )) .await .unwrap(); (pair, address) }; #[allow(clippy::inconsistent_digit_grouping)] let amount = Amount(1_000_000_00); let balance = ExternalBalance { coin: ExternalCoin::Bitcoin, amount }; let coin_block = BlockHash([0x33; 32]); let block_included_in = batch( &mut processors, &participant_is, Session(0), &substrate_key, Batch { network: balance.coin.network(), id: 0, block: coin_block, instructions: vec![InInstructionWithBalance { instruction: InInstruction::Transfer(serai_addr), balance, }], }, ) .await; { let block_included_in_hash = serai.finalized_block_by_number(block_included_in).await.unwrap().unwrap().hash(); let serai = serai.as_of(block_included_in_hash); let serai = serai.coins(); assert_eq!( serai.coin_balance(Coin::Serai, serai_addr).await.unwrap(), Amount(1_000_000_000) ); // Verify the mint occurred as expected assert_eq!( serai.mint_events().await.unwrap(), vec![CoinsEvent::Mint { to: serai_addr, balance: balance.into() }] ); assert_eq!(serai.coin_supply(ExternalCoin::Bitcoin.into()).await.unwrap(), amount); assert_eq!( serai.coin_balance(ExternalCoin::Bitcoin.into(), serai_addr).await.unwrap(), amount ); } // Trigger a burn let out_instruction = OutInstructionWithBalance { balance, instruction: OutInstruction { address: ExternalAddress::new(b"external".to_vec()).unwrap(), data: None, }, }; 
serai .publish(&serai.sign( &serai_pair, SeraiCoins::burn_with_instruction(out_instruction.clone()), 0, Default::default(), )) .await .unwrap(); // TODO: We *really* need a helper for this pattern let mut last_serai_block = block_included_in; 'outer: for _ in 0 .. 20 { tokio::time::sleep(Duration::from_secs(6)).await; if std::env::var("GITHUB_CI") == Ok("true".to_string()) { tokio::time::sleep(Duration::from_secs(6)).await; } while last_serai_block <= serai.latest_finalized_block().await.unwrap().number() { let burn_events = serai .as_of(serai.finalized_block_by_number(last_serai_block).await.unwrap().unwrap().hash()) .coins() .burn_with_instruction_events() .await .unwrap(); if !burn_events.is_empty() { assert_eq!(burn_events.len(), 1); assert_eq!( burn_events[0], CoinsEvent::BurnWithInstruction { from: serai_addr, instruction: out_instruction.clone() } ); break 'outer; } last_serai_block += 1; } } let last_serai_block = serai.finalized_block_by_number(last_serai_block).await.unwrap().unwrap(); let last_serai_block_hash = last_serai_block.hash(); let serai = serai.as_of(last_serai_block_hash); let serai = serai.coins(); assert_eq!(serai.coin_supply(ExternalCoin::Bitcoin.into()).await.unwrap(), Amount(0)); assert_eq!( serai.coin_balance(ExternalCoin::Bitcoin.into(), serai_addr).await.unwrap(), Amount(0) ); let mut plan_id = [0; 32]; OsRng.fill_bytes(&mut plan_id); let plan_id = plan_id; // We should now get a SubstrateBlock for processor in &mut processors { assert_eq!( processor.recv_message().await, messages::CoordinatorMessage::Substrate( messages::substrate::CoordinatorMessage::SubstrateBlock { context: SubstrateContext { serai_time: last_serai_block.time().unwrap() / 1000, network_latest_finalized_block: coin_block, }, block: last_serai_block.number(), burns: vec![out_instruction.clone()], batches: vec![], } ) ); // Send the ACK, claiming there's a plan to sign processor .send_message(messages::ProcessorMessage::Coordinator( 
messages::coordinator::ProcessorMessage::SubstrateBlockAck { block: last_serai_block.number(), plans: vec![PlanMeta { session: Session(0), id: plan_id }], }, )) .await; } sign(&mut processors, &participant_is, Session(0), plan_id).await; }, false, ) .await; } ================================================ FILE: tests/docker/Cargo.toml ================================================ [package] name = "serai-docker-tests" version = "0.1.0" description = "Docker-based testing infrastructure for Serai" license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/tests/docker" authors = ["Luke Parker "] keywords = [] edition = "2021" publish = false [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] chrono = "0.4" ================================================ FILE: tests/docker/LICENSE ================================================ AGPL-3.0-only license Copyright (c) 2023 Luke Parker This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License Version 3 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see . ================================================ FILE: tests/docker/README.md ================================================ # Docker Tests Test infrastructure based around Docker. 
================================================ FILE: tests/docker/src/lib.rs ================================================ use std::{ sync::{Mutex, OnceLock}, collections::{HashSet, HashMap}, time::SystemTime, path::PathBuf, fs, env, process::Command, }; pub fn fresh_logs_folder(first: bool, label: &str) -> String { let logs_path = [std::env::current_dir().unwrap().to_str().unwrap(), ".test-logs", label] .iter() .collect::(); if first { let _ = fs::remove_dir_all(&logs_path); fs::create_dir_all(&logs_path).expect("couldn't create logs directory"); assert!( fs::read_dir(&logs_path).expect("couldn't read the logs folder").next().is_none(), "logs folder wasn't empty, despite removing it at the start of the run", ); } logs_path.to_str().unwrap().to_string() } // TODO: Merge this with what's in serai-orchestrator/have serai-orchestrator perform building static BUILT: OnceLock>> = OnceLock::new(); pub fn build(name: String) { let built = BUILT.get_or_init(|| Mutex::new(HashMap::new())); // Only one call to build will acquire this lock let mut built_lock = built.lock().unwrap(); if built_lock.contains_key(&name) { // If it was built, return return; } // Else, hold the lock while we build let mut repo_path = env::current_exe().unwrap(); repo_path.pop(); assert!(repo_path.as_path().ends_with("deps")); repo_path.pop(); assert!(repo_path.as_path().ends_with("debug")); repo_path.pop(); assert!(repo_path.as_path().ends_with("target")); repo_path.pop(); // Run the orchestrator to ensure the most recent files exist if !Command::new("cargo") .current_dir(&repo_path) .arg("run") .arg("-p") .arg("serai-orchestrator") .arg("--") .arg("key_gen") .arg("dev") .spawn() .unwrap() .wait() .unwrap() .success() { panic!("failed to run the orchestrator"); } if !Command::new("cargo") .current_dir(&repo_path) .arg("run") .arg("-p") .arg("serai-orchestrator") .arg("--") .arg("setup") .arg("dev") .spawn() .unwrap() .wait() .unwrap() .success() { panic!("failed to run the orchestrator"); } 
let mut orchestration_path = repo_path.clone(); orchestration_path.push("orchestration"); if name != "runtime" { orchestration_path.push("dev"); } let mut dockerfile_path = orchestration_path.clone(); if HashSet::from(["bitcoin", "ethereum", "ethereum-relayer", "monero"]).contains(name.as_str()) { dockerfile_path = dockerfile_path.join("networks"); } if name.contains("-processor") { dockerfile_path = dockerfile_path.join("processor").join(name.split('-').next().unwrap()).join("Dockerfile"); } else if name == "serai-fast-epoch" { dockerfile_path = dockerfile_path.join("serai").join("Dockerfile.fast-epoch"); } else { dockerfile_path = dockerfile_path.join(&name).join("Dockerfile"); } // If this Docker image was created after this repo was last edited, return here // This should have better performance than Docker and allows running while offline if let Ok(res) = Command::new("docker") .arg("inspect") .arg("-f") .arg("{{ .Metadata.LastTagTime }}") .arg(format!("serai-dev-{name}")) .output() { let last_tag_time_buf = String::from_utf8(res.stdout).expect("docker had non-utf8 output"); let last_tag_time = last_tag_time_buf.trim(); if !last_tag_time.is_empty() { let created_time = SystemTime::from( chrono::DateTime::parse_and_remainder(last_tag_time, "%F %T.%f %z") .unwrap_or_else(|_| { panic!("docker formatted last tag time unexpectedly: {last_tag_time}") }) .0, ); // For all services, if the Dockerfile was edited after the image was built we should rebuild let mut last_modified = fs::metadata(&dockerfile_path).ok().and_then(|meta| meta.modified().ok()); // Check any additionally specified paths let meta = |path: PathBuf| (path.clone(), fs::metadata(path)); let mut metadatas = match name.as_str() { "bitcoin" | "ethereum" | "monero" => vec![], "ethereum-relayer" => { vec![meta(repo_path.join("common")), meta(repo_path.join("networks"))] } "message-queue" => vec![ meta(repo_path.join("common")), meta(repo_path.join("crypto")), 
meta(repo_path.join("substrate").join("primitives")), meta(repo_path.join("message-queue")), ], "bitcoin-processor" | "ethereum-processor" | "monero-processor" => vec![ meta(repo_path.join("common")), meta(repo_path.join("crypto")), meta(repo_path.join("networks")), meta(repo_path.join("substrate")), meta(repo_path.join("message-queue")), meta(repo_path.join("processor")), ], "coordinator" => vec![ meta(repo_path.join("common")), meta(repo_path.join("crypto")), meta(repo_path.join("networks")), meta(repo_path.join("substrate")), meta(repo_path.join("message-queue")), meta(repo_path.join("coordinator")), ], "runtime" | "serai" | "serai-fast-epoch" => vec![ meta(repo_path.join("common")), meta(repo_path.join("crypto")), meta(repo_path.join("substrate")), ], _ => panic!("building unrecognized docker image"), }; while !metadatas.is_empty() { if let (path, Ok(metadata)) = metadatas.pop().unwrap() { if metadata.is_file() { if let Ok(modified) = metadata.modified() { if modified > last_modified .expect("got when source was last modified yet not when the Dockerfile was") { last_modified = Some(modified); } } } else { // Recursively crawl since we care when the folder's contents were edited, not the // folder itself for entry in fs::read_dir(path.clone()).expect("couldn't read directory") { metadatas.push(meta( path.join(entry.expect("couldn't access item in directory").file_name()), )); } } } } if let Some(last_modified) = last_modified { if last_modified < created_time { println!("{name} was built after the most recent source code edits, assuming built."); built_lock.insert(name, true); return; } } } } println!("Building {}...", &name); // Version which always prints if !Command::new("docker") .current_dir(&repo_path) .arg("build") .arg("-f") .arg(dockerfile_path) .arg(".") .arg("-t") .arg(format!("serai-dev-{name}")) .spawn() .unwrap() .wait() .unwrap() .success() { panic!("failed to build {name}"); } // Version which only prints on error /* let res = 
Command::new("docker") .current_dir(dockerfile_path) .arg("build") .arg(".") .arg("-t") .arg(format!("serai-dev-{name}")) .output() .unwrap(); if !res.status.success() { println!("failed to build {name}\n"); println!("-- stdout --"); println!( "{}\r\n", String::from_utf8(res.stdout) .unwrap_or_else(|_| "stdout had non-utf8 characters".to_string()) ); println!("-- stderr --"); println!( "{}\r\n", String::from_utf8(res.stderr) .unwrap_or_else(|_| "stderr had non-utf8 characters".to_string()) ); panic!("failed to build {name}"); } */ println!("Built!"); if std::env::var("GITHUB_CI").is_ok() { println!("In CI, so clearing cache to prevent hitting the storage limits."); if !Command::new("docker") .arg("builder") .arg("prune") .arg("--all") .arg("--force") .output() .unwrap() .status .success() { println!("failed to clear cache after building {name}\n"); } } // Set built built_lock.insert(name, true); } ================================================ FILE: tests/full-stack/Cargo.toml ================================================ [package] name = "serai-full-stack-tests" version = "0.1.0" description = "Tests for Serai's Full Stack" license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/tests/full-stack" authors = ["Luke Parker "] keywords = [] edition = "2021" publish = false [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] hex = "0.4" async-trait = "0.1" zeroize = { version = "1", default-features = false } rand_core = { version = "0.6", default-features = false } curve25519-dalek = { version = "4", features = ["rand_core"] } bitcoin-serai = { path = "../../networks/bitcoin" } monero-simple-request-rpc = { git = "https://github.com/monero-oxide/monero-oxide", rev = "32e6b5fe5ba9e1ea3e68da882550005122a11d22" } monero-wallet = { git = "https://github.com/monero-oxide/monero-oxide", rev = "32e6b5fe5ba9e1ea3e68da882550005122a11d22" } scale = { package = 
"parity-scale-codec", version = "3" } serde = "1" serde_json = "1" processor = { package = "serai-processor", path = "../../processor", features = ["bitcoin", "monero"] } serai-client = { path = "../../substrate/client", features = ["serai"] } tokio = { version = "1", features = ["time"] } dockertest = "0.5" serai-docker-tests = { path = "../docker" } serai-message-queue-tests = { path = "../message-queue" } serai-processor-tests = { path = "../processor" } serai-coordinator-tests = { path = "../coordinator" } ================================================ FILE: tests/full-stack/LICENSE ================================================ AGPL-3.0-only license Copyright (c) 2023 Luke Parker This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License Version 3 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see . 
================================================ FILE: tests/full-stack/src/lib.rs ================================================ use std::time::Duration; use serai_client::Serai; use dockertest::DockerOperations; use serai_processor_tests::{RPC_USER, RPC_PASS}; #[cfg(test)] mod tests; #[allow(unused)] #[derive(Clone, Debug)] pub struct Handles { bitcoin: (String, u32), bitcoin_processor: String, monero: (String, u32), monero_processor: String, message_queue: String, serai: String, } impl Handles { pub async fn serai(&self, ops: &DockerOperations) -> Serai { let serai_rpc = ops.handle(&self.serai).host_port(9944).unwrap(); let serai_rpc = format!("http://{}:{}", serai_rpc.0, serai_rpc.1); // If the RPC server has yet to start, sleep for up to 60s until it does for _ in 0 .. 60 { tokio::time::sleep(Duration::from_secs(1)).await; let Ok(client) = Serai::new(serai_rpc.clone()).await else { continue }; if client.latest_finalized_block_hash().await.is_err() { continue; } return client; } panic!("serai RPC server wasn't available after 60s"); } pub async fn bitcoin(&self, ops: &DockerOperations) -> bitcoin_serai::rpc::Rpc { let rpc = ops.handle(&self.bitcoin.0).host_port(self.bitcoin.1).unwrap(); let rpc = format!("http://{RPC_USER}:{RPC_PASS}@{}:{}", rpc.0, rpc.1); // If the RPC server has yet to start, sleep for up to 60s until it does for _ in 0 .. 60 { tokio::time::sleep(Duration::from_secs(1)).await; let Ok(client) = bitcoin_serai::rpc::Rpc::new(rpc.clone()).await else { continue }; return client; } panic!("bitcoin RPC server wasn't available after 60s"); } pub async fn monero( &self, ops: &DockerOperations, ) -> monero_simple_request_rpc::SimpleRequestRpc { use monero_simple_request_rpc::SimpleRequestRpc; use monero_wallet::rpc::Rpc; let rpc = ops.handle(&self.monero.0).host_port(self.monero.1).unwrap(); let rpc = format!("http://{RPC_USER}:{RPC_PASS}@{}:{}", rpc.0, rpc.1); // If the RPC server has yet to start, sleep for up to 60s until it does for _ in 0 .. 
60 { tokio::time::sleep(Duration::from_secs(1)).await; let Ok(client) = SimpleRequestRpc::new(rpc.clone()).await else { continue }; if client.get_height().await.is_err() { continue; } return client; } panic!("monero RPC server wasn't available after 60s"); } } ================================================ FILE: tests/full-stack/src/tests/mint_and_burn.rs ================================================ use std::{ sync::{OnceLock, Arc, Mutex}, time::{Duration, Instant}, }; use zeroize::Zeroizing; use rand_core::{RngCore, OsRng}; use scale::Encode; use serai_client::{ coins::primitives::{OutInstruction, OutInstructionWithBalance}, in_instructions::primitives::Shorthand, primitives::{ insecure_pair_from_name, Amount, Balance, Coin, ExternalAddress, ExternalBalance, ExternalCoin, SeraiAddress, }, validator_sets::primitives::{ExternalValidatorSet, Session}, PairTrait, SeraiCoins, }; use crate::tests::*; // TODO: Break this test out into functions re-usable across processor, processor e2e, and full // stack tests #[tokio::test] async fn mint_and_burn_test() { new_test(|ops, handles: Vec| async move { let ops = Arc::new(ops); let serai = handles[0].serai(&ops).await; // Helper to mine a block on each network async fn mine_blocks( handles: &[Handles], ops: &DockerOperations, producer: &mut usize, count: usize, ) { static MINE_BLOCKS_CALL: OnceLock> = OnceLock::new(); // Only let one instance of this function run at a time let _lock = MINE_BLOCKS_CALL.get_or_init(|| tokio::sync::Mutex::new(())).lock().await; // Pick a block producer via a round robin let producer_handles = &handles[*producer]; *producer += 1; *producer %= handles.len(); // Mine a Bitcoin block let bitcoin_blocks = { use bitcoin_serai::bitcoin::{ secp256k1::{SECP256K1, SecretKey}, PrivateKey, PublicKey, consensus::Encodable, network::Network, address::Address, }; let addr = Address::p2pkh( PublicKey::from_private_key( SECP256K1, &PrivateKey::new(SecretKey::from_slice(&[0x01; 32]).unwrap(), 
Network::Bitcoin), ), Network::Regtest, ); let rpc = producer_handles.bitcoin(ops).await; let mut res = Vec::with_capacity(count); for _ in 0 .. count { let hash = rpc .rpc_call::>("generatetoaddress", serde_json::json!([1, addr])) .await .unwrap() .swap_remove(0); let mut bytes = vec![]; rpc .get_block(&hex::decode(hash).unwrap().try_into().unwrap()) .await .unwrap() .consensus_encode(&mut bytes) .unwrap(); res.push(serde_json::json!([hex::encode(bytes)])); } res }; // Mine a Monero block let monero_blocks = { use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, scalar::Scalar}; use monero_wallet::{rpc::Rpc, ViewPair, address::Network}; let addr = ViewPair::new(ED25519_BASEPOINT_POINT, Zeroizing::new(Scalar::ONE)) .unwrap() .legacy_address(Network::Mainnet); let rpc = producer_handles.monero(ops).await; let mut res = Vec::with_capacity(count); for _ in 0 .. count { let block = rpc.get_block(rpc.generate_blocks(&addr, 1).await.unwrap().0[0]).await.unwrap(); let mut txs = Vec::with_capacity(block.transactions.len()); for tx in &block.transactions { txs.push(rpc.get_transaction(*tx).await.unwrap()); } res.push((serde_json::json!([hex::encode(block.serialize())]), txs)); } res }; // Relay it to all other nodes // If the producer is 0, the producer variable will be 1 since we already incremented // it // With 4 nodes, this will run 1 .. 4, which is the correct range for receiver in *producer .. 
(*producer + (handles.len() - 1)) { let receiver = receiver % handles.len(); let handles = &handles[receiver]; { let rpc = handles.bitcoin(ops).await; for block in &bitcoin_blocks { let _: () = rpc.rpc_call("submitblock", block.clone()).await.unwrap(); } } { use monero_wallet::rpc::Rpc; let rpc = handles.monero(ops).await; for (block, txs) in &monero_blocks { // Broadcast the Monero TXs, as they're not simply included with the block for tx in txs { // Ignore any errors since the TX already being present will return an error let _ = rpc.publish_transaction(tx).await; } #[derive(Debug, serde::Deserialize)] struct EmptyResponse {} let _: EmptyResponse = rpc.json_rpc_call("submit_block", Some(block.clone())).await.unwrap(); } } } } // Mine blocks to create mature funds mine_blocks(&handles, &ops, &mut 0, 101).await; // Spawn a background task to mine blocks on Bitcoin/Monero let keep_mining = Arc::new(Mutex::new(true)); { let keep_mining = keep_mining.clone(); let existing = std::panic::take_hook(); std::panic::set_hook(Box::new(move |panic| { // On panic, set keep_mining to false if let Ok(mut keep_mining) = keep_mining.lock() { *keep_mining = false; } else { println!("panic which poisoned keep_mining"); } existing(panic); })); } let mining_task = { let ops = ops.clone(); let handles = handles.clone(); let keep_mining = keep_mining.clone(); tokio::spawn(async move { let start = Instant::now(); let mut producer = 0; while { // Ensure this is deref'd to a bool, not any permutation of the lock let keep_mining: bool = *keep_mining.lock().unwrap(); // Bound execution to 60m keep_mining && (Instant::now().duration_since(start) < Duration::from_secs(60 * 60)) } { // Mine a block every 3s tokio::time::sleep(Duration::from_secs(3)).await; mine_blocks(&handles, &ops, &mut producer, 1).await; } }) }; // Get the generated keys let (bitcoin_key_pair, monero_key_pair) = { let key_pair = { let serai = &serai; move |additional, network| async move { // If this is an additional key 
pair, it should've completed with the first barring // misc latency, so only sleep up to 5 minutes // If this is the first key pair, wait up to 10 minutes let halt_at = if additional { 5 * 10 } else { 10 * 10 }; let print_at = halt_at / 2; for i in 0 .. halt_at { if let Some(key_pair) = serai .as_of_latest_finalized_block() .await .unwrap() .validator_sets() .keys(ExternalValidatorSet { network, session: Session(0) }) .await .unwrap() { return key_pair; } if i == print_at { println!( "waiting for {}key gen to complete, it has been {} minutes", if additional { "another " } else { "" }, print_at / 10, ); } tokio::time::sleep(Duration::from_secs(6)).await; } panic!( "{}key gen did not complete within {} minutes", if additional { "another " } else { "" }, halt_at / 10, ); } }; ( key_pair(false, ExternalNetworkId::Bitcoin).await, key_pair(true, ExternalNetworkId::Monero).await, ) }; // Because the initial keys only become active when the network's time matches the Serai // time, the Serai time is real yet the network time may be significantly delayed due to // potentially being a median, mine a bunch of blocks now mine_blocks(&handles, &ops, &mut 0, 100).await; // Create a Serai address to receive the sriBTC/sriXMR to let (serai_pair, serai_addr) = { let mut name = [0; 4]; OsRng.fill_bytes(&mut name); let pair = insecure_pair_from_name(&hex::encode(name)); let address = SeraiAddress::from(pair.public()); // Fund the new account to pay for fees let balance = Balance { coin: Coin::Serai, amount: Amount(1_000_000_000) }; serai .publish(&serai.sign( &insecure_pair_from_name("Ferdie"), SeraiCoins::transfer(address, balance), 0, Default::default(), )) .await .unwrap(); (pair, address) }; // Send in BTC { use bitcoin_serai::bitcoin::{ secp256k1::{SECP256K1, SecretKey, Message}, PrivateKey, PublicKey, key::{XOnlyPublicKey, TweakedPublicKey}, sighash::{EcdsaSighashType, SighashCache}, script::{PushBytesBuf, Script, ScriptBuf, Builder}, absolute::LockTime, transaction::{Version, 
Transaction}, Sequence, Witness, OutPoint, TxIn, Amount, TxOut, Network, Address, }; let private_key = PrivateKey::new(SecretKey::from_slice(&[0x01; 32]).unwrap(), Network::Bitcoin); let public_key = PublicKey::from_private_key(SECP256K1, &private_key); let addr = Address::p2pkh(public_key, Network::Bitcoin); // Use the first block's coinbase let rpc = handles[0].bitcoin(&ops).await; let tx = rpc.get_block(&rpc.get_block_hash(1).await.unwrap()).await.unwrap().txdata.swap_remove(0); #[allow(clippy::inconsistent_digit_grouping)] let mut tx = Transaction { version: Version(2), lock_time: LockTime::ZERO, input: vec![TxIn { previous_output: OutPoint { txid: tx.compute_txid(), vout: 0 }, script_sig: Script::new().into(), sequence: Sequence(u32::MAX), witness: Witness::default(), }], output: vec![ TxOut { value: Amount::from_sat(1_100_000_00), script_pubkey: Address::p2tr_tweaked( TweakedPublicKey::dangerous_assume_tweaked( XOnlyPublicKey::from_slice(&bitcoin_key_pair.1[1 ..]).unwrap(), ), Network::Bitcoin, ) .script_pubkey(), }, TxOut { // change = amount spent - fee value: Amount::from_sat(tx.output[0].value.to_sat() - 1_100_000_00 - 1_000_00), script_pubkey: Address::p2tr_tweaked( TweakedPublicKey::dangerous_assume_tweaked( XOnlyPublicKey::from_slice(&public_key.inner.serialize()[1 ..]).unwrap(), ), Network::Bitcoin, ) .script_pubkey(), }, TxOut { value: Amount::ZERO, script_pubkey: ScriptBuf::new_op_return( PushBytesBuf::try_from(Shorthand::transfer(None, serai_addr).encode()).unwrap(), ), }, ], }; let mut der = SECP256K1 .sign_ecdsa_low_r( &Message::from_digest_slice( SighashCache::new(&tx) .legacy_signature_hash(0, &addr.script_pubkey(), EcdsaSighashType::All.to_u32()) .unwrap() .to_raw_hash() .as_ref(), ) .unwrap(), &private_key.inner, ) .serialize_der() .to_vec(); der.push(1); tx.input[0].script_sig = Builder::new() .push_slice(PushBytesBuf::try_from(der).unwrap()) .push_key(&public_key) .into_script(); rpc.send_raw_transaction(&tx).await.unwrap(); } // Send in 
XMR { use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, scalar::Scalar}; use monero_wallet::{ io::decompress_point, ringct::RctType, rpc::{FeePriority, Rpc}, address::{Network, AddressType, MoneroAddress}, ViewPair, Scanner, OutputWithDecoys, send::{Change, SignableTransaction}, }; // Grab the first output on the chain let rpc = handles[0].monero(&ops).await; let view_pair = ViewPair::new(ED25519_BASEPOINT_POINT, Zeroizing::new(Scalar::ONE)).unwrap(); let mut scanner = Scanner::new(view_pair.clone()); let output = scanner .scan(rpc.get_scannable_block_by_number(1).await.unwrap()) .unwrap() .additional_timelock_satisfied_by(rpc.get_height().await.unwrap(), 0) .swap_remove(0); let input = OutputWithDecoys::fingerprintable_deterministic_new( &mut OsRng, &rpc, 16, rpc.get_height().await.unwrap(), output.clone(), ) .await .unwrap(); let mut outgoing_view_key = Zeroizing::new([0; 32]); OsRng.fill_bytes(outgoing_view_key.as_mut()); let tx = SignableTransaction::new( RctType::ClsagBulletproofPlus, outgoing_view_key, vec![input], vec![( MoneroAddress::new( Network::Mainnet, AddressType::Featured { guaranteed: true, subaddress: false, payment_id: None }, decompress_point(monero_key_pair.1.to_vec().try_into().unwrap()).unwrap(), ED25519_BASEPOINT_POINT * processor::additional_key::(0).0, ), 1_100_000_000_000, )], Change::new(view_pair.clone(), None), vec![Shorthand::transfer(None, serai_addr).encode()], rpc.get_fee_rate(FeePriority::Unimportant).await.unwrap(), ) .unwrap() .sign(&mut OsRng, &Zeroizing::new(Scalar::ONE)) .unwrap(); rpc.publish_transaction(&tx).await.unwrap() } // Wait for Batch publication // TODO: Merge this block with the above one // (take in a lambda for the specific checks to execute?) { let wait_for_batch = { let serai = &serai; move |additional, network| async move { let halt_at = if additional { 5 * 10 } else { 10 * 10 }; let print_at = halt_at / 2; for i in 0 .. 
halt_at { if serai .as_of_latest_finalized_block() .await .unwrap() .in_instructions() .last_batch_for_network(network) .await .unwrap() .is_some() { return; } if i == print_at { println!( "waiting for {}batch to complete, it has been {} minutes", if additional { "another " } else { "" }, print_at / 10, ); } tokio::time::sleep(Duration::from_secs(6)).await; } panic!( "{}batch did not complete within {} minutes", if additional { "another " } else { "" }, halt_at / 10, ); } }; wait_for_batch(false, ExternalNetworkId::Bitcoin).await; wait_for_batch(true, ExternalNetworkId::Monero).await; } // TODO: Verify the mints // Create a random Bitcoin/Monero address let bitcoin_addr = { use bitcoin_serai::bitcoin::{key::PublicKey, ScriptBuf}; ScriptBuf::new_p2pkh( &(loop { let mut bytes = [0; 33]; OsRng.fill_bytes(&mut bytes); bytes[0] %= 4; if let Ok(key) = PublicKey::from_slice(&bytes) { break key; } }) .pubkey_hash(), ) }; let (monero_spend, monero_view, monero_addr) = { use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar}; let spend = ED25519_BASEPOINT_TABLE * &Scalar::random(&mut OsRng); let view = Scalar::random(&mut OsRng); use monero_wallet::address::{Network, AddressType, MoneroAddress}; let addr = MoneroAddress::new( Network::Mainnet, AddressType::Legacy, spend, ED25519_BASEPOINT_TABLE * &view, ); (spend, view, addr) }; // Get the current blocks let mut start_bitcoin_block = handles[0].bitcoin(&ops).await.get_latest_block_number().await.unwrap(); let mut start_monero_block = { use monero_wallet::rpc::Rpc; handles[0].monero(&ops).await.get_height().await.unwrap() }; // Burn the sriBTC/sriXMR { let burn = { let serai = &serai; let serai_pair = &serai_pair; move |nonce, coin, amount, address| async move { let out_instruction = OutInstructionWithBalance { balance: ExternalBalance { coin, amount: Amount(amount) }, instruction: OutInstruction { address, data: None }, }; serai .publish(&serai.sign( serai_pair, 
SeraiCoins::burn_with_instruction(out_instruction), nonce, Default::default(), )) .await .unwrap(); } }; #[allow(clippy::inconsistent_digit_grouping)] burn( 0, ExternalCoin::Bitcoin, 1_000_000_00, ExternalAddress::new( serai_client::networks::bitcoin::Address::new(bitcoin_addr.clone()).unwrap().into(), ) .unwrap(), ) .await; burn( 1, ExternalCoin::Monero, 1_000_000_000_000, ExternalAddress::new( serai_client::networks::monero::Address::new(monero_addr).unwrap().into(), ) .unwrap(), ) .await; } // TODO: Verify the burns // Verify the received Bitcoin TX #[allow(clippy::inconsistent_digit_grouping)] { let rpc = handles[0].bitcoin(&ops).await; // Check for up to 15 minutes let mut found = false; let mut i = 0; while i < (15 * 6) { if let Ok(hash) = rpc.get_block_hash(start_bitcoin_block).await { let block = rpc.get_block(&hash).await.unwrap(); start_bitcoin_block += 1; if block.txdata.len() > 1 { assert_eq!(block.txdata.len(), 2); assert_eq!(block.txdata[1].output.len(), 2); let received_output = block.txdata[1] .output .iter() .find(|output| output.script_pubkey == bitcoin_addr) .unwrap(); let tx_fee = 1_100_000_00 - block.txdata[1].output.iter().map(|output| output.value.to_sat()).sum::(); assert_eq!(received_output.value.to_sat(), 1_000_000_00 - tx_fee); found = true; } } else { i += 1; tokio::time::sleep(Duration::from_secs(10)).await; } } if !found { panic!("couldn't find the expected Bitcoin transaction within 15 minutes"); } } // Verify the received Monero TX { use monero_wallet::{transaction::Transaction, rpc::Rpc, ViewPair, Scanner}; let rpc = handles[0].monero(&ops).await; let mut scanner = Scanner::new(ViewPair::new(monero_spend, Zeroizing::new(monero_view)).unwrap()); // Check for up to 5 minutes let mut found = false; let mut i = 0; while i < (5 * 6) { if let Ok(block) = rpc.get_block_by_number(start_monero_block).await { start_monero_block += 1; let outputs = scanner .scan(rpc.get_scannable_block(block.clone()).await.unwrap()) .unwrap() 
.not_additionally_locked(); if !outputs.is_empty() { assert_eq!(outputs.len(), 1); assert_eq!(block.transactions.len(), 1); let tx = rpc.get_transaction(block.transactions[0]).await.unwrap(); let tx_fee = match &tx { Transaction::V2 { proofs: Some(proofs), .. } => proofs.base.fee, _ => panic!("fetched TX wasn't a signed V2 TX"), }; assert_eq!(outputs[0].commitment().amount, 1_000_000_000_000 - tx_fee); found = true; } } else { i += 1; tokio::time::sleep(Duration::from_secs(10)).await; } } if !found { panic!("couldn't find the expected Monero transaction within 5 minutes"); } } *keep_mining.lock().unwrap() = false; mining_task.await.unwrap(); }) .await; } ================================================ FILE: tests/full-stack/src/tests/mod.rs ================================================ use core::future::Future; use std::{sync::OnceLock, collections::HashMap}; use tokio::sync::Mutex; use serai_client::primitives::ExternalNetworkId; use dockertest::{ LogAction, LogPolicy, LogSource, LogOptions, StartPolicy, TestBodySpecification, DockerOperations, DockerTest, }; use serai_docker_tests::fresh_logs_folder; use serai_processor_tests::{network_instance, processor_instance}; use serai_message_queue_tests::instance as message_queue_instance; use serai_coordinator_tests::{coordinator_instance, serai_composition}; use crate::*; mod mint_and_burn; pub(crate) const VALIDATORS: usize = 4; // pub(crate) const THRESHOLD: usize = ((VALIDATORS * 2) / 3) + 1; static UNIQUE_ID: OnceLock> = OnceLock::new(); #[async_trait::async_trait] pub(crate) trait TestBody: 'static + Send + Sync { async fn body(&self, ops: DockerOperations, handles: Vec); } #[async_trait::async_trait] impl) -> F> TestBody for TB { async fn body(&self, ops: DockerOperations, handles: Vec) { (self)(ops, handles).await; } } pub(crate) async fn new_test(test_body: impl TestBody) { let mut unique_id_lock = UNIQUE_ID.get_or_init(|| Mutex::new(0)).lock().await; let mut all_handles = vec![]; let mut test = 
DockerTest::new().with_network(dockertest::Network::Isolated); let mut coordinator_compositions = vec![]; for i in 0 .. VALIDATORS { let name = match i { 0 => "Alice", 1 => "Bob", 2 => "Charlie", 3 => "Dave", 4 => "Eve", 5 => "Ferdie", _ => panic!("needed a 7th name for a serai node"), }; let (coord_key, message_queue_keys, message_queue_composition) = message_queue_instance(); let (bitcoin_composition, bitcoin_port) = network_instance(ExternalNetworkId::Bitcoin); let mut bitcoin_processor_composition = processor_instance( ExternalNetworkId::Bitcoin, bitcoin_port, message_queue_keys[&ExternalNetworkId::Bitcoin], ); assert_eq!(bitcoin_processor_composition.len(), 1); let bitcoin_processor_composition = bitcoin_processor_composition.swap_remove(0); let (monero_composition, monero_port) = network_instance(ExternalNetworkId::Monero); let mut monero_processor_composition = processor_instance( ExternalNetworkId::Monero, monero_port, message_queue_keys[&ExternalNetworkId::Monero], ); assert_eq!(monero_processor_composition.len(), 1); let monero_processor_composition = monero_processor_composition.swap_remove(0); let coordinator_composition = coordinator_instance(name, coord_key); let serai_composition = serai_composition(name, false); // Give every item in this stack a unique ID // Uses a Mutex as we can't generate a 8-byte random ID without hitting hostname length limits let (first, unique_id) = { let first = *unique_id_lock == 0; let unique_id = *unique_id_lock; *unique_id_lock += 1; (first, unique_id) }; let logs_path = fresh_logs_folder(first, "full-stack"); let mut compositions = HashMap::new(); let mut handles = HashMap::new(); for (name, composition) in [ ("message_queue", message_queue_composition), ("bitcoin", bitcoin_composition), ("bitcoin_processor", bitcoin_processor_composition), ("monero", monero_composition), ("monero_processor", monero_processor_composition), ("coordinator", coordinator_composition), ("serai", serai_composition), ] { let handle = 
format!("full_stack-{name}-{unique_id}"); compositions.insert( name, composition .set_start_policy(StartPolicy::Strict) .set_handle(handle.clone()) .set_log_options(Some(LogOptions { action: if std::env::var("GITHUB_CI") == Ok("true".to_string()) { LogAction::Forward } else { LogAction::ForwardToFile { path: logs_path.clone() } }, policy: LogPolicy::Always, source: LogSource::Both, })), ); handles.insert(name, handle); } let handles = Handles { message_queue: handles.remove("message_queue").unwrap(), bitcoin: (handles.remove("bitcoin").unwrap(), bitcoin_port), bitcoin_processor: handles.remove("bitcoin_processor").unwrap(), monero: (handles.remove("monero").unwrap(), monero_port), monero_processor: handles.remove("monero_processor").unwrap(), serai: handles.remove("serai").unwrap(), }; { let bitcoin_processor_composition = compositions.get_mut("bitcoin_processor").unwrap(); bitcoin_processor_composition .inject_container_name(handles.message_queue.clone(), "MESSAGE_QUEUE_RPC"); bitcoin_processor_composition .inject_container_name(handles.bitcoin.0.clone(), "NETWORK_RPC_HOSTNAME"); } { let monero_processor_composition = compositions.get_mut("monero_processor").unwrap(); monero_processor_composition .inject_container_name(handles.message_queue.clone(), "MESSAGE_QUEUE_RPC"); monero_processor_composition .inject_container_name(handles.monero.0.clone(), "NETWORK_RPC_HOSTNAME"); } coordinator_compositions.push(compositions.remove("coordinator").unwrap()); all_handles.push(handles); for (_, composition) in compositions { test.provide_container(composition); } } struct Context { pending_coordinator_compositions: Mutex>, handles: Vec, test_body: Box, } static CONTEXT: OnceLock>> = OnceLock::new(); *CONTEXT.get_or_init(|| Mutex::new(None)).lock().await = Some(Context { pending_coordinator_compositions: Mutex::new(coordinator_compositions), handles: all_handles, test_body: Box::new(test_body), }); // The DockerOperations from the first invocation, containing the Message Queue 
servers and the // Serai nodes. static OUTER_OPS: OnceLock>> = OnceLock::new(); // Reset OUTER_OPS *OUTER_OPS.get_or_init(|| Mutex::new(None)).lock().await = None; // Spawns a coordinator, if one has yet to be spawned, or else runs the test. pub(crate) fn spawn_coordinator_or_run_test( inner_ops: DockerOperations, ) -> core::pin::Pin>> { Box::pin(async { // If the outer operations have yet to be set, these *are* the outer operations let outer_ops = OUTER_OPS.get().unwrap(); if outer_ops.lock().await.is_none() { *outer_ops.lock().await = Some(inner_ops); } let context_lock = CONTEXT.get().unwrap().lock().await; let Context { pending_coordinator_compositions, handles, test_body } = context_lock.as_ref().unwrap(); // Check if there is a coordinator left let maybe_coordinator = { let mut remaining = pending_coordinator_compositions.lock().await; let maybe_coordinator = if !remaining.is_empty() { let handles = handles[handles.len() - remaining.len()].clone(); let composition = remaining.remove(0); Some((composition, handles)) } else { None }; drop(remaining); maybe_coordinator }; if let Some((mut composition, handles)) = maybe_coordinator { let network = { let outer_ops = outer_ops.lock().await; let outer_ops = outer_ops.as_ref().unwrap(); // Spawn it by building another DockerTest which recursively calls this function // TODO: Spawn this outside of DockerTest so we can remove the recursion let serai_container = outer_ops.handle(&handles.serai); composition.modify_env("SERAI_HOSTNAME", serai_container.ip()); let message_queue_container = outer_ops.handle(&handles.message_queue); composition.modify_env("MESSAGE_QUEUE_RPC", message_queue_container.ip()); format!("container:{}", serai_container.name()) }; let mut test = DockerTest::new().with_network(dockertest::Network::External(network)); test.provide_container(composition); drop(context_lock); test.run_async(spawn_coordinator_or_run_test).await; } else { let outer_ops = outer_ops.lock().await.take().unwrap(); 
test_body.body(outer_ops, handles.clone()).await; } }) } test.run_async(spawn_coordinator_or_run_test).await; } ================================================ FILE: tests/message-queue/Cargo.toml ================================================ [package] name = "serai-message-queue-tests" version = "0.1.0" description = "Tests for Serai's Message Queue" license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/tests/message-queue" authors = ["Luke Parker "] keywords = [] edition = "2021" publish = false [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] hex = "0.4" zeroize = { version = "1", default-features = false } rand_core = { version = "0.6", default-features = false, features = ["getrandom"] } dalek-ff-group = { path = "../../crypto/dalek-ff-group", default-features = false } ciphersuite = { path = "../../crypto/ciphersuite", default-features = false } serai-primitives = { path = "../../substrate/primitives" } serai-message-queue = { path = "../../message-queue" } tokio = { version = "1", features = ["time"] } dockertest = "0.5" serai-docker-tests = { path = "../docker" } ================================================ FILE: tests/message-queue/LICENSE ================================================ AGPL-3.0-only license Copyright (c) 2023 Luke Parker This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License Version 3 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see . 
================================================ FILE: tests/message-queue/src/lib.rs ================================================
use std::collections::HashMap;

use rand_core::OsRng;

use dalek_ff_group::Ristretto;
use ciphersuite::{
  group::{ff::Field, GroupEncoding},
  Ciphersuite,
};

use serai_primitives::{ExternalNetworkId, EXTERNAL_NETWORKS};

use dockertest::{
  PullPolicy, Image, LogAction, LogPolicy, LogSource, LogOptions, TestBodySpecification,
};

/// A private key for authenticating with the message-queue service.
// Restored `<Ristretto as Ciphersuite>`, which the text extraction stripped.
pub type MessageQueuePrivateKey = <Ristretto as Ciphersuite>::F;

/// Build a Docker composition for a message-queue instance with freshly-generated keys.
///
/// Generates a random coordinator key and one random key per external network, then configures
/// the `serai-dev-message-queue` image with the corresponding *public* keys via its environment.
///
/// Returns the coordinator's private key, the per-network private keys, and the composition
/// (with all ports published so tests can reach the service from the host).
pub fn instance() -> (
  MessageQueuePrivateKey,
  // Restored the generic arguments stripped by extraction: keyed by network, valued by key
  HashMap<ExternalNetworkId, MessageQueuePrivateKey>,
  TestBodySpecification,
) {
  // Ensure the image exists locally; the composition below never pulls
  serai_docker_tests::build("message-queue".to_string());

  let coord_key = <Ristretto as Ciphersuite>::F::random(&mut OsRng);
  let priv_keys = EXTERNAL_NETWORKS
    .into_iter()
    .map(|n| (n, <Ristretto as Ciphersuite>::F::random(&mut OsRng)))
    .collect::<HashMap<_, _>>();

  let composition = TestBodySpecification::with_image(
    Image::with_repository("serai-dev-message-queue").pull_policy(PullPolicy::Never),
  )
  .set_log_options(Some(LogOptions {
    action: LogAction::Forward,
    policy: LogPolicy::Always,
    source: LogSource::Both,
  }))
  .replace_env(
    [
      // The service is configured with public keys (generator * private key), hex-encoded
      ("COORDINATOR_KEY".to_string(), hex::encode((Ristretto::generator() * coord_key).to_bytes())),
      (
        "BITCOIN_KEY".to_string(),
        hex::encode((Ristretto::generator() * priv_keys[&ExternalNetworkId::Bitcoin]).to_bytes()),
      ),
      (
        "ETHEREUM_KEY".to_string(),
        hex::encode((Ristretto::generator() * priv_keys[&ExternalNetworkId::Ethereum]).to_bytes()),
      ),
      (
        "MONERO_KEY".to_string(),
        hex::encode((Ristretto::generator() * priv_keys[&ExternalNetworkId::Monero]).to_bytes()),
      ),
      ("DB_PATH".to_string(), "./message-queue-db".to_string()),
      ("RUST_LOG".to_string(), "serai_message_queue=trace,".to_string()),
    ]
    .into(),
  )
  .set_publish_all_ports(true);

  (coord_key, priv_keys, composition)
}

#[test]
fn basic_functionality() {
  use zeroize::Zeroizing;

  use dockertest::DockerTest;

  use serai_message_queue::{Service, Metadata, client::MessageQueue};

  let mut test = DockerTest::new().with_network(dockertest::Network::Isolated);
  let (coord_key, priv_keys, composition) =
instance(); test.provide_container(composition); test.run(|ops| async move { tokio::time::timeout(core::time::Duration::from_secs(60), async move { // Sleep for a second for the message-queue to boot // It isn't an error to start immediately, it just silences an error tokio::time::sleep(core::time::Duration::from_secs(1)).await; let rpc = ops.handle("serai-dev-message-queue").host_port(2287).unwrap(); let rpc = rpc.0.to_string() + ":" + &rpc.1.to_string(); // Queue some messages let coordinator = MessageQueue::new(Service::Coordinator, rpc.clone(), Zeroizing::new(coord_key)); coordinator .queue( Metadata { from: Service::Coordinator, to: Service::Processor(ExternalNetworkId::Bitcoin), intent: b"intent".to_vec(), }, b"Hello, World!".to_vec(), ) .await; // Queue this twice, which message-queue should de-duplicate for _ in 0 .. 2 { coordinator .queue( Metadata { from: Service::Coordinator, to: Service::Processor(ExternalNetworkId::Bitcoin), intent: b"intent 2".to_vec(), }, b"Hello, World, again!".to_vec(), ) .await; } // Successfully get it let bitcoin = MessageQueue::new( Service::Processor(ExternalNetworkId::Bitcoin), rpc.clone(), Zeroizing::new(priv_keys[&ExternalNetworkId::Bitcoin]), ); let msg = bitcoin.next(Service::Coordinator).await; assert_eq!(msg.from, Service::Coordinator); assert_eq!(msg.id, 0); assert_eq!(&msg.msg, b"Hello, World!"); // If we don't ack it, it should continue to be returned assert_eq!(msg, bitcoin.next(Service::Coordinator).await); // Acknowledging it should yield the next message bitcoin.ack(Service::Coordinator, 0).await; let next_msg = bitcoin.next(Service::Coordinator).await; assert!(msg != next_msg); assert_eq!(next_msg.from, Service::Coordinator); assert_eq!(next_msg.id, 1); assert_eq!(&next_msg.msg, b"Hello, World, again!"); bitcoin.ack(Service::Coordinator, 1).await; // No further messages should be available tokio::time::timeout(core::time::Duration::from_secs(10), bitcoin.next(Service::Coordinator)) .await .unwrap_err(); // 
Queueing to a distinct processor should work, with a unique ID coordinator .queue( Metadata { from: Service::Coordinator, to: Service::Processor(ExternalNetworkId::Monero), // Intents should be per-from-to, making this valid intent: b"intent".to_vec(), }, b"Hello, World!".to_vec(), ) .await; let monero = MessageQueue::new( Service::Processor(ExternalNetworkId::Monero), rpc, Zeroizing::new(priv_keys[&ExternalNetworkId::Monero]), ); assert_eq!(monero.next(Service::Coordinator).await.id, 0); monero.ack(Service::Coordinator, 0).await; tokio::time::timeout(core::time::Duration::from_secs(10), monero.next(Service::Coordinator)) .await .unwrap_err(); }) .await .unwrap(); }); } ================================================ FILE: tests/no-std/Cargo.toml ================================================ [package] name = "serai-no-std-tests" version = "0.1.0" description = "A crate to test no-std builds of Serai crates work" license = "MIT" repository = "https://github.com/kayabaNerve/serai/tree/develop/tests/no-std" authors = ["Luke Parker "] keywords = ["nostd", "no_std", "alloc"] edition = "2021" publish = false [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] flexible-transcript = { path = "../../crypto/transcript", default-features = false, features = ["recommended", "merlin"] } dalek-ff-group = { path = "../../crypto/dalek-ff-group", default-features = false, features = ["alloc"] } minimal-ed448 = { path = "../../crypto/ed448", default-features = false, features = ["alloc"] } ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["alloc"] } ciphersuite-kp256 = { path = "../../crypto/ciphersuite/kp256", default-features = false, features = ["alloc"] } multiexp = { path = "../../crypto/multiexp", default-features = false, features = ["batch"] } dleq = { path = "../../crypto/dleq", default-features = false } schnorr-signatures = { path = "../../crypto/schnorr", 
default-features = false } dkg = { path = "../../crypto/dkg", default-features = false } dkg-recovery = { path = "../../crypto/dkg/recovery", default-features = false } dkg-dealer = { path = "../../crypto/dkg/dealer", default-features = false } dkg-musig = { path = "../../crypto/dkg/musig", default-features = false } # modular-frost = { path = "../../crypto/frost", default-features = false } # frost-schnorrkel = { path = "../../crypto/schnorrkel", default-features = false } bitcoin-serai = { path = "../../networks/bitcoin", default-features = false, features = ["hazmat"] } ================================================ FILE: tests/no-std/LICENSE ================================================ MIT License Copyright (c) 2023 Luke Parker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: tests/no-std/README.md ================================================ # no-std tests A crate usable to test building various Serai crates in a no-std environment. 
================================================ FILE: tests/no-std/src/lib.rs ================================================
// This crate exists solely to exercise no-std builds: re-exporting each crate forces it to
// compile under `#![no_std]`. It contains no logic of its own.
#![no_std]

pub use flexible_transcript;

pub use dalek_ff_group;
pub use minimal_ed448;

pub use ciphersuite;
pub use ciphersuite_kp256;

pub use multiexp;

pub use dleq;
pub use schnorr_signatures;

pub use dkg;
pub use dkg_recovery;
pub use dkg_dealer;
pub use dkg_musig;
/*
pub use modular_frost;
pub use frost_schnorrkel;
*/

pub use bitcoin_serai;
================================================ FILE: tests/processor/Cargo.toml ================================================
[package]
name = "serai-processor-tests"
version = "0.1.0"
description = "Tests for Serai's Processor"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/tests/processor"
authors = ["Luke Parker "]
keywords = []
edition = "2021"
publish = false

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
hex = "0.4"
zeroize = { version = "1", default-features = false }
rand_core = { version = "0.6", default-features = false, features = ["getrandom"] }

curve25519-dalek = "4"

dalek-ff-group = { path = "../../crypto/dalek-ff-group", default-features = false }
ciphersuite = { path = "../../crypto/ciphersuite", default-features = false }
ciphersuite-kp256 = { path = "../../crypto/ciphersuite/kp256", default-features = false }
dkg = { path = "../../crypto/dkg", default-features = false }

bitcoin-serai = { path = "../../networks/bitcoin" }

k256 = "0.13"
ethereum-serai = { path = "../../networks/ethereum" }

monero-simple-request-rpc = { git = "https://github.com/monero-oxide/monero-oxide", rev = "32e6b5fe5ba9e1ea3e68da882550005122a11d22" }
monero-wallet = { git = "https://github.com/monero-oxide/monero-oxide", rev = "32e6b5fe5ba9e1ea3e68da882550005122a11d22" }

messages = { package = "serai-processor-messages", path = "../../processor/messages" }

scale = { package =
"parity-scale-codec", version = "3" } serai-client = { path = "../../substrate/client" } serai-db = { path = "../../common/db", default-features = false } serai-message-queue = { path = "../../message-queue" } borsh = { version = "1", features = ["de_strict_order"] } serde_json = { version = "1", default-features = false } tokio = { version = "1", features = ["time"] } processor = { package = "serai-processor", path = "../../processor", features = ["bitcoin", "ethereum", "monero"] } dockertest = "0.5" serai-docker-tests = { path = "../docker" } serai-message-queue-tests = { path = "../message-queue" } ================================================ FILE: tests/processor/LICENSE ================================================ AGPL-3.0-only license Copyright (c) 2023 Luke Parker This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License Version 3 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see . 
================================================ FILE: tests/processor/src/lib.rs ================================================ #![allow(clippy::needless_pass_by_ref_mut)] // False positives use std::sync::{OnceLock, Mutex}; use zeroize::Zeroizing; use rand_core::{RngCore, OsRng}; use dalek_ff_group::Ristretto; use ciphersuite::{group::ff::PrimeField, Ciphersuite}; use serai_client::primitives::ExternalNetworkId; use messages::{ProcessorMessage, CoordinatorMessage}; use serai_message_queue::{Service, Metadata, client::MessageQueue}; use dockertest::{ PullPolicy, Image, LogAction, LogPolicy, LogSource, LogOptions, StartPolicy, TestBodySpecification, DockerOperations, }; mod networks; pub use networks::*; #[cfg(test)] mod tests; static UNIQUE_ID: OnceLock> = OnceLock::new(); pub fn processor_instance( network: ExternalNetworkId, port: u32, message_queue_key: ::F, ) -> Vec { let mut entropy = [0; 32]; OsRng.fill_bytes(&mut entropy); let network_str = match network { ExternalNetworkId::Bitcoin => "bitcoin", ExternalNetworkId::Ethereum => "ethereum", ExternalNetworkId::Monero => "monero", }; let image = format!("{network_str}-processor"); serai_docker_tests::build(image.clone()); let mut res = vec![TestBodySpecification::with_image( Image::with_repository(format!("serai-dev-{image}")).pull_policy(PullPolicy::Never), ) .replace_env( [ ("MESSAGE_QUEUE_KEY".to_string(), hex::encode(message_queue_key.to_repr())), ("ENTROPY".to_string(), hex::encode(entropy)), ("NETWORK".to_string(), network_str.to_string()), ("NETWORK_RPC_LOGIN".to_string(), format!("{RPC_USER}:{RPC_PASS}")), ("NETWORK_RPC_PORT".to_string(), port.to_string()), ("DB_PATH".to_string(), "./processor-db".to_string()), ("RUST_LOG".to_string(), "serai_processor=trace,".to_string()), ] .into(), )]; if network == ExternalNetworkId::Ethereum { serai_docker_tests::build("ethereum-relayer".to_string()); res.push( TestBodySpecification::with_image( Image::with_repository("serai-dev-ethereum-relayer".to_string()) 
.pull_policy(PullPolicy::Never), ) .replace_env( [ ("DB_PATH".to_string(), "./ethereum-relayer-db".to_string()), ("RUST_LOG".to_string(), "serai_ethereum_relayer=trace,".to_string()), ] .into(), ) .set_publish_all_ports(true), ); } res } pub type Handles = (String, String, String, String); pub fn processor_stack( network: ExternalNetworkId, network_hostname_override: Option, ) -> (Handles, ::F, Vec) { let (network_composition, network_rpc_port) = network_instance(network); let (coord_key, message_queue_keys, message_queue_composition) = serai_message_queue_tests::instance(); let mut processor_compositions = processor_instance(network, network_rpc_port, message_queue_keys[&network]); // Give every item in this stack a unique ID // Uses a Mutex as we can't generate a 8-byte random ID without hitting hostname length limits let unique_id = { let unique_id_mutex = UNIQUE_ID.get_or_init(|| Mutex::new(0)); let mut unique_id_lock = unique_id_mutex.lock().unwrap(); let unique_id = *unique_id_lock; *unique_id_lock += 1; unique_id }; let mut compositions = vec![]; let mut handles = vec![]; for (name, composition) in [ Some(( match network { ExternalNetworkId::Bitcoin => "bitcoin", ExternalNetworkId::Ethereum => "ethereum", ExternalNetworkId::Monero => "monero", }, network_composition, )), Some(("message_queue", message_queue_composition)), Some(("processor", processor_compositions.remove(0))), processor_compositions.pop().map(|composition| ("relayer", composition)), ] .into_iter() .flatten() { let handle = format!("processor-{name}-{unique_id}"); compositions.push( composition.set_start_policy(StartPolicy::Strict).set_handle(handle.clone()).set_log_options( Some(LogOptions { action: LogAction::Forward, policy: if handle.contains("-processor-") { LogPolicy::Always } else { LogPolicy::OnError }, source: LogSource::Both, }), ), ); handles.push(handle); } let processor_composition = compositions.get_mut(2).unwrap(); processor_composition.inject_container_name( 
network_hostname_override.unwrap_or_else(|| handles[0].clone()), "NETWORK_RPC_HOSTNAME", ); if let Some(hostname) = handles.get(3) { processor_composition.inject_container_name(hostname, "ETHEREUM_RELAYER_HOSTNAME"); processor_composition.modify_env("ETHEREUM_RELAYER_PORT", "20830"); } processor_composition.inject_container_name(handles[1].clone(), "MESSAGE_QUEUE_RPC"); ( ( handles[0].clone(), handles[1].clone(), handles[2].clone(), handles.get(3).cloned().unwrap_or(String::new()), ), coord_key, compositions, ) } pub struct Coordinator { network: ExternalNetworkId, network_handle: String, #[allow(unused)] message_queue_handle: String, #[allow(unused)] processor_handle: String, relayer_handle: String, next_send_id: u64, next_recv_id: u64, queue: MessageQueue, } impl Coordinator { pub fn new( network: ExternalNetworkId, ops: &DockerOperations, handles: Handles, coord_key: ::F, ) -> Coordinator { let rpc = ops.handle(&handles.1).host_port(2287).unwrap(); let rpc = rpc.0.to_string() + ":" + &rpc.1.to_string(); let res = Coordinator { network, network_handle: handles.0, message_queue_handle: handles.1, processor_handle: handles.2, relayer_handle: handles.3, next_send_id: 0, next_recv_id: 0, queue: MessageQueue::new(Service::Coordinator, rpc, Zeroizing::new(coord_key)), }; // Sleep for up to a minute in case the external network's RPC has yet to start // Gets an async handle to block on since this function plays nicer when it isn't itself async { let ops = ops.clone(); let network_handle = res.network_handle.clone(); std::thread::spawn(move || { let runtime = tokio::runtime::Runtime::new().unwrap(); let handle = runtime.handle(); let _async = handle.enter(); let rpc_url = network_rpc(network, &ops, &network_handle); let mut iters = 0; while iters < 60 { match network { ExternalNetworkId::Bitcoin => { use bitcoin_serai::rpc::Rpc; // Bitcoin's Rpc::new will test the connection if handle.block_on(Rpc::new(rpc_url.clone())).is_ok() { break; } } ExternalNetworkId::Ethereum => 
{ use std::sync::Arc; use ethereum_serai::{ alloy::{ simple_request_transport::SimpleRequest, rpc_client::ClientBuilder, provider::{Provider, RootProvider}, network::Ethereum, }, deployer::Deployer, }; let provider = Arc::new(RootProvider::<_, Ethereum>::new( ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true), )); if handle .block_on(provider.raw_request::<_, ()>("evm_setAutomine".into(), [false])) .is_ok() { handle.block_on(async { // Deploy the deployer let tx = Deployer::deployment_tx(); let signer = tx.recover_signer().unwrap(); let (tx, sig, _) = tx.into_parts(); provider .raw_request::<_, ()>( "anvil_setBalance".into(), [signer.to_string(), (u128::from(tx.gas_limit) * tx.gas_price).to_string()], ) .await .unwrap(); let mut bytes = vec![]; tx.encode_with_signature_fields(&sig, &mut bytes); let _ = provider.send_raw_transaction(&bytes).await.unwrap(); provider.raw_request::<_, ()>("anvil_mine".into(), [96]).await.unwrap(); let _ = Deployer::new(provider.clone()).await.unwrap().unwrap(); // Sleep until the actual time is ahead of whatever time is in the epoch we just // mined tokio::time::sleep(core::time::Duration::from_secs(30)).await; }); break; } } ExternalNetworkId::Monero => { use monero_simple_request_rpc::SimpleRequestRpc; use monero_wallet::rpc::Rpc; // Monero's won't, so call get_height if handle .block_on(SimpleRequestRpc::new(rpc_url.clone())) .ok() .and_then(|rpc| handle.block_on(rpc.get_height()).ok()) .is_some() { break; } } } println!("external network RPC has yet to boot, waiting 1 sec, attempt {iters}"); handle.block_on(tokio::time::sleep(core::time::Duration::from_secs(1))); iters += 1; } if iters == 60 { panic!("couldn't connect to external network {network:?} after 60s"); } }) .join() .unwrap(); } res } /// Send a message to a processor as its coordinator. 
pub async fn send_message(&mut self, msg: impl Into) { let msg: CoordinatorMessage = msg.into(); self .queue .queue( Metadata { from: Service::Coordinator, to: Service::Processor(self.network), intent: msg.intent(), }, borsh::to_vec(&msg).unwrap(), ) .await; self.next_send_id += 1; } /// Receive a message from a processor as its coordinator. pub async fn recv_message(&mut self) -> ProcessorMessage { let msg = tokio::time::timeout( core::time::Duration::from_secs(20), self.queue.next(Service::Processor(self.network)), ) .await .unwrap(); assert_eq!(msg.from, Service::Processor(self.network)); assert_eq!(msg.id, self.next_recv_id); self.queue.ack(Service::Processor(self.network), msg.id).await; self.next_recv_id += 1; borsh::from_slice(&msg.msg).unwrap() } pub async fn add_block(&self, ops: &DockerOperations) -> ([u8; 32], Vec) { let rpc_url = network_rpc(self.network, ops, &self.network_handle); match self.network { ExternalNetworkId::Bitcoin => { use bitcoin_serai::{ bitcoin::{consensus::Encodable, network::Network, Script, Address}, rpc::Rpc, }; // Mine a block let rpc = Rpc::new(rpc_url).await.expect("couldn't connect to the Bitcoin RPC"); rpc .rpc_call::>( "generatetoaddress", serde_json::json!([1, Address::p2sh(Script::new(), Network::Regtest).unwrap()]), ) .await .unwrap(); // Get it so we can return it let hash = rpc.get_block_hash(rpc.get_latest_block_number().await.unwrap()).await.unwrap(); let block = rpc.get_block(&hash).await.unwrap(); let mut block_buf = vec![]; block.consensus_encode(&mut block_buf).unwrap(); (hash, block_buf) } ExternalNetworkId::Ethereum => { use ethereum_serai::alloy::{ simple_request_transport::SimpleRequest, rpc_types::{BlockTransactionsKind, BlockNumberOrTag}, rpc_client::ClientBuilder, provider::{Provider, RootProvider}, network::Ethereum, }; let provider = RootProvider::<_, Ethereum>::new( ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true), ); let start = provider 
.get_block(BlockNumberOrTag::Latest.into(), BlockTransactionsKind::Hashes) .await .unwrap() .unwrap() .header .number; // We mine 96 blocks to mine one epoch, then cause its finalization provider.raw_request::<_, ()>("anvil_mine".into(), [96]).await.unwrap(); let end_of_epoch = start + 31; let hash = provider .get_block(BlockNumberOrTag::Number(end_of_epoch).into(), BlockTransactionsKind::Hashes) .await .unwrap() .unwrap() .header .hash; let state = provider .raw_request::<_, String>("anvil_dumpState".into(), ()) .await .unwrap() .into_bytes(); (hash.into(), state) } ExternalNetworkId::Monero => { use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, scalar::Scalar}; use monero_simple_request_rpc::SimpleRequestRpc; use monero_wallet::{rpc::Rpc, address::Network, ViewPair}; let rpc = SimpleRequestRpc::new(rpc_url).await.expect("couldn't connect to the Monero RPC"); rpc .generate_blocks( &ViewPair::new(ED25519_BASEPOINT_POINT, Zeroizing::new(Scalar::ONE)) .unwrap() .legacy_address(Network::Mainnet), 1, ) .await .unwrap(); let hash = rpc.get_block_hash(rpc.get_height().await.unwrap() - 1).await.unwrap(); (hash, rpc.get_block(hash).await.unwrap().serialize()) } } } pub async fn sync(&self, ops: &DockerOperations, others: &[Coordinator]) { let rpc_url = network_rpc(self.network, ops, &self.network_handle); match self.network { ExternalNetworkId::Bitcoin => { use bitcoin_serai::{bitcoin::consensus::Encodable, rpc::Rpc}; let rpc = Rpc::new(rpc_url).await.expect("couldn't connect to the Bitcoin RPC"); let to = rpc.get_latest_block_number().await.unwrap(); for coordinator in others { let other_rpc = Rpc::new(network_rpc(self.network, ops, &coordinator.network_handle)) .await .expect("couldn't connect to the Bitcoin RPC"); let from = other_rpc.get_latest_block_number().await.unwrap() + 1; for b in from ..= to { let mut buf = vec![]; rpc .get_block(&rpc.get_block_hash(b).await.unwrap()) .await .unwrap() .consensus_encode(&mut buf) .unwrap(); let res: Option = other_rpc 
.rpc_call("submitblock", serde_json::json!([hex::encode(buf)])) .await .unwrap(); if let Some(err) = res { panic!("submitblock failed: {err}"); } } } } ExternalNetworkId::Ethereum => { use ethereum_serai::alloy::{ simple_request_transport::SimpleRequest, rpc_types::{BlockTransactionsKind, BlockNumberOrTag}, rpc_client::ClientBuilder, provider::{Provider, RootProvider}, network::Ethereum, }; let (expected_number, state) = { let provider = RootProvider::<_, Ethereum>::new( ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true), ); let expected_number = provider .get_block(BlockNumberOrTag::Latest.into(), BlockTransactionsKind::Hashes) .await .unwrap() .unwrap() .header .number; ( expected_number, provider.raw_request::<_, String>("anvil_dumpState".into(), ()).await.unwrap(), ) }; for coordinator in others { let rpc_url = network_rpc(coordinator.network, ops, &coordinator.network_handle); let provider = RootProvider::<_, Ethereum>::new( ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true), ); assert!(provider .raw_request::<_, bool>("anvil_loadState".into(), &[&state]) .await .unwrap()); let new_number = provider .get_block(BlockNumberOrTag::Latest.into(), BlockTransactionsKind::Hashes) .await .unwrap() .unwrap() .header .number; // TODO: https://github.com/foundry-rs/foundry/issues/7955 let _ = expected_number; let _ = new_number; //assert_eq!(expected_number, new_number); } } ExternalNetworkId::Monero => { use monero_simple_request_rpc::SimpleRequestRpc; use monero_wallet::rpc::Rpc; let rpc = SimpleRequestRpc::new(rpc_url).await.expect("couldn't connect to the Monero RPC"); let to = rpc.get_height().await.unwrap(); for coordinator in others { let other_rpc = SimpleRequestRpc::new(network_rpc( coordinator.network, ops, &coordinator.network_handle, )) .await .expect("couldn't connect to the Monero RPC"); let from = other_rpc.get_height().await.unwrap(); for b in from .. 
to { let block = rpc.get_block(rpc.get_block_hash(b).await.unwrap()).await.unwrap().serialize(); let res: serde_json::Value = other_rpc .json_rpc_call("submit_block", Some(serde_json::json!([hex::encode(block)]))) .await .unwrap(); let err = res.get("error"); if err.is_some() && (err.unwrap() != &serde_json::Value::Null) { panic!("failed to submit Monero block: {res}"); } } } } } } pub async fn publish_transaction(&self, ops: &DockerOperations, tx: &[u8]) { let rpc_url = network_rpc(self.network, ops, &self.network_handle); match self.network { ExternalNetworkId::Bitcoin => { use bitcoin_serai::{ bitcoin::{consensus::Decodable, Transaction}, rpc::Rpc, }; let rpc = Rpc::new(rpc_url).await.expect("couldn't connect to the coordinator's Bitcoin RPC"); rpc.send_raw_transaction(&Transaction::consensus_decode(&mut &*tx).unwrap()).await.unwrap(); } ExternalNetworkId::Ethereum => { use ethereum_serai::alloy::{ simple_request_transport::SimpleRequest, rpc_client::ClientBuilder, provider::{Provider, RootProvider}, network::Ethereum, }; let provider = RootProvider::<_, Ethereum>::new( ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true), ); let _ = provider.send_raw_transaction(tx).await.unwrap(); } ExternalNetworkId::Monero => { use monero_simple_request_rpc::SimpleRequestRpc; use monero_wallet::{transaction::Transaction, rpc::Rpc}; let rpc = SimpleRequestRpc::new(rpc_url) .await .expect("couldn't connect to the coordinator's Monero RPC"); rpc.publish_transaction(&Transaction::read(&mut &*tx).unwrap()).await.unwrap(); } } } pub async fn publish_eventuality_completion(&self, ops: &DockerOperations, tx: &[u8]) { match self.network { ExternalNetworkId::Bitcoin | ExternalNetworkId::Monero => { self.publish_transaction(ops, tx).await } ExternalNetworkId::Ethereum => (), } } pub async fn get_published_transaction( &self, ops: &DockerOperations, tx: &[u8], ) -> Option> { let rpc_url = network_rpc(self.network, ops, &self.network_handle); match self.network { 
ExternalNetworkId::Bitcoin => { use bitcoin_serai::{bitcoin::consensus::Encodable, rpc::Rpc}; let rpc = Rpc::new(rpc_url).await.expect("couldn't connect to the coordinator's Bitcoin RPC"); // Bitcoin publishes a 0-byte TX ID to reduce variables // Accordingly, read the mempool to find the (presumed relevant) TX let entries: Vec = rpc.rpc_call("getrawmempool", serde_json::json!([false])).await.unwrap(); assert_eq!(entries.len(), 1, "more than one entry in the mempool, so unclear which to get"); let mut hash = [0; 32]; hash.copy_from_slice(&hex::decode(&entries[0]).unwrap()); if let Ok(tx) = rpc.get_transaction(&hash).await { let mut buf = vec![]; tx.consensus_encode(&mut buf).unwrap(); Some(buf) } else { None } } ExternalNetworkId::Ethereum => { /* let provider = RootProvider::<_, Ethereum>::new( ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true), ); let mut hash = [0; 32]; hash.copy_from_slice(tx); let tx = provider.get_transaction_by_hash(hash.into()).await.unwrap()?; let (tx, sig, _) = Signed::::try_from(tx).unwrap().into_parts(); let mut bytes = vec![]; tx.encode_with_signature_fields(&sig, &mut bytes); Some(bytes) */ // This is being passed a signature. We need to check the relayer has a TX with this // signature use tokio::{ io::{AsyncReadExt, AsyncWriteExt}, net::TcpStream, }; let (ip, port) = ops.handle(&self.relayer_handle).host_port(20831).unwrap(); let relayer_url = format!("{ip}:{port}"); let mut socket = TcpStream::connect(&relayer_url).await.unwrap(); // Iterate over every published command for i in 1 .. u32::MAX { socket.write_all(&i.to_le_bytes()).await.unwrap(); let mut recvd_len = [0; 4]; socket.read_exact(&mut recvd_len).await.unwrap(); if recvd_len == [0; 4] { break; } let mut msg = vec![0; usize::try_from(u32::from_le_bytes(recvd_len)).unwrap()]; socket.read_exact(&mut msg).await.unwrap(); for start_pos in 0 .. msg.len() { if (start_pos + tx.len()) > msg.len() { break; } if &msg[start_pos .. 
(start_pos + tx.len())] == tx { return Some(msg); } } } None } ExternalNetworkId::Monero => { use monero_simple_request_rpc::SimpleRequestRpc; use monero_wallet::rpc::Rpc; let rpc = SimpleRequestRpc::new(rpc_url) .await .expect("couldn't connect to the coordinator's Monero RPC"); let mut hash = [0; 32]; hash.copy_from_slice(tx); if let Ok(tx) = rpc.get_transaction(hash).await { Some(tx.serialize()) } else { None } } } } } ================================================ FILE: tests/processor/src/networks.rs ================================================ #![allow(deprecated)] use zeroize::Zeroizing; use rand_core::{RngCore, OsRng}; use scale::Encode; use serai_client::{ in_instructions::primitives::{InInstruction, RefundableInInstruction, Shorthand}, primitives::{Amount, ExternalAddress, ExternalBalance, ExternalCoin, ExternalNetworkId}, validator_sets::primitives::ExternalKey, }; use dockertest::{PullPolicy, Image, StartPolicy, TestBodySpecification, DockerOperations}; use crate::*; pub const RPC_USER: &str = "serai"; pub const RPC_PASS: &str = "seraidex"; pub const BTC_PORT: u32 = 8332; pub const ETH_PORT: u32 = 8545; pub const XMR_PORT: u32 = 18081; pub fn bitcoin_instance() -> (TestBodySpecification, u32) { serai_docker_tests::build("bitcoin".to_string()); let composition = TestBodySpecification::with_image( Image::with_repository("serai-dev-bitcoin").pull_policy(PullPolicy::Never), ) .set_publish_all_ports(true); (composition, BTC_PORT) } pub fn ethereum_instance() -> (TestBodySpecification, u32) { serai_docker_tests::build("ethereum".to_string()); let composition = TestBodySpecification::with_image( Image::with_repository("serai-dev-ethereum").pull_policy(PullPolicy::Never), ) .set_start_policy(StartPolicy::Strict) .set_publish_all_ports(true); (composition, ETH_PORT) } pub fn monero_instance() -> (TestBodySpecification, u32) { serai_docker_tests::build("monero".to_string()); let composition = TestBodySpecification::with_image( 
Image::with_repository("serai-dev-monero").pull_policy(PullPolicy::Never), ) .set_start_policy(StartPolicy::Strict) .set_publish_all_ports(true); (composition, XMR_PORT) } pub fn network_instance(network: ExternalNetworkId) -> (TestBodySpecification, u32) { match network { ExternalNetworkId::Bitcoin => bitcoin_instance(), ExternalNetworkId::Ethereum => ethereum_instance(), ExternalNetworkId::Monero => monero_instance(), } } pub fn network_rpc(network: ExternalNetworkId, ops: &DockerOperations, handle: &str) -> String { let (ip, port) = ops .handle(handle) .host_port(match network { ExternalNetworkId::Bitcoin => BTC_PORT, ExternalNetworkId::Ethereum => ETH_PORT, ExternalNetworkId::Monero => XMR_PORT, }) .unwrap(); format!("http://{RPC_USER}:{RPC_PASS}@{ip}:{port}") } pub fn confirmations(network: ExternalNetworkId) -> usize { use processor::networks::*; match network { ExternalNetworkId::Bitcoin => Bitcoin::CONFIRMATIONS, ExternalNetworkId::Ethereum => Ethereum::::CONFIRMATIONS, ExternalNetworkId::Monero => Monero::CONFIRMATIONS, } } #[derive(Clone)] pub enum Wallet { Bitcoin { private_key: bitcoin_serai::bitcoin::PrivateKey, public_key: bitcoin_serai::bitcoin::PublicKey, input_tx: bitcoin_serai::bitcoin::Transaction, }, Ethereum { rpc_url: String, key: ::F, nonce: u64, }, Monero { handle: String, spend_key: Zeroizing, view_pair: monero_wallet::ViewPair, last_tx: (usize, [u8; 32]), }, } // TODO: Merge these functions with the processor's tests, which offers very similar functionality impl Wallet { pub async fn new(network: ExternalNetworkId, ops: &DockerOperations, handle: String) -> Wallet { let rpc_url = network_rpc(network, ops, &handle); match network { ExternalNetworkId::Bitcoin => { use bitcoin_serai::{ bitcoin::{ secp256k1::{SECP256K1, SecretKey}, PrivateKey, PublicKey, ScriptBuf, Network, Address, }, rpc::Rpc, }; let secret_key = SecretKey::new(&mut rand_core::OsRng); let private_key = PrivateKey::new(secret_key, Network::Regtest); let public_key = 
PublicKey::from_private_key(SECP256K1, &private_key); let main_addr = Address::p2pkh(public_key, Network::Regtest); let rpc = Rpc::new(rpc_url).await.expect("couldn't connect to the Bitcoin RPC"); let new_block = rpc.get_latest_block_number().await.unwrap() + 1; rpc .rpc_call::>("generatetoaddress", serde_json::json!([1, main_addr])) .await .unwrap(); // Mine it to maturity rpc .rpc_call::>( "generatetoaddress", serde_json::json!([100, Address::p2sh(&ScriptBuf::new(), Network::Regtest).unwrap()]), ) .await .unwrap(); let funds = rpc .get_block(&rpc.get_block_hash(new_block).await.unwrap()) .await .unwrap() .txdata .swap_remove(0); Wallet::Bitcoin { private_key, public_key, input_tx: funds } } ExternalNetworkId::Ethereum => { use ciphersuite::group::ff::Field; use ciphersuite_kp256::Secp256k1; use ethereum_serai::alloy::{ primitives::{U256, Address}, simple_request_transport::SimpleRequest, rpc_client::ClientBuilder, provider::{Provider, RootProvider}, network::Ethereum, }; let key = ::F::random(&mut OsRng); let address = ethereum_serai::crypto::address(&(::generator() * key)); let provider = RootProvider::<_, Ethereum>::new( ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true), ); provider .raw_request::<_, ()>( "anvil_setBalance".into(), [Address(address.into()).to_string(), { let nine_decimals = U256::from(1_000_000_000u64); (U256::from(100u64) * nine_decimals * nine_decimals).to_string() }], ) .await .unwrap(); Wallet::Ethereum { rpc_url: rpc_url.clone(), key, nonce: 0 } } ExternalNetworkId::Monero => { use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, scalar::Scalar}; use monero_simple_request_rpc::SimpleRequestRpc; use monero_wallet::{rpc::Rpc, address::Network, ViewPair}; let spend_key = Scalar::random(&mut OsRng); let view_key = Scalar::random(&mut OsRng); let view_pair = ViewPair::new(ED25519_BASEPOINT_POINT * spend_key, Zeroizing::new(view_key)).unwrap(); let rpc = SimpleRequestRpc::new(rpc_url).await.expect("couldn't 
connect to the Monero RPC"); let height = rpc.get_height().await.unwrap(); // Mines 200 blocks so sufficient decoys exist, as only 60 is needed for maturity rpc.generate_blocks(&view_pair.legacy_address(Network::Mainnet), 200).await.unwrap(); let block = rpc.get_block(rpc.get_block_hash(height).await.unwrap()).await.unwrap(); Wallet::Monero { handle, spend_key: Zeroizing::new(spend_key), view_pair, last_tx: (height, block.miner_transaction.hash()), } } } } pub async fn send_to_address( &mut self, ops: &DockerOperations, to: &ExternalKey, instruction: Option, ) -> (Vec, ExternalBalance) { match self { Wallet::Bitcoin { private_key, public_key, ref mut input_tx } => { use bitcoin_serai::bitcoin::{ secp256k1::{SECP256K1, Message}, key::{XOnlyPublicKey, TweakedPublicKey}, consensus::Encodable, sighash::{EcdsaSighashType, SighashCache}, script::{PushBytesBuf, Script, ScriptBuf, Builder}, OutPoint, Sequence, Witness, TxIn, Amount, TxOut, absolute::LockTime, transaction::{Version, Transaction}, }; const AMOUNT: u64 = 100000000; let mut tx = Transaction { version: Version(2), lock_time: LockTime::ZERO, input: vec![TxIn { previous_output: OutPoint { txid: input_tx.compute_txid(), vout: 0 }, script_sig: Script::new().into(), sequence: Sequence(u32::MAX), witness: Witness::default(), }], output: vec![ TxOut { value: Amount::from_sat(input_tx.output[0].value.to_sat() - AMOUNT - 10000), script_pubkey: input_tx.output[0].script_pubkey.clone(), }, TxOut { value: Amount::from_sat(AMOUNT), script_pubkey: ScriptBuf::new_p2tr_tweaked( TweakedPublicKey::dangerous_assume_tweaked( XOnlyPublicKey::from_slice(&to[1 ..]).unwrap(), ), ), }, ], }; if let Some(instruction) = instruction { tx.output.push(TxOut { value: Amount::ZERO, script_pubkey: ScriptBuf::new_op_return( PushBytesBuf::try_from( Shorthand::Raw(RefundableInInstruction { origin: None, instruction }).encode(), ) .unwrap(), ), }); } let mut der = SECP256K1 .sign_ecdsa_low_r( &Message::from_digest_slice( SighashCache::new(&tx) 
.legacy_signature_hash( 0, &input_tx.output[0].script_pubkey, EcdsaSighashType::All.to_u32(), ) .unwrap() .to_raw_hash() .as_ref(), ) .unwrap(), &private_key.inner, ) .serialize_der() .to_vec(); der.push(1); tx.input[0].script_sig = Builder::new() .push_slice(PushBytesBuf::try_from(der).unwrap()) .push_key(public_key) .into_script(); let mut buf = vec![]; tx.consensus_encode(&mut buf).unwrap(); *input_tx = tx; (buf, ExternalBalance { coin: ExternalCoin::Bitcoin, amount: Amount(AMOUNT) }) } Wallet::Ethereum { rpc_url, key, ref mut nonce } => { use std::sync::Arc; use ethereum_serai::{ alloy::{ primitives::{U256, Parity, Signature, TxKind}, sol_types::SolCall, simple_request_transport::SimpleRequest, consensus::{TxLegacy, SignableTransaction}, rpc_client::ClientBuilder, provider::{Provider, RootProvider}, network::Ethereum, }, crypto::PublicKey, deployer::Deployer, }; let eight_decimals = U256::from(100_000_000u64); let nine_decimals = eight_decimals * U256::from(10u64); let eighteen_decimals = nine_decimals * nine_decimals; let one_eth = eighteen_decimals; let provider = Arc::new(RootProvider::<_, Ethereum>::new( ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true), )); let to_as_key = PublicKey::new( ::read_G(&mut to.as_slice()).unwrap(), ) .unwrap(); let router_addr = { // Find the deployer let deployer = Deployer::new(provider.clone()).await.unwrap().unwrap(); // Find the router, deploying if non-existent let router = if let Some(router) = deployer.find_router(provider.clone(), &to_as_key).await.unwrap() { router } else { let mut tx = deployer.deploy_router(&to_as_key); tx.gas_price = 1_000_000_000u64.into(); let tx = ethereum_serai::crypto::deterministically_sign(&tx); let signer = tx.recover_signer().unwrap(); let (tx, sig, _) = tx.into_parts(); provider .raw_request::<_, ()>( "anvil_setBalance".into(), [signer.to_string(), (u128::from(tx.gas_limit) * tx.gas_price).to_string()], ) .await .unwrap(); let mut bytes = vec![]; 
tx.encode_with_signature_fields(&sig, &mut bytes); let _ = provider.send_raw_transaction(&bytes).await.unwrap(); provider.raw_request::<_, ()>("anvil_mine".into(), [96]).await.unwrap(); deployer.find_router(provider.clone(), &to_as_key).await.unwrap().unwrap() }; router.address() }; let tx = TxLegacy { chain_id: None, nonce: *nonce, gas_price: 1_000_000_000u128, gas_limit: 200_000, to: TxKind::Call(router_addr.into()), // 1 ETH value: one_eth, input: ethereum_serai::router::abi::inInstructionCall::new(( [0; 20].into(), one_eth, if let Some(instruction) = instruction { Shorthand::Raw(RefundableInInstruction { origin: None, instruction }).encode().into() } else { vec![].into() }, )) .abi_encode() .into(), }; *nonce += 1; let sig = k256::ecdsa::SigningKey::from(k256::elliptic_curve::NonZeroScalar::new(*key).unwrap()) .sign_prehash_recoverable(tx.signature_hash().as_ref()) .unwrap(); let mut bytes = vec![]; let parity = Parity::NonEip155(Parity::from(sig.1).y_parity()); tx.encode_with_signature_fields(&Signature::from(sig).with_parity(parity), &mut bytes); // We drop the bottom 10 decimals ( bytes, ExternalBalance { coin: ExternalCoin::Ether, amount: Amount(u64::try_from(eight_decimals).unwrap()), }, ) } Wallet::Monero { handle, ref spend_key, ref view_pair, ref mut last_tx } => { use curve25519_dalek::constants::ED25519_BASEPOINT_POINT; use monero_simple_request_rpc::SimpleRequestRpc; use monero_wallet::{ io::decompress_point, ringct::RctType, rpc::{FeePriority, Rpc}, address::{Network, AddressType, Address}, Scanner, OutputWithDecoys, send::{Change, SignableTransaction}, }; use processor::{additional_key, networks::Monero}; let rpc_url = network_rpc(ExternalNetworkId::Monero, ops, handle); let rpc = SimpleRequestRpc::new(rpc_url).await.expect("couldn't connect to the Monero RPC"); // Prepare inputs let current_height = rpc.get_height().await.unwrap(); let mut outputs = vec![]; for block in last_tx.0 .. 
current_height { let block = rpc.get_block_by_number(block).await.unwrap(); if (block.miner_transaction.hash() == last_tx.1) || block.transactions.contains(&last_tx.1) { outputs = Scanner::new(view_pair.clone()) .scan(rpc.get_scannable_block(block).await.unwrap()) .unwrap() .ignore_additional_timelock(); } } assert!(!outputs.is_empty()); let mut inputs = Vec::with_capacity(outputs.len()); for output in outputs { inputs.push( OutputWithDecoys::fingerprintable_deterministic_new( &mut OsRng, &rpc, 16, rpc.get_height().await.unwrap(), output, ) .await .unwrap(), ); } let to_spend_key = decompress_point(<[u8; 32]>::try_from(to.as_ref()).unwrap()).unwrap(); let to_view_key = additional_key::(0); let to_addr = Address::new( Network::Mainnet, AddressType::Featured { subaddress: false, payment_id: None, guaranteed: true }, to_spend_key, ED25519_BASEPOINT_POINT * to_view_key.0, ); // Create and sign the TX const AMOUNT: u64 = 1_000_000_000_000; let mut data = vec![]; if let Some(instruction) = instruction { data.push(Shorthand::Raw(RefundableInInstruction { origin: None, instruction }).encode()); } let mut outgoing_view_key = Zeroizing::new([0; 32]); OsRng.fill_bytes(outgoing_view_key.as_mut()); let tx = SignableTransaction::new( RctType::ClsagBulletproofPlus, outgoing_view_key, inputs, vec![(to_addr, AMOUNT)], Change::new(view_pair.clone(), None), data, rpc.get_fee_rate(FeePriority::Unimportant).await.unwrap(), ) .unwrap() .sign(&mut OsRng, spend_key) .unwrap(); // Update the last TX to track the change output last_tx.0 = current_height; last_tx.1 = tx.hash(); (tx.serialize(), ExternalBalance { coin: ExternalCoin::Monero, amount: Amount(AMOUNT) }) } } } pub fn address(&self) -> ExternalAddress { use serai_client::networks; match self { Wallet::Bitcoin { public_key, .. } => { use bitcoin_serai::bitcoin::ScriptBuf; ExternalAddress::new( networks::bitcoin::Address::new(ScriptBuf::new_p2pkh(&public_key.pubkey_hash())) .unwrap() .into(), ) .unwrap() } Wallet::Ethereum { key, .. 
} => ExternalAddress::new( ethereum_serai::crypto::address(&(ciphersuite_kp256::Secp256k1::generator() * key)).into(), ) .unwrap(), Wallet::Monero { view_pair, .. } => { use monero_wallet::address::Network; ExternalAddress::new( networks::monero::Address::new(view_pair.legacy_address(Network::Mainnet)) .unwrap() .into(), ) .unwrap() } } } } ================================================ FILE: tests/processor/src/tests/batch.rs ================================================ use std::{ collections::HashMap, time::{SystemTime, Duration}, }; use dkg::Participant; use messages::{coordinator::*, SubstrateContext}; use serai_client::{ in_instructions::primitives::{ batch_message, Batch, InInstruction, InInstructionWithBalance, SignedBatch, }, primitives::{ crypto::RuntimePublic, Amount, BlockHash, ExternalBalance, ExternalNetworkId, PublicKey, SeraiAddress, EXTERNAL_NETWORKS, }, validator_sets::primitives::Session, }; use serai_db::MemDb; use processor::networks::{Network, Bitcoin, Ethereum, Monero}; use crate::{*, tests::*}; pub(crate) async fn recv_batch_preprocesses( coordinators: &mut [Coordinator], session: Session, batch: &Batch, attempt: u32, ) -> (SubstrateSignId, HashMap) { let id = SubstrateSignId { session, id: SubstrateSignableId::Batch(batch.id), attempt }; let mut block = None; let mut preprocesses = HashMap::new(); for (i, coordinator) in coordinators.iter_mut().enumerate() { let i = Participant::new(u16::try_from(i).unwrap() + 1).unwrap(); if attempt == 0 { match coordinator.recv_message().await { messages::ProcessorMessage::Substrate(messages::substrate::ProcessorMessage::Batch { batch: sent_batch, }) => { assert_eq!(&sent_batch, batch); } _ => panic!("processor didn't send batch"), } } match coordinator.recv_message().await { messages::ProcessorMessage::Coordinator( messages::coordinator::ProcessorMessage::BatchPreprocess { id: this_id, block: this_block, preprocesses: mut these_preprocesses, }, ) => { assert_eq!(this_id, id); if block.is_none() { 
block = Some(this_block);
          }
          // Every coordinator must report the same block for this batch
          assert_eq!(&this_block, block.as_ref().unwrap());
          // Each processor contributes exactly one preprocess
          assert_eq!(these_preprocesses.len(), 1);
          preprocesses.insert(i, these_preprocesses.swap_remove(0));
        }
        _ => panic!("processor didn't send batch preprocess"),
      }
  }

  // Reduce the preprocesses down to the threshold
  // (randomly evict participants until only THRESHOLD remain, so signing proceeds with a
  // minimal signing set)
  while preprocesses.len() > THRESHOLD {
    preprocesses.remove(
      &Participant::new(
        u16::try_from(OsRng.next_u64() % u64::try_from(COORDINATORS).unwrap()).unwrap() + 1,
      )
      .unwrap(),
    );
  }

  (id, preprocesses)
}

// Drives the batch-signing protocol to completion across the coordinators which contributed a
// preprocess, returning the resulting SignedBatch.
//
// `key` is the raw public key the batch's signature is verified against (in the caller, on the
// following chunk of this flow). Panics if any processor deviates from the expected message flow.
pub(crate) async fn sign_batch(
  coordinators: &mut [Coordinator],
  key: [u8; 32],
  id: SubstrateSignId,
  preprocesses: HashMap,
) -> SignedBatch {
  assert_eq!(preprocesses.len(), THRESHOLD);

  // Forward every other participant's preprocess to each selected signer
  for (i, coordinator) in coordinators.iter_mut().enumerate() {
    // Participant indices are 1-based
    let i = Participant::new(u16::try_from(i).unwrap() + 1).unwrap();
    if preprocesses.contains_key(&i) {
      coordinator
        .send_message(messages::coordinator::CoordinatorMessage::SubstratePreprocesses {
          id: id.clone(),
          preprocesses: clone_without(&preprocesses, &i),
        })
        .await;
    }
  }

  // Collect each selected signer's share
  let mut shares = HashMap::new();
  for (i, coordinator) in coordinators.iter_mut().enumerate() {
    let i = Participant::new(u16::try_from(i).unwrap() + 1).unwrap();
    if preprocesses.contains_key(&i) {
      match coordinator.recv_message().await {
        messages::ProcessorMessage::Coordinator(
          messages::coordinator::ProcessorMessage::SubstrateShare {
            id: this_id,
            shares: mut these_shares,
          },
        ) => {
          assert_eq!(&this_id, &id);
          assert_eq!(these_shares.len(), 1);
          shares.insert(i, these_shares.swap_remove(0));
        }
        _ => panic!("processor didn't send batch share"),
      }
    }
  }

  // Forward every other participant's share to each selected signer
  for (i, coordinator) in coordinators.iter_mut().enumerate() {
    let i = Participant::new(u16::try_from(i).unwrap() + 1).unwrap();
    if preprocesses.contains_key(&i) {
      coordinator
        .send_message(messages::coordinator::CoordinatorMessage::SubstrateShares {
          id: id.clone(),
          shares: clone_without(&shares, &i),
        })
        .await;
    }
  }

  // The selected processors should yield the batch
  let mut batch = None;
  for (i, coordinator) in
coordinators.iter_mut().enumerate()
  {
    let i = Participant::new(u16::try_from(i).unwrap() + 1).unwrap();
    if preprocesses.contains_key(&i) {
      match coordinator.recv_message().await {
        messages::ProcessorMessage::Substrate(
          messages::substrate::ProcessorMessage::SignedBatch { batch: this_batch },
        ) => {
          // Verify the first returned batch's signature against `key`, then assert every other
          // processor returned the identical batch
          if batch.is_none() {
            assert!(PublicKey::from_raw(key)
              .verify(&batch_message(&this_batch.batch), &this_batch.signature));
            batch = Some(this_batch.clone());
          }
          assert_eq!(batch.as_ref().unwrap(), &this_batch);
        }
        _ => panic!("processor didn't send batch"),
      }
    }
  }

  batch.unwrap()
}

// Sends a SubstrateBlock message to a single coordinator and returns the plan IDs the processor
// acknowledges it with.
//
// Panics if `block` isn't a SubstrateBlock message, or if the processor doesn't respond with a
// SubstrateBlockAck for the same block number.
pub(crate) async fn substrate_block(
  coordinator: &mut Coordinator,
  block: messages::substrate::CoordinatorMessage,
) -> Vec {
  match block.clone() {
    messages::substrate::CoordinatorMessage::SubstrateBlock {
      context: _,
      block: sent_block,
      burns: _,
      batches: _,
    } => {
      coordinator.send_message(block).await;
      match coordinator.recv_message().await {
        messages::ProcessorMessage::Coordinator(
          messages::coordinator::ProcessorMessage::SubstrateBlockAck { block: recvd_block, plans },
        ) => {
          // The ack must be for the block we just sent
          assert_eq!(recvd_block, sent_block);
          plans
        }
        _ => panic!("coordinator didn't respond to SubstrateBlock with SubstrateBlockAck"),
      }
    }
    _ => panic!("substrate_block message wasn't a SubstrateBlock"),
  }
}

// End-to-end batch flow, run per external network: fund the processor's wallet (once with an
// InInstruction, once without) and check a Batch is produced, signed, and acknowledged.
#[test]
fn batch_test() {
  for network in EXTERNAL_NETWORKS {
    let (coordinators, test) = new_test(network);
    test.run(|ops| async move {
      tokio::time::sleep(Duration::from_secs(1)).await;

      let mut coordinators = coordinators
        .into_iter()
        .map(|(handles, key)| Coordinator::new(network, &ops, handles, key))
        .collect::>();

      // Create a wallet before we start generating keys
      let mut wallet = Wallet::new(network, &ops, coordinators[0].network_handle.clone()).await;
      coordinators[0].sync(&ops, &coordinators[1 ..]).await;

      // Generate keys
      let key_pair = key_gen(&mut coordinators).await;

      // Now we have to mine blocks to activate the key
      // (the first key is activated when the network's time as of a block exceeds the Serai time
      // it was
confirmed at) // Mine multiple sets of medians to ensure the median is sufficiently advanced for _ in 0 .. (10 * confirmations(network)) { coordinators[0].add_block(&ops).await; tokio::time::sleep(Duration::from_secs(1)).await; } coordinators[0].sync(&ops, &coordinators[1 ..]).await; // Run twice, once with an instruction and once without let substrate_block_num = (OsRng.next_u64() % 4_000_000_000u64) + 1; for i in 0 .. 2 { let mut serai_address = [0; 32]; OsRng.fill_bytes(&mut serai_address); let instruction = if i == 0 { Some(InInstruction::Transfer(SeraiAddress(serai_address))) } else { None }; // Send into the processor's wallet let (tx, balance_sent) = wallet.send_to_address(&ops, &key_pair.1, instruction.clone()).await; for coordinator in &mut coordinators { coordinator.publish_transaction(&ops, &tx).await; } // Put the TX past the confirmation depth let mut block_with_tx = None; for _ in 0 .. confirmations(network) { let (hash, _) = coordinators[0].add_block(&ops).await; if block_with_tx.is_none() { block_with_tx = Some(hash); } } coordinators[0].sync(&ops, &coordinators[1 ..]).await; // Sleep for 10s // The scanner works on a 5s interval, so this leaves a few s for any processing/latency tokio::time::sleep(Duration::from_secs(10)).await; println!("sent in transaction. 
with in instruction: {}", instruction.is_some()); let expected_batch = Batch { network, id: i, block: BlockHash(block_with_tx.unwrap()), instructions: if let Some(instruction) = &instruction { vec![InInstructionWithBalance { instruction: instruction.clone(), balance: ExternalBalance { coin: balance_sent.coin, amount: Amount( balance_sent.amount.0 - (2 * match network { ExternalNetworkId::Bitcoin => Bitcoin::COST_TO_AGGREGATE, ExternalNetworkId::Ethereum => Ethereum::::COST_TO_AGGREGATE, ExternalNetworkId::Monero => Monero::COST_TO_AGGREGATE, }), ), }, }] } else { // This shouldn't have an instruction as we didn't add any data into the TX we sent // Empty batches remain valuable as they let us achieve consensus on the block and spend // contained outputs vec![] }, }; println!("receiving batch preprocesses..."); // Make sure the processors picked it up by checking they're trying to sign a batch for it let (mut id, mut preprocesses) = recv_batch_preprocesses(&mut coordinators, Session(0), &expected_batch, 0).await; // Trigger a random amount of re-attempts for attempt in 1 ..= u32::try_from(OsRng.next_u64() % 4).unwrap() { // TODO: Double check how the processor handles this ID field // It should be able to assert its perfectly sequential id.attempt = attempt; for coordinator in &mut coordinators { coordinator .send_message(messages::coordinator::CoordinatorMessage::BatchReattempt { id: id.clone(), }) .await; } (id, preprocesses) = recv_batch_preprocesses(&mut coordinators, Session(0), &expected_batch, attempt).await; } println!("signing batch..."); // Continue with signing the batch let batch = sign_batch(&mut coordinators, key_pair.0 .0, id, preprocesses).await; // Check it assert_eq!(batch.batch, expected_batch); // Fire a SubstrateBlock let serai_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs(); for coordinator in &mut coordinators { let plans = substrate_block( coordinator, messages::substrate::CoordinatorMessage::SubstrateBlock 
{ context: SubstrateContext { serai_time, network_latest_finalized_block: batch.batch.block, }, block: substrate_block_num + u64::from(i), burns: vec![], batches: vec![batch.batch.id], }, ) .await; if instruction.is_some() || (instruction.is_none() && (network == ExternalNetworkId::Monero)) { assert!(plans.is_empty()); } else { // If no instruction was used, and the processor csn presume the origin, it'd have // created a refund Plan assert_eq!(plans.len(), 1); } } } // With the latter InInstruction not existing, we should've triggered a refund if the origin // was detectable // Check this is trying to sign a Plan if network != ExternalNetworkId::Monero { let mut refund_id = None; for coordinator in &mut coordinators { match coordinator.recv_message().await { messages::ProcessorMessage::Sign(messages::sign::ProcessorMessage::Preprocess { id, .. }) => { if refund_id.is_none() { refund_id = Some(id.clone()); } assert_eq!(refund_id.as_ref().unwrap(), &id); } _ => panic!("processor didn't send preprocess for expected refund transaction"), } } } }); } } ================================================ FILE: tests/processor/src/tests/key_gen.rs ================================================ use std::{collections::HashMap, time::SystemTime}; use dkg::{Participant, ThresholdParams}; use serai_client::{ primitives::{BlockHash, PublicKey, EXTERNAL_NETWORKS}, validator_sets::primitives::{KeyPair, Session}, }; use messages::{SubstrateContext, key_gen::KeyGenId, CoordinatorMessage, ProcessorMessage}; use crate::{*, tests::*}; pub(crate) async fn key_gen(coordinators: &mut [Coordinator]) -> KeyPair { // Perform an interaction with all processors via their coordinators async fn interact_with_all< FS: Fn(Participant) -> messages::key_gen::CoordinatorMessage, FR: FnMut(Participant, messages::key_gen::ProcessorMessage), >( coordinators: &mut [Coordinator], message: FS, mut recv: FR, ) { for (i, coordinator) in coordinators.iter_mut().enumerate() { let participant = 
Participant::new(u16::try_from(i + 1).unwrap()).unwrap(); coordinator.send_message(CoordinatorMessage::KeyGen(message(participant))).await; match coordinator.recv_message().await { ProcessorMessage::KeyGen(msg) => recv(participant, msg), _ => panic!("processor didn't return KeyGen message"), } } } // Order a key gen let id = KeyGenId { session: Session(0), attempt: 0 }; let mut commitments = HashMap::new(); interact_with_all( coordinators, |participant| messages::key_gen::CoordinatorMessage::GenerateKey { id, params: ThresholdParams::new( u16::try_from(THRESHOLD).unwrap(), u16::try_from(COORDINATORS).unwrap(), participant, ) .unwrap(), shares: 1, }, |participant, msg| match msg { messages::key_gen::ProcessorMessage::Commitments { id: this_id, commitments: mut these_commitments, } => { assert_eq!(this_id, id); assert_eq!(these_commitments.len(), 1); commitments.insert(participant, these_commitments.swap_remove(0)); } _ => panic!("processor didn't return Commitments in response to GenerateKey"), }, ) .await; // Send the commitments to all parties let mut shares = HashMap::new(); interact_with_all( coordinators, |participant| messages::key_gen::CoordinatorMessage::Commitments { id, commitments: clone_without(&commitments, &participant), }, |participant, msg| match msg { messages::key_gen::ProcessorMessage::Shares { id: this_id, shares: mut these_shares } => { assert_eq!(this_id, id); assert_eq!(these_shares.len(), 1); shares.insert(participant, these_shares.swap_remove(0)); } _ => panic!("processor didn't return Shares in response to GenerateKey"), }, ) .await; // Send the shares let mut substrate_key = None; let mut network_key = None; interact_with_all( coordinators, |participant| messages::key_gen::CoordinatorMessage::Shares { id, shares: vec![shares .iter() .filter_map(|(this_participant, shares)| { shares.get(&participant).cloned().map(|share| (*this_participant, share)) }) .collect()], }, |_, msg| match msg { messages::key_gen::ProcessorMessage::GeneratedKeyPair 
{
          id: this_id,
          substrate_key: this_substrate_key,
          network_key: this_network_key,
        } => {
          assert_eq!(this_id, id);
          // All processors must generate the same key pair; record the first response, then
          // assert every other response matches it
          if substrate_key.is_none() {
            substrate_key = Some(this_substrate_key);
            network_key = Some(this_network_key.clone());
          }
          assert_eq!(substrate_key.unwrap(), this_substrate_key);
          assert_eq!(network_key.as_ref().unwrap(), &this_network_key);
        }
        _ => panic!("processor didn't return GeneratedKeyPair in response to GenerateKey"),
      },
    )
    .await;

  // Confirm the key pair
  // TODO: Better document network_latest_finalized_block's genesis state, and error if a set claims
  // [0; 32] was finalized
  let context = SubstrateContext {
    serai_time: SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs(),
    network_latest_finalized_block: BlockHash([0; 32]),
  };
  let key_pair = KeyPair(
    PublicKey::from_raw(substrate_key.unwrap()),
    network_key.clone().unwrap().try_into().unwrap(),
  );
  // Tell every processor the key pair was confirmed
  for coordinator in coordinators {
    coordinator
      .send_message(CoordinatorMessage::Substrate(
        messages::substrate::CoordinatorMessage::ConfirmKeyPair {
          context,
          session: id.session,
          key_pair: key_pair.clone(),
        },
      ))
      .await;
  }

  key_pair
}

// Runs the key-generation flow against every external network's processor stack.
#[test]
fn key_gen_test() {
  for network in EXTERNAL_NETWORKS {
    let (coordinators, test) = new_test(network);
    test.run(|ops| async move {
      // Sleep for a second for the message-queue to boot
      // It isn't an error to start immediately, it just silences an error
      tokio::time::sleep(core::time::Duration::from_secs(1)).await;

      // Connect to the Message Queues as the coordinator
      let mut coordinators = coordinators
        .into_iter()
        .map(|(handles, key)| Coordinator::new(network, &ops, handles, key))
        .collect::>();

      key_gen(&mut coordinators).await;
    });
  }
}

================================================
FILE: tests/processor/src/tests/mod.rs
================================================

use std::collections::HashMap;

use dalek_ff_group::Ristretto;
use ciphersuite::Ciphersuite;

use dockertest::DockerTest;

use crate::*;

mod key_gen;
pub(crate) use key_gen::key_gen;
mod
batch;
pub(crate) use batch::{recv_batch_preprocesses, sign_batch, substrate_block};
mod send;

// Number of coordinator/processor pairs spun up per test
pub(crate) const COORDINATORS: usize = 4;
// Signing threshold: 2/3 of the coordinators, plus one
pub(crate) const THRESHOLD: usize = ((COORDINATORS * 2) / 3) + 1;

// Clones `map` sans the entry for `without`, panicking if `without` wasn't present.
// Used when forwarding everyone else's messages to a given participant.
fn clone_without(
  map: &HashMap,
  without: &K,
) -> HashMap {
  let mut res = map.clone();
  res.remove(without).unwrap();
  res
}

// Builds the Docker test environment for `network`: one processor stack per coordinator,
// returning each stack's handles and coordinator key alongside the DockerTest to run.
fn new_test(
  network: ExternalNetworkId,
) -> (Vec<(Handles, ::F)>, DockerTest) {
  let mut coordinators = vec![];
  let mut test = DockerTest::new().with_network(dockertest::Network::Isolated);
  let mut eth_handle = None;
  for _ in 0 .. COORDINATORS {
    let (handles, coord_key, compositions) = processor_stack(network, eth_handle.clone());
    // TODO: Remove this once https://github.com/foundry-rs/foundry/issues/7955 is resolved
    // This has all processors share an Ethereum node until we can sync controlled nodes
    if network == ExternalNetworkId::Ethereum {
      eth_handle = eth_handle.or_else(|| Some(handles.0.clone()));
    }
    coordinators.push((handles, coord_key));
    for composition in compositions {
      test.provide_container(composition);
    }
  }
  (coordinators, test)
}

================================================
FILE: tests/processor/src/tests/send.rs
================================================

use std::{
  collections::{HashSet, HashMap},
  time::{SystemTime, Duration},
};

use dkg::Participant;

use messages::{sign::SignId, SubstrateContext};

use serai_client::{
  coins::primitives::{OutInstruction, OutInstructionWithBalance},
  in_instructions::primitives::{Batch, InInstruction, InInstructionWithBalance},
  primitives::{Amount, BlockHash, ExternalBalance, SeraiAddress, EXTERNAL_NETWORKS},
  validator_sets::primitives::Session,
};

use serai_db::MemDb;
use processor::networks::{Network, Bitcoin, Ethereum, Monero};

use crate::{*, tests::*};

// Receives the sign preprocesses from every coordinator for the given session/attempt, then
// randomly reduces the set down to THRESHOLD participants.
#[allow(unused)]
pub(crate) async fn recv_sign_preprocesses(
  coordinators: &mut [Coordinator],
  session: Session,
  attempt: u32,
) -> (SignId, HashMap>) {
  let mut id = None;
  let mut preprocesses = HashMap::new();
  for (i, coordinator) in
coordinators.iter_mut().enumerate()
  {
    // Participant indices are 1-based
    let i = Participant::new(u16::try_from(i).unwrap() + 1).unwrap();
    let msg = coordinator.recv_message().await;
    match msg {
      messages::ProcessorMessage::Sign(messages::sign::ProcessorMessage::Preprocess {
        id: this_id,
        preprocesses: mut these_preprocesses,
      }) => {
        // Record the first ID seen, then assert every processor reports the same one
        if id.is_none() {
          assert_eq!(&this_id.session, &session);
          assert_eq!(this_id.attempt, attempt);
          id = Some(this_id.clone());
        }
        assert_eq!(&this_id, id.as_ref().unwrap());
        assert_eq!(these_preprocesses.len(), 1);
        preprocesses.insert(i, these_preprocesses.swap_remove(0));
      }
      _ => panic!("processor didn't send sign preprocess"),
    }
  }

  // Reduce the preprocesses down to the threshold
  while preprocesses.len() > THRESHOLD {
    preprocesses.remove(
      &Participant::new(
        u16::try_from(OsRng.next_u64() % u64::try_from(COORDINATORS).unwrap()).unwrap() + 1,
      )
      .unwrap(),
    );
  }

  (id.unwrap(), preprocesses)
}

// Drives transaction signing to completion across the coordinators which contributed a
// preprocess, returning the completed transaction's ID (the `tx` field of Completed).
//
// Panics if any processor deviates from the expected preprocess -> share -> Completed flow.
#[allow(unused)]
pub(crate) async fn sign_tx(
  coordinators: &mut [Coordinator],
  session: Session,
  id: SignId,
  preprocesses: HashMap>,
) -> Vec {
  assert_eq!(preprocesses.len(), THRESHOLD);

  // Forward every other participant's preprocess to each selected signer
  for (i, coordinator) in coordinators.iter_mut().enumerate() {
    let i = Participant::new(u16::try_from(i).unwrap() + 1).unwrap();
    if preprocesses.contains_key(&i) {
      coordinator
        .send_message(messages::sign::CoordinatorMessage::Preprocesses {
          id: id.clone(),
          preprocesses: clone_without(&preprocesses, &i),
        })
        .await;
    }
  }

  // Collect each selected signer's share
  let mut shares = HashMap::new();
  for (i, coordinator) in coordinators.iter_mut().enumerate() {
    let i = Participant::new(u16::try_from(i).unwrap() + 1).unwrap();
    if preprocesses.contains_key(&i) {
      match coordinator.recv_message().await {
        messages::ProcessorMessage::Sign(messages::sign::ProcessorMessage::Share {
          id: this_id,
          shares: mut these_shares,
        }) => {
          assert_eq!(&this_id, &id);
          assert_eq!(these_shares.len(), 1);
          shares.insert(i, these_shares.swap_remove(0));
        }
        _ => panic!("processor didn't send TX shares"),
      }
    }
  }

  // Forward every other participant's share to each selected signer
  for (i, coordinator) in coordinators.iter_mut().enumerate() {
    let i =
Participant::new(u16::try_from(i).unwrap() + 1).unwrap();
    if preprocesses.contains_key(&i) {
      coordinator
        .send_message(messages::sign::CoordinatorMessage::Shares {
          id: id.clone(),
          shares: clone_without(&shares, &i),
        })
        .await;
    }
  }

  // The selected processors should yield Completed
  let mut tx = None;
  for (i, coordinator) in coordinators.iter_mut().enumerate() {
    let i = Participant::new(u16::try_from(i).unwrap() + 1).unwrap();
    if preprocesses.contains_key(&i) {
      match coordinator.recv_message().await {
        messages::ProcessorMessage::Sign(messages::sign::ProcessorMessage::Completed {
          session: this_session,
          id: this_id,
          tx: this_tx,
        }) => {
          assert_eq!(session, this_session);
          assert_eq!(&this_id, &id.id);
          // All signers must report the same completing transaction
          if tx.is_none() {
            tx = Some(this_tx.clone());
          }
          assert_eq!(tx.as_ref().unwrap(), &this_tx);
        }
        _ => panic!("processor didn't send Completed"),
      }
    }
  }
  tx.unwrap()
}

// End-to-end send flow, run per external network: mint via a Batch, then burn with an
// OutInstruction and check the processors sign and publish the outbound transaction.
#[test]
fn send_test() {
  for network in EXTERNAL_NETWORKS {
    let (coordinators, test) = new_test(network);
    test.run(|ops| async move {
      tokio::time::sleep(Duration::from_secs(1)).await;

      let mut coordinators = coordinators
        .into_iter()
        .map(|(handles, key)| Coordinator::new(network, &ops, handles, key))
        .collect::>();

      // Create a wallet before we start generating keys
      let mut wallet = Wallet::new(network, &ops, coordinators[0].network_handle.clone()).await;
      coordinators[0].sync(&ops, &coordinators[1 ..]).await;

      // Generate keys
      let key_pair = key_gen(&mut coordinators).await;

      // Now we have to mine blocks to activate the key
      // (the first key is activated when the network's time as of a block exceeds the Serai time
      // it was confirmed at)
      // Mine multiple sets of medians to ensure the median is sufficiently advanced
      for _ in 0 ..
(10 * confirmations(network)) { coordinators[0].add_block(&ops).await; tokio::time::sleep(Duration::from_secs(1)).await; } coordinators[0].sync(&ops, &coordinators[1 ..]).await; // Send into the processor's wallet let mut serai_address = [0; 32]; OsRng.fill_bytes(&mut serai_address); let instruction = InInstruction::Transfer(SeraiAddress(serai_address)); let (tx, balance_sent) = wallet.send_to_address(&ops, &key_pair.1, Some(instruction.clone())).await; for coordinator in &mut coordinators { coordinator.publish_transaction(&ops, &tx).await; } // Put the TX past the confirmation depth let mut block_with_tx = None; for _ in 0 .. confirmations(network) { let (hash, _) = coordinators[0].add_block(&ops).await; if block_with_tx.is_none() { block_with_tx = Some(hash); } } coordinators[0].sync(&ops, &coordinators[1 ..]).await; // Sleep for 10s // The scanner works on a 5s interval, so this leaves a few s for any processing/latency tokio::time::sleep(Duration::from_secs(10)).await; let amount_minted = Amount( balance_sent.amount.0 - (2 * match network { ExternalNetworkId::Bitcoin => Bitcoin::COST_TO_AGGREGATE, ExternalNetworkId::Ethereum => Ethereum::::COST_TO_AGGREGATE, ExternalNetworkId::Monero => Monero::COST_TO_AGGREGATE, }), ); let expected_batch = Batch { network, id: 0, block: BlockHash(block_with_tx.unwrap()), instructions: vec![InInstructionWithBalance { instruction, balance: ExternalBalance { coin: balance_sent.coin, amount: amount_minted }, }], }; // Make sure the proceessors picked it up by checking they're trying to sign a batch for it let (id, preprocesses) = recv_batch_preprocesses(&mut coordinators, Session(0), &expected_batch, 0).await; // Continue with signing the batch let batch = sign_batch(&mut coordinators, key_pair.0 .0, id, preprocesses).await; // Check it assert_eq!(batch.batch, expected_batch); // Fire a SubstrateBlock with a burn let substrate_block_num = (OsRng.next_u64() % 4_000_000_000u64) + 1; let serai_time = 
SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs(); let mut plans = vec![]; for coordinator in &mut coordinators { let these_plans = substrate_block( coordinator, messages::substrate::CoordinatorMessage::SubstrateBlock { context: SubstrateContext { serai_time, network_latest_finalized_block: batch.batch.block, }, block: substrate_block_num, burns: vec![OutInstructionWithBalance { instruction: OutInstruction { address: wallet.address(), data: None }, balance: ExternalBalance { coin: balance_sent.coin, amount: amount_minted }, }], batches: vec![batch.batch.id], }, ) .await; if plans.is_empty() { plans = these_plans; } else { assert_eq!(plans, these_plans); } } assert_eq!(plans.len(), 1); // Start signing the TX let (mut id, mut preprocesses) = recv_sign_preprocesses(&mut coordinators, Session(0), 0).await; assert_eq!(id, SignId { session: Session(0), id: plans[0].id, attempt: 0 }); // Trigger a random amount of re-attempts for attempt in 1 ..= u32::try_from(OsRng.next_u64() % 4).unwrap() { // TODO: Double check how the processor handles this ID field // It should be able to assert its perfectly sequential id.attempt = attempt; for coordinator in &mut coordinators { coordinator .send_message(messages::sign::CoordinatorMessage::Reattempt { id: id.clone() }) .await; } (id, preprocesses) = recv_sign_preprocesses(&mut coordinators, Session(0), attempt).await; } let participating = preprocesses.keys().copied().collect::>(); let tx_id = sign_tx(&mut coordinators, Session(0), id.clone(), preprocesses).await; // Make sure all participating nodes published the TX let participating = participating.iter().map(|p| usize::from(u16::from(*p) - 1)).collect::>(); for participant in &participating { assert!(coordinators[*participant].get_published_transaction(&ops, &tx_id).await.is_some()); } // Publish this transaction to the left out nodes let tx = coordinators[*participating.iter().next().unwrap()] .get_published_transaction(&ops, &tx_id) .await .unwrap(); 
for (i, coordinator) in coordinators.iter_mut().enumerate() { if !participating.contains(&i) { coordinator.publish_eventuality_completion(&ops, &tx).await; // Tell them of it as a completion of the relevant signing nodes coordinator .send_message(messages::sign::CoordinatorMessage::Completed { session: Session(0), id: id.id, tx: tx_id.clone(), }) .await; // Verify they send Completed back match coordinator.recv_message().await { messages::ProcessorMessage::Sign(messages::sign::ProcessorMessage::Completed { session, id: this_id, tx: this_tx, }) => { assert_eq!(session, Session(0)); assert_eq!(&this_id, &id.id); assert_eq!(this_tx, tx_id); } _ => panic!("processor didn't send Completed"), } } } // TODO: Test the Eventuality from the blockchain, instead of from the coordinator // TODO: Test what happens when Completed is sent with a non-existent TX ID // TODO: Test what happens when Completed is sent with a non-completing TX ID }); } } ================================================ FILE: tests/reproducible-runtime/Cargo.toml ================================================ [package] name = "serai-reproducible-runtime-tests" version = "0.1.0" description = "Tests the Serai runtime can be reproducibly built" license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/tests/reproducible-runtime" authors = ["Luke Parker "] keywords = [] edition = "2021" publish = false [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true [dependencies] rand_core = "0.6" hex = "0.4" dockertest = "0.5" serai-docker-tests = { path = "../docker" } tokio = { version = "1", features = ["time"] } ================================================ FILE: tests/reproducible-runtime/LICENSE ================================================ AGPL-3.0-only license Copyright (c) 2023 Luke Parker This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License 
Version 3 as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see . ================================================ FILE: tests/reproducible-runtime/src/lib.rs ================================================ #[test] pub fn reproducibly_builds() { use std::{collections::HashSet, process::Command}; use rand_core::{RngCore, OsRng}; use dockertest::{PullPolicy, Image, TestBodySpecification, DockerTest}; const RUNS: usize = 3; const TIMEOUT: u16 = 180 * 60; // 3 hours serai_docker_tests::build("runtime".to_string()); let mut ids = vec![[0; 8]; RUNS]; for id in &mut ids { OsRng.fill_bytes(id); } let mut test = DockerTest::new().with_network(dockertest::Network::Isolated); for id in &ids { test.provide_container( TestBodySpecification::with_image( Image::with_repository("serai-dev-runtime").pull_policy(PullPolicy::Never), ) .set_handle(format!("runtime-build-{}", hex::encode(id))) .replace_cmd(vec![ "sh".to_string(), "-c".to_string(), // Sleep for a minute after building to prevent the container from closing before we // retrieve the hash "cd /serai/substrate/runtime && cargo clean && cargo build --release && printf \"Runtime hash: \" > hash && sha256sum /serai/target/release/wbuild/serai-runtime/serai_runtime.wasm >> hash && cat hash && sleep 60" .to_string(), ]), ); } test.run(|_| async { let ids = ids; let mut containers = vec![]; for container in String::from_utf8( Command::new("docker").arg("ps").arg("--format").arg("{{.Names}}").output().unwrap().stdout, ) .expect("output wasn't utf-8") .lines() { for id in &ids { if container.contains(&hex::encode(id)) { containers.push(container.trim().to_string()); } } } 
assert_eq!(containers.len(), RUNS, "couldn't find all containers");

    // Poll each build container's logs until every runner reports its runtime hash, or until
    // TIMEOUT seconds elapse (checked in 10s intervals)
    let mut res = vec![None; RUNS];
    'attempt: for _ in 0 .. (TIMEOUT / 10) {
      tokio::time::sleep(core::time::Duration::from_secs(10)).await;
      'runner: for (i, container) in containers.iter().enumerate() {
        // Already have this runner's hash
        if res[i].is_some() {
          continue;
        }

        let logs = Command::new("docker").arg("logs").arg(container).output().unwrap();
        let Some(last_log) =
          std::str::from_utf8(&logs.stdout).expect("output wasn't utf-8").lines().last()
        else {
          continue 'runner;
        };

        // The build command prints "Runtime hash: " followed by the sha256sum output as its
        // final line; splitting on the prefix yields exactly two parts once it's present
        let split = last_log.split("Runtime hash: ").collect::>();
        if split.len() == 2 {
          res[i] = Some(split[1].to_string());
          continue 'runner;
        }
      }

      // Keep polling so long as any runner has yet to report
      for item in &res {
        if item.is_none() {
          continue 'attempt;
        }
      }
      break;
    }

    // If we didn't get results from all runners, panic
    for item in &res {
      if item.is_none() {
        panic!("couldn't get runtime hashes within allowed time");
      }
    }

    // Every run must have produced the identical hash for the build to be reproducible
    let mut identical = HashSet::new();
    for res in res.clone() {
      identical.insert(res.unwrap());
    }
    assert_eq!(identical.len(), 1, "got different runtime hashes {res:?}");
  });
}