Repository: polytope-labs/solidity-merkle-trees
Branch: main
Commit: 135f2251c1e1
Files: 40
Total size: 195.5 KB
Directory structure:
gitextract_xw3jd3sw/
├── .github/
│ └── workflows/
│ └── test.yml
├── .gitignore
├── .npmignore
├── Dockerfile
├── LICENSE
├── README.md
├── foundry.toml
├── package.json
├── remappings.txt
├── rust-toolchain
├── src/
│ ├── MerkleMountainRange.sol
│ ├── MerkleMultiProof.sol
│ ├── MerklePatricia.sol
│ └── trie/
│ ├── Bytes.sol
│ ├── Memory.sol
│ ├── NibbleSlice.sol
│ ├── Node.sol
│ ├── Option.sol
│ ├── TrieDB.sol
│ ├── ethereum/
│ │ ├── EthereumTrieDB.sol
│ │ └── RLPReader.sol
│ └── substrate/
│ ├── ScaleCodec.sol
│ └── SubstrateTrieDB.sol
└── tests/
├── foundry/
│ ├── MerkleMountainRange.t.sol
│ ├── MerkleMultiProof.t.sol
│ └── MerklePatricia.t.sol
└── rust/
├── Cargo.toml
├── fuzz/
│ ├── Cargo.toml
│ ├── fuzz_targets/
│ │ ├── trie_proof_invalid.rs
│ │ └── trie_proof_valid.rs
│ └── src/
│ └── lib.rs
├── proptest-regressions/
│ ├── merkle_mountain_range.txt
│ └── merkle_multi_proof.txt
├── rustfmt.toml
└── src/
├── evm_runner.rs
├── lib.rs
├── merkle_mountain_range.rs
├── merkle_multi_proof.rs
├── merkle_patricia.rs
└── multi_proof_utils.rs
================================================
FILE CONTENTS
================================================
================================================
FILE: .github/workflows/test.yml
================================================
name: Rust
on:
push:
branches:
- main
pull_request:
branches:
- main
env:
CARGO_TERM_COLOR: always
jobs:
test:
name: Test
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install pnpm
uses: pnpm/action-setup@v2
with:
version: 8
- uses: actions-rs/toolchain@v1
with:
toolchain: 1.81.0
override: true
- name: Install Foundry
uses: foundry-rs/foundry-toolchain@v1
with:
version: nightly
- name: Run Forge build
run: |
pnpm install --ignore-scripts
forge --version
forge build --sizes
id: build
- name: Run Forge tests
run: |
forge test -vvv
id: test
- name: Download SVM Releases List
run: |
wget -O /tmp/svm_releases_list.json https://raw.githubusercontent.com/nikitastupin/solc/2287d4326237172acf91ce42fd7ec18a67b7f512/linux/aarch64/list.json
echo "SVM_RELEASES_LIST_JSON=/tmp/svm_releases_list.json" >> $GITHUB_ENV
- name: cargo test
run: |
forge build
cd ./tests/rust
cargo test --release -- --nocapture
fuzz:
name: Fuzz
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly
override: true
- name: Install Foundry
uses: foundry-rs/foundry-toolchain@v1
with:
version: nightly
- name: Install cargo-fuzz
run: cargo install cargo-fuzz
- name: Download SVM Releases List
run: |
wget -O /tmp/svm_releases_list.json https://raw.githubusercontent.com/nikitastupin/solc/2287d4326237172acf91ce42fd7ec18a67b7f512/linux/aarch64/list.json
echo "SVM_RELEASES_LIST_JSON=/tmp/svm_releases_list.json" >> $GITHUB_ENV
- name: Install pnpm
uses: pnpm/action-setup@v2
with:
version: 8
- name: Build contracts
run: |
pnpm install --ignore-scripts
forge build
- name: Fuzz trie proof valid
run: cd tests/rust && cargo +nightly fuzz run trie_proof_valid -- -max_total_time=120
- name: Fuzz trie proof invalid
run: cd tests/rust && cargo +nightly fuzz run trie_proof_invalid -- -max_total_time=120
fmt:
name: Rustfmt
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly
override: true
components: rustfmt
- uses: actions-rs/cargo@v1
with:
command: fmt
args: --manifest-path ./tests/rust/Cargo.toml --all -- --check
================================================
FILE: .gitignore
================================================
# Compiler files
cache/
out/
# Ignores development broadcast logs
!/broadcast
/broadcast/*/31337/
/broadcast/**/dry-run/
# Dotenv file
.env
target/
.cargo/
artifacts/
corpus/
.idea/
.claude/
node_modules
================================================
FILE: .npmignore
================================================
target/
cache/
out/
.idea/
lib/forge-std
tests/
.github/
================================================
FILE: Dockerfile
================================================
FROM rustlang/rust:nightly
RUN cargo install cargo-fuzz
================================================
FILE: LICENSE
================================================
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
You must give any other recipients of the Work or Derivative Works a copy of this License; and
You must cause any modified files to carry prominent notices stating that You changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
================================================
FILE: README.md
================================================
# `@polytope-labs/solidity-merkle-trees`

[![npm version](https://img.shields.io/npm/v/@polytope-labs/solidity-merkle-trees)](https://www.npmjs.com/package/@polytope-labs/solidity-merkle-trees)
This library contains the implementations of various merkle tree verification algorithms. Currently supported algorithms:
- [x] Merkle Trees (supports unbalanced trees).
- [x] Merkle Mountain Ranges.
- [x] Merkle-Patricia Trie.
## Installation
```
npm install @polytope-labs/solidity-merkle-trees
```
## Merkle Multi Proofs
This algorithm is based on the research done here: https://research.polytope.technology/merkle-multi-proofs
Supports both balanced and unbalanced trees (leaf count need not be a power of 2).
You can use it to verify proofs like so:
```solidity
pragma solidity ^0.8.17;
import "@polytope-labs/solidity-merkle-trees/MerkleMultiProof.sol";
contract YourContract {
function verify(
bytes32 root,
bytes32[] memory proof,
MerkleMultiProof.Leaf[] memory leaves,
uint256 leafCount
) public {
require(MerkleMultiProof.VerifyProof(root, proof, leaves, leafCount), "Invalid proof");
}
}
```
Leaves carry a 0-based `index` and `hash`. The proof is a flat `bytes32[]` array of sibling hashes — no position metadata needed. The contract converts indices to 1-based tree positions internally and walks up level by level, consuming proof elements for missing siblings.
You can generate the merkle multi proofs using the [rs-merkle](https://crates.io/crates/rs-merkle) crate.
To convert an `rs-merkle` proof into the format the Solidity verifier expects:
```rust
use rs_merkle::{Hasher, MerkleProof};
struct Leaf {
index: usize, // 0-based leaf index
hash: [u8; 32],
}
fn convert_proof<H: Hasher<Hash = [u8; 32]>>(
proof: &MerkleProof<H>,
leaf_indices: &[usize],
leaf_hashes: &[[u8; 32]],
) -> (Vec<[u8; 32]>, Vec<Leaf>) {
// Proof hashes can be passed directly — they are already in the correct
// consumption order (layer by layer, left to right).
let proof_hashes: Vec<[u8; 32]> = proof.proof_hashes().to_vec();
let mut leaves: Vec<Leaf> = leaf_indices.iter().zip(leaf_hashes)
.map(|(&i, &hash)| Leaf { index: i, hash })
.collect();
leaves.sort_by_key(|l| l.index);
(proof_hashes, leaves)
}
```
## Merkle Mountain Range Multi Proofs
This algorithm is based on the research done here: https://research.polytope.technology/merkle-mountain-range-multi-proofs
You can use it to verify proofs like so:
```solidity
pragma solidity ^0.8.17;
import "@polytope-labs/solidity-merkle-trees/MerkleMountainRange.sol";
contract YourContract {
function verify(
bytes32 root,
bytes32[] memory proof,
MerkleMountainRange.Leaf[] memory leaves,
uint256 leafCount
) public {
require(MerkleMountainRange.VerifyProof(root, proof, leaves, leafCount), "Invalid proof");
}
}
```
You can generate the MMR proofs using the [ckb-merkle-mountain-range](https://crates.io/crates/ckb-merkle-mountain-range) crate.
> **Note:** The MMR verifier provides **membership** proofs only — it guarantees that a given leaf hash exists somewhere in the committed tree. It is **not positionally binding**: the `Leaf.index` field determines how the proof is reconstructed but a valid leaf hash may verify at more than one index. If your application requires positional binding, commit the leaf index into the leaf hash before inserting into the tree (e.g. `keccak256(abi.encodePacked(index, data))`).
## Merkle Patricia Trie
This library also supports the verification of the different styles of merkle patricia tries:
- [x] Substrate
- [x] Ethereum
- [ ] NEAR
```solidity
pragma solidity ^0.8.17;
import "@polytope-labs/solidity-merkle-trees/MerklePatricia.sol";
contract YourContract {
function verifySubstrateProof(
bytes32 root,
bytes[] memory proof,
bytes[] memory keys
) public {
// verifies proofs from state.getReadProof
MerklePatricia.StorageValue[] memory values = MerklePatricia.VerifySubstrateProof(root, proof, keys);
// do something with the verified values (values[i].key, values[i].value).
}
function verifyEthereumProof(
bytes32 root,
bytes[] memory proof,
bytes[] memory keys
) public {
// verifies ethereum specific merkle patricia proofs as described by EIP-1188.
// can be used to verify the receipt trie, transaction trie and state trie
MerklePatricia.StorageValue[] memory values = MerklePatricia.VerifyEthereumProof(root, proof, keys);
// do something with the verified values (values[i].key, values[i].value).
}
}
```
## Testing Guide
This guide assumes that [Rust](https://www.rust-lang.org/tools/install) — along with its [nightly](https://rust-lang.github.io/rustup/concepts/channels.html#:~:text=it%20just%20run-,rustup%20toolchain%20install%20nightly,-%3A) toolchain — [Solidity](https://docs.soliditylang.org/en/v0.8.17/installing-solidity.html), [cargo-fuzz](https://github.com/rust-fuzz/cargo-fuzz) and [Forge](https://github.com/foundry-rs/foundry/blob/master/README.md) are installed; if not, browse the official websites/repositories for instructions.
Build the contracts;
```bash
forge build
```
To run the unit tests associated with the Merkle Multi Proof library;
```bash
cargo test --release --manifest-path=./tests/rust/Cargo.toml --lib merkle_multi_proof
```
To run the unit tests associated with the Merkle Mountain Range library;
```bash
cargo test --release --manifest-path=./tests/rust/Cargo.toml --lib merkle_mountain_range
```
To run the unit and fuzz tests associated with the Merkle Patricia Trie library;
```bash
cargo test --release --manifest-path=./tests/rust/Cargo.toml --lib merkle_patricia
cd tests/rust && cargo +nightly fuzz run trie_proof_valid
cargo +nightly fuzz run trie_proof_invalid
```
### Run Tests in Docker
Execute the following commands in the project directory:
```bash
git submodule update --init --recursive
# run tests for all merkle verifiers
docker run --memory="24g" --rm --user root -v "$PWD":/app -w /app rust:latest cargo test --release --manifest-path=./tests/rust/Cargo.toml
# fuzz the merkle-patricia verifier
docker build -t test .
docker run --memory="24g" --rm --user root -v "$PWD":/app -w /app/tests/rust/fuzz test cargo +nightly fuzz run trie_proof_valid
docker run --memory="24g" --rm --user root -v "$PWD":/app -w /app/tests/rust/fuzz test cargo +nightly fuzz run trie_proof_invalid
```
## License
This library is licensed under the [Apache 2.0 License](./LICENSE), Copyright (c) 2023 Polytope Labs.
================================================
FILE: foundry.toml
================================================
[profile.default]
solc = '0.8.20'
src = 'src'
out = 'out'
libs = []
test = 'tests/foundry'
[lint]
lint_on_build = false
================================================
FILE: package.json
================================================
{
"name": "@polytope-labs/solidity-merkle-trees",
"version": "0.7.0",
"description": "The most advanced solidity library for merkle (multi) proof verification of different kinds of merkle trees",
"author": "Polytope Labs ",
"license": "Apache-2.0",
"bugs": {
"url": "https://github.com/polytope-labs/solidity-merkle-trees/issues"
},
"homepage": "https://github.com/polytope-labs/solidity-merkle-trees#readme",
"directories": {
"lib": "src"
},
"files": [
"./src/**/*.sol"
],
"scripts": {},
"repository": {
"type": "git",
"url": "git+https://github.com/polytope-labs/solidity-merkle-trees.git"
},
"keywords": [
"solidity",
"ethereum",
"smart",
"contracts",
"merkle-trees",
"algorithms"
],
"dependencies": {
"prettier": "^3.3.3",
"prettier-plugin-solidity": "^1.4.1"
},
"devDependencies": {
"forge-std": "github:foundry-rs/forge-std#v1.9.7"
}
}
================================================
FILE: remappings.txt
================================================
@openzeppelin/=node_modules/@openzeppelin/
forge-std/=node_modules/forge-std/src/
================================================
FILE: rust-toolchain
================================================
[toolchain]
channel = "1.81.0"
================================================
FILE: src/MerkleMountainRange.sol
================================================
// Copyright (C) Polytope Labs Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pragma solidity ^0.8.17;
/**
* @title A Merkle Mountain Range proof library
* @author Polytope Labs
* @notice Use this library to verify the merkle multi proofs of a merkle mountain range tree
* @dev refer to research for more info. https://research.polytope.technology/merkle-mountain-range-multi-proofs
*/
library MerkleMountainRange {
    /// @dev Thrown when the proof array is exhausted before all siblings are resolved.
    error ProofExhausted();

    /// @dev Thrown when leafCount is zero.
    error EmptyTree();

    /// @dev Thrown when there are leaves with indices >= leafCount.
    error OutOfBoundsLeaves();

    /**
     * @dev A merkle mountain range leaf node.
     *
     * An MMR with 14 leaves decomposes into subtrees of size 8, 4, 2:
     *
     *   Subtree 1        Subtree 2      Subtree 3
     *      /\                /\             /\
     *     /  \              /  \           /  \
     *    /    \            /    \        L12  L13
     *   /\ .. /\          /\    /\
     *  L0 L1 .. L6 L7    L8 L9 L10 L11
     *
     * index: 0-based leaf position across the entire MMR.
     */
    struct Leaf {
        // 0-based index of the leaf across the entire MMR
        uint256 index;
        // A hash of the leaf
        bytes32 hash;
    }

    /// @dev Iterator for tracking a contiguous range of leaves in an array.
    struct LeafIterator {
        // Start index of the range
        uint256 offset;
        // Length of the range
        uint256 length;
        // Reference to the underlying leaves array
        Leaf[] data;
    }

    /**
     * @dev A bidirectional iterator over a bytes32 array, used for sequential
     * consumption of proof elements and accumulation of peak roots.
     */
    struct HashIterator {
        // Current position in the array
        uint256 offset;
        // Reference to the underlying data
        bytes32[] data;
    }

    /**
     * @notice Verify that merkle proof is accurate.
     * @dev This calls CalculateRoot(...) under the hood.
     * @param root hash of the merkle mountain range tree
     * @param proof a list of nodes required for the proof to be verified
     * @param leaves a list of mmr leaves to prove
     * @param leafCount the total leaf count of the merkle mountain range
     * @return boolean if the calculated root matches the provided root node
     */
    function VerifyProof(bytes32 root, bytes32[] memory proof, Leaf[] memory leaves, uint256 leafCount)
        internal
        pure
        returns (bool)
    {
        return root == CalculateRoot(proof, leaves, leafCount);
    }

    /**
     * @notice Calculate merkle mountain range root.
     * @dev Decomposes leafCount into perfect subtrees (one per set bit of
     * leafCount), computes each subtree's peak root, then bags the peaks
     * right-to-left:
     *
     *   leafCount = 14 = 8 + 4 + 2 → 3 subtrees
     *
     *   S1 (8 leaves)   S2 (4 leaves)   S3 (2 leaves)
     *       /\              /\              /\
     *      /  \            /  \           L12 L13
     *    /\    /\        /\    /\
     *   ..  ..  ..  ..  L8 L9 L10 L11
     *
     *   ROOT = hash( hash(S3, S2), S1 )
     *
     * @param proof A list of the merkle nodes that are needed to re-calculate root node.
     * @param leaves a list of mmr leaves to prove
     * @param leafCount the total leaf count of the merkle mountain range
     * @return bytes32 hash of the computed root node
     */
    function CalculateRoot(bytes32[] memory proof, Leaf[] memory leaves, uint256 leafCount)
        internal
        pure
        returns (bytes32)
    {
        if (leafCount == 0) revert EmptyTree();
        // special handle the only 1 leaf MMR
        if (leafCount == 1 && leaves.length == 1 && leaves[0].index == 0) {
            return leaves[0].hash;
        }
        // One peak root slot per set bit of leafCount: each bit corresponds to
        // exactly one perfect subtree in the decomposition.
        HashIterator memory peakRoots = HashIterator(0, new bytes32[](_popcount(leafCount)));
        HashIterator memory proofIter = HashIterator(0, proof);
        LeafIterator memory leafIter = LeafIterator(0, leaves.length, leaves);
        uint256 nextSubtreeStart;
        uint256 remaining = leafCount;
        // Peel off perfect subtrees, largest first (highest set bit of `remaining`).
        while (remaining != 0) {
            uint256 height = _log2(remaining);
            uint256 subtreeSize = 1 << height;
            remaining -= subtreeSize;
            nextSubtreeStart += subtreeSize;
            // Split off the leaves (if any) that fall inside the current subtree;
            // `leafIter` is advanced past them.
            LeafIterator memory subtreeLeaves = _subtreeLeaves(leafIter, nextSubtreeStart);
            if (subtreeLeaves.length == 0) {
                // No leaves to prove in this subtree: its peak comes straight
                // from the proof array.
                if (proofIter.data.length == proofIter.offset) {
                    // Proof exhausted; stop. Any leaves left unconsumed are
                    // caught by the OutOfBoundsLeaves check below.
                    break;
                } else {
                    _push(peakRoots, _next(proofIter));
                }
            } else if (subtreeLeaves.length == 1 && height == 0) {
                // A single-leaf subtree is its own peak.
                _push(peakRoots, subtreeLeaves.data[subtreeLeaves.offset].hash);
            } else {
                // Constant such that position = subtreeStartPos + leafIndex maps a
                // global leaf index onto a 1-based tree position within this
                // subtree. The subtraction may wrap below zero (hence unchecked);
                // the wrap cancels out when the leaf index is added back in
                // _subtreeRoot.
                uint256 subtreeStartPos;
                unchecked { subtreeStartPos = 2 * subtreeSize - nextSubtreeStart; }
                _push(peakRoots, _subtreeRoot(subtreeLeaves, proofIter, subtreeStartPos));
            }
        }
        // invariant: no out of bounds leaves
        if (leafIter.length != 0) revert OutOfBoundsLeaves();
        // Bag the peaks right-to-left. Point the iterator at the last peak written.
        unchecked {
            peakRoots.offset--;
        }
        while (peakRoots.offset != 0) {
            // Read the two rightmost unbagged peaks. When `left` is read at slot 0
            // the unchecked decrement in _previous wraps the offset past zero; the
            // ++ below brings it back to the slot where the result is stored.
            bytes32 right = _previous(peakRoots);
            bytes32 left = _previous(peakRoots);
            unchecked {
                ++peakRoots.offset;
            }
            bytes32 hash;
            // hash = keccak256(right || left) using scratch space at 0x0..0x40.
            assembly {
                mstore(0x0, right)
                mstore(0x20, left)
                hash := keccak256(0x0, 0x40)
            }
            // Overwrite the left peak's slot with the bagged result and continue
            // leftwards; net movement is one slot left per iteration.
            peakRoots.data[peakRoots.offset] = hash;
        }
        return peakRoots.data[0];
    }

    /**
     * @notice Get a subtree's leaves.
     * @dev Partitions the leaf iterator so that leaves belonging to the current
     * subtree (index < nextSubtreeStart) are returned, and the iterator is
     * advanced past them. Relies on `leafIter.data` being sorted by index.
     * @param leafIter Iterator tracking the current leaf range (mutated in place)
     * @param nextSubtreeStart The first leaf index belonging to the next subtree
     * @return LeafIterator for the current subtree's leaves
     */
    function _subtreeLeaves(LeafIterator memory leafIter, uint256 nextSubtreeStart)
        internal
        pure
        returns (LeafIterator memory)
    {
        uint256 end = leafIter.offset + leafIter.length;
        uint256 newOffset = leafIter.offset;
        // Scan forward until the first leaf that belongs to a later subtree.
        for (; newOffset < end;) {
            if (nextSubtreeStart <= leafIter.data[newOffset].index) {
                break;
            }
            unchecked {
                ++newOffset;
            }
        }
        uint256 newLength = newOffset - leafIter.offset;
        LeafIterator memory subtreeIter = LeafIterator(leafIter.offset, newLength, leafIter.data);
        // Advance the caller's iterator past the leaves we just partitioned off.
        leafIter.offset = newOffset;
        leafIter.length -= newLength;
        return subtreeIter;
    }

    /**
     * @notice Calculate root hash of a subtree of the merkle mountain range.
     * @dev Converts leaf indices to tree positions, then walks up pairing siblings:
     *
     * Subtree 2 (subtreeSize=4, leaves 8-11):
     *
     *        1            ← peak root
     *      /   \
     *     2     3         position = subtreeStartPos + leafIndex
     *    / \   / \
     *   4   5 6   7       e.g. leaf 10 → pos 6
     *   │   │ │   │
     *   L8 L9 L10 L11
     *
     * At each level, siblings are paired (pos ^ 1) and hashed.
     * Missing siblings are consumed from the proof.
     *
     * @param leafIter An iterator representing the range of leaves for the subtree
     * @param proofIter Iterator over proof node hashes consumed as siblings during traversal
     * @param subtreeStartPos Precomputed constant such that position = subtreeStartPos + leafIndex
     *        (may have wrapped below zero in the caller; the unchecked addition here undoes the wrap)
     * @return bytes32 The computed peak root hash
     */
    function _subtreeRoot(LeafIterator memory leafIter, HashIterator memory proofIter, uint256 subtreeStartPos)
        internal
        pure
        returns (bytes32)
    {
        uint256 length = leafIter.length;
        uint256 offset = leafIter.offset;
        // Working arrays for the current level: positions[i] pairs with hashes[i].
        uint256[] memory positions = new uint256[](length);
        bytes32[] memory hashes = new bytes32[](length);
        for (uint256 i; i < length;) {
            hashes[i] = leafIter.data[offset + i].hash;
            unchecked {
                // Unchecked addition cancels the wrap introduced in the caller's
                // computation of subtreeStartPos.
                positions[i] = subtreeStartPos + leafIter.data[offset + i].index;
                ++i;
            }
        }
        uint256 len = length;
        // Walk up the tree, hashing with proof nodes — reuse arrays in-place
        while (positions[0] != 1) {
            uint256 nIdx;
            uint256 i;
            while (i < len) {
                uint256 pos = positions[i];
                if (i + 1 < len && positions[i + 1] == (pos ^ 1)) {
                    // Both siblings known
                    hashes[nIdx] = _hashPair(pos, hashes[i], hashes[i + 1]);
                    positions[nIdx] = pos >> 1;
                    unchecked {
                        ++nIdx;
                    }
                    i += 2;
                } else {
                    // Sibling is a proof node
                    if (proofIter.offset >= proofIter.data.length) revert ProofExhausted();
                    hashes[nIdx] = _hashPair(pos, hashes[i], _next(proofIter));
                    positions[nIdx] = pos >> 1;
                    unchecked {
                        ++nIdx;
                        ++i;
                    }
                }
            }
            // Shrink arrays to the number of parent nodes written, by rewriting
            // the in-memory length word of each array.
            len = nIdx;
            assembly {
                mstore(positions, nIdx)
                mstore(hashes, nIdx)
            }
        }
        return hashes[0];
    }

    /// @dev Push a value onto the iterator and advance the offset.
    function _push(HashIterator memory iterator, bytes32 data) internal pure {
        iterator.data[iterator.offset] = data;
        unchecked {
            ++iterator.offset;
        }
    }

    /// @dev Read the current value and advance the iterator forward.
    function _next(HashIterator memory iterator) internal pure returns (bytes32) {
        bytes32 data = iterator.data[iterator.offset];
        unchecked {
            ++iterator.offset;
        }
        return data;
    }

    /// @dev Read the current value and move the iterator backward. The unchecked
    /// decrement deliberately wraps when reading slot 0; the caller
    /// (CalculateRoot's bagging loop) corrects the offset afterwards.
    function _previous(HashIterator memory iterator) internal pure returns (bytes32) {
        bytes32 data = iterator.data[iterator.offset];
        unchecked {
            --iterator.offset;
        }
        return data;
    }

    /**
     * @dev Hash a node with its sibling, ordering by position. With root = 1 and
     * children of i at 2i / 2i+1, an even position is a left child, so the
     * current hash goes first; an odd position is a right child, so the sibling
     * goes first.
     * @param pos The 1-based tree position of the current node
     * @param current Hash of the current node
     * @param sibling Hash of the sibling node
     */
    function _hashPair(uint256 pos, bytes32 current, bytes32 sibling) private pure returns (bytes32 h) {
        if ((pos & 1) == 0) {
            assembly {
                mstore(0x0, current)
                mstore(0x20, sibling)
                h := keccak256(0x0, 0x40)
            }
        } else {
            assembly {
                mstore(0x0, sibling)
                mstore(0x20, current)
                h := keccak256(0x0, 0x40)
            }
        }
    }

    /// @dev Count the number of set bits (population count) via Kernighan's
    /// `x &= x - 1` loop, which clears the lowest set bit per iteration.
    /// Used to determine the number of peaks in the MMR.
    function _popcount(uint256 x) private pure returns (uint256 count) {
        while (x != 0) {
            x &= x - 1;
            unchecked {
                ++count;
            }
        }
    }

    /// @dev Efficient floor(log2(x)) using branchless bit-shifting; yields the
    /// index of the highest set bit.
    function _log2(uint256 x) private pure returns (uint256 r) {
        assembly {
            r := shl(7, lt(0xffffffffffffffffffffffffffffffff, x))
            r := or(r, shl(6, lt(0xffffffffffffffff, shr(r, x))))
            r := or(r, shl(5, lt(0xffffffff, shr(r, x))))
            r := or(r, shl(4, lt(0xffff, shr(r, x))))
            r := or(r, shl(3, lt(0xff, shr(r, x))))
            r := or(r, shl(2, lt(0xf, shr(r, x))))
            r := or(r, shl(1, lt(0x3, shr(r, x))))
            r := or(r, lt(0x1, shr(r, x)))
        }
    }
}
================================================
FILE: src/MerkleMultiProof.sol
================================================
// Copyright (C) Polytope Labs Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pragma solidity ^0.8.20;
/**
* @title A Merkle Multi proof library
* @author Polytope Labs
* @dev Use this library to verify merkle tree leaves using merkle multi proofs.
* Supports both balanced and unbalanced trees.
* @dev refer to research for more info. https://research.polytope.technology/merkle-multi-proofs
*/
library MerkleMultiProof {
    /*
     * @title A merkle tree leaf node
     */
    struct Leaf {
        // 0-based index of the leaf
        uint256 index;
        // A hash of the leaf
        bytes32 hash;
    }

    // @dev Thrown when the proof array is exhausted before all siblings are resolved.
    error ProofExhausted();
    // @dev Thrown when leafCount is zero.
    error EmptyTree();
    // @dev Thrown when a leaf index is >= leafCount.
    error LeafIndexOutOfBounds();
    // @dev Thrown when the leaves array is empty.
    error NoLeaves();

    /**
     * @notice Verify a Merkle Multi Proof
     * @param root hash of the root node of the merkle tree
     * @param proof A list of proof node hashes needed to re-calculate root node.
     * @param leaves A list of the leaves with their indices to prove, sorted
     *               ascending by index
     * @param leafCount Total number of leaves in the complete tree
     * @return boolean if the calculated root matches the provided root node
     */
    function VerifyProof(
        bytes32 root,
        bytes32[] memory proof,
        Leaf[] memory leaves,
        uint256 leafCount
    ) internal pure returns (bool) {
        return root == CalculateRoot(proof, leaves, leafCount);
    }

    /**
     * @notice Calculates the root hash of a merkle tree.
     * @dev Supports both balanced and unbalanced trees (leafCount need not be a
     *      power of 2). Converts leaf indices to 1-based tree positions, then
     *      walks up level by level pairing siblings. Missing siblings are consumed
     *      sequentially from the proof array. On the rightmost edge of an unbalanced
     *      tree, nodes whose sibling does not exist are promoted unchanged.
     *
     *      Position numbering (root = 1, children of i are 2i and 2i+1):
     *
     *                 1              ← root
     *               /   \
     *              2     3
     *             / \   / \
     *            4   5 6   7         leaves at positions (1 << height) + index
     *
     *      Unbalanced example (5 leaves, height = 3):
     *
     *                 1
     *               /   \
     *              2     3
     *             / \   / \
     *            4   5 6   7
     *           /\  /\  |
     *          8 9 .. 12             positions 13-15 don't exist, nodes promoted
     *
     *      At each level, siblings are identified via pos ^ 1.
     *      Even positions are left children, odd are right.
     *
     * @param proof Array of proof node hashes consumed as siblings during traversal;
     *              reverts with ProofExhausted when too short
     * @param leaves Array of leaf nodes with their 0-based indices (must be sorted
     *               ascending by index; duplicates are not supported)
     * @param leafCount Total number of leaves in the complete tree
     * @return bytes32 The calculated root hash
     */
    function CalculateRoot(
        bytes32[] memory proof,
        Leaf[] memory leaves,
        uint256 leafCount
    ) internal pure returns (bytes32) {
        if (leafCount == 0) revert EmptyTree();
        uint256 len = leaves.length;
        // an empty leaf set would otherwise trip an array-out-of-bounds panic
        // inside _walk; fail with a descriptive error instead
        if (len == 0) revert NoLeaves();
        uint256[] memory positions = new uint256[](len);
        bytes32[] memory hashes = new bytes32[](len);
        // Convert leaf indices to 1-based tree positions
        uint256 firstLeafPos = 1 << _ceilLog2(leafCount);
        for (uint256 i; i < len;) {
            if (leaves[i].index >= leafCount) revert LeafIndexOutOfBounds();
            hashes[i] = leaves[i].hash;
            unchecked {
                positions[i] = firstLeafPos + leaves[i].index;
                ++i;
            }
        }
        return _walk(positions, hashes, proof, leafCount);
    }

    /**
     * @dev Walk up the tree level by level, pairing siblings and hashing.
     *
     * Supports unbalanced trees by tracking the number of valid nodes per
     * level (`nodesAtLevel`). Starting from `leafCount`, this halves (with
     * ceiling) each level. A sibling position that falls outside the valid
     * range means it doesn't exist — the node is promoted unchanged.
     *
     * Reverts with ProofExhausted when a required sibling is missing from
     * `proof` (previously this surfaced as an array-out-of-bounds panic).
     */
    function _walk(
        uint256[] memory positions,
        bytes32[] memory hashes,
        bytes32[] memory proof,
        uint256 nodesAtLevel
    ) private pure returns (bytes32) {
        uint256 p; // index of the next unconsumed proof element
        uint256 len = positions.length;
        while (positions[0] != 1) {
            // the first position on this level is 1 << levelHeight; the last
            // valid node sits nodesAtLevel - 1 slots after it
            uint256 lastValid = (1 << _log2(positions[0])) + nodesAtLevel - 1;
            uint256 j;
            for (uint256 i; i < len;) {
                uint256 pos = positions[i];
                bool hasSiblingInSet = i + 1 < len && positions[i + 1] == (pos ^ 1);
                bool siblingExists = (pos ^ 1) <= lastValid;
                bytes32 parent;
                if (hasSiblingInSet) {
                    // sibling is one of the nodes we are already carrying
                    parent = _hashPair(pos, hashes[i], hashes[i + 1]);
                    unchecked { i += 2; }
                } else if (siblingExists) {
                    // sibling must come from the proof; fail cleanly rather
                    // than panicking when the proof is too short
                    if (p >= proof.length) revert ProofExhausted();
                    parent = _hashPair(pos, hashes[i], proof[p]);
                    unchecked { ++p; ++i; }
                } else {
                    parent = hashes[i]; // unbalanced edge — promote
                    unchecked { ++i; }
                }
                hashes[j] = parent;
                positions[j] = pos >> 1;
                unchecked { ++j; }
            }
            len = j;
            nodesAtLevel = (nodesAtLevel + 1) >> 1;
        }
        return hashes[0];
    }

    /*
     * @dev Hash a node with its sibling, ordering by position (even = left child, odd = right child)
     * @param pos The 1-based tree position of the current node
     * @param current Hash of the current node
     * @param sibling Hash of the sibling node
     */
    function _hashPair(uint256 pos, bytes32 current, bytes32 sibling) private pure returns (bytes32 h) {
        if ((pos & 1) == 0) {
            assembly {
                mstore(0x0, current)
                mstore(0x20, sibling)
                h := keccak256(0x0, 0x40)
            }
        } else {
            assembly {
                mstore(0x0, sibling)
                mstore(0x20, current)
                h := keccak256(0x0, 0x40)
            }
        }
    }

    // @dev Compute ceil(log2(x)); by convention returns 0 for x <= 1.
    function _ceilLog2(uint256 x) private pure returns (uint256) {
        if (x <= 1) return 0;
        return _log2(x - 1) + 1;
    }

    // @dev Efficient floor(log2(x)) using bit-shifting (binary search over bit width).
    function _log2(uint256 x) private pure returns (uint256 r) {
        assembly {
            r := shl(7, lt(0xffffffffffffffffffffffffffffffff, x))
            r := or(r, shl(6, lt(0xffffffffffffffff, shr(r, x))))
            r := or(r, shl(5, lt(0xffffffff, shr(r, x))))
            r := or(r, shl(4, lt(0xffff, shr(r, x))))
            r := or(r, shl(3, lt(0xff, shr(r, x))))
            r := or(r, shl(2, lt(0xf, shr(r, x))))
            r := or(r, shl(1, lt(0x3, shr(r, x))))
            r := or(r, lt(0x1, shr(r, x)))
        }
    }
}
================================================
FILE: src/MerklePatricia.sol
================================================
// Copyright (C) Polytope Labs Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pragma solidity ^0.8.20;
import {NodeKind, NodeHandle, Extension, Branch, NibbledBranch, ValueOption, NodeHandleOption, Leaf, TrieNode} from "./trie/Node.sol";
import {Option} from "./trie/Option.sol";
import {NibbleSlice, NibbleSliceOps} from "./trie/NibbleSlice.sol";
import {TrieDB} from "./trie/TrieDB.sol";
import {SubstrateTrieDB} from "./trie/substrate/SubstrateTrieDB.sol";
import {EthereumTrieDB} from "./trie/ethereum/EthereumTrieDB.sol";
/**
* @title A Merkle Patricia library
* @author Polytope Labs
* @dev Use this library to verify merkle patricia proofs
* @dev refer to research for more info. https://research.polytope.technology/state-(machine)-proofs
*/
library MerklePatricia {
    // Outcome of a successfully verified merkle-patricia proof
    struct StorageValue {
        // the storage key
        bytes key;
        // the encoded value; left empty when the key is proven absent
        bytes value;
    }

    /**
     * @notice Verifies substrate specific merkle patricia proofs.
     * @dev A key that is absent from the trie yields an empty `value`; callers
     *      cannot distinguish "absent" from "present with empty value" here.
     * @param root hash of the merkle patricia trie
     * @param proof a list of proof nodes
     * @param keys a list of keys to verify
     * @return StorageValue[] a list of values corresponding to the supplied keys.
     */
    function VerifySubstrateProof(
        bytes32 root,
        bytes[] memory proof,
        bytes[] memory keys
    ) public pure returns (StorageValue[] memory) {
        StorageValue[] memory values = new StorageValue[](keys.length);
        // index every proof node by its keccak256 hash so hash-handles can be resolved
        TrieNode[] memory nodes = new TrieNode[](proof.length);
        for (uint256 i = 0; i < proof.length; i++) {
            nodes[i] = TrieNode(keccak256(proof[i]), proof[i]);
        }
        for (uint256 i = 0; i < keys.length; i++) {
            values[i].key = keys[i];
            NibbleSlice memory keyNibbles = NibbleSlice(keys[i], 0);
            // start traversal at the root; TrieDB.get reverts if it is missing from the proof
            NodeKind memory node = SubstrateTrieDB.decodeNodeKind(
                TrieDB.get(nodes, root)
            );
            /*
             * This loop is unbounded so that an adversary cannot insert a deeply nested key in the trie
             * and successfully convince us of its non-existence; if we consume the block gas limit while
             * traversing the trie, then the transaction should revert.
             */
            for (uint256 j = 1; j > 0; j++) {
                NodeHandle memory nextNode;
                if (TrieDB.isLeaf(node)) {
                    // terminal node: either the remaining key matches exactly, or the key is absent
                    Leaf memory leaf = SubstrateTrieDB.decodeLeaf(node);
                    if (NibbleSliceOps.eq(leaf.key, keyNibbles)) {
                        values[i].value = TrieDB.load(nodes, leaf.value);
                    }
                    break;
                } else if (TrieDB.isNibbledBranch(node)) {
                    NibbledBranch memory nibbled = SubstrateTrieDB
                        .decodeNibbledBranch(node);
                    uint256 nibbledBranchKeyLength = NibbleSliceOps.len(
                        nibbled.key
                    );
                    // the remaining key must extend this branch's partial key, else the key is absent
                    if (!NibbleSliceOps.startsWith(keyNibbles, nibbled.key)) {
                        break;
                    }
                    if (
                        NibbleSliceOps.len(keyNibbles) == nibbledBranchKeyLength
                    ) {
                        // key terminates at this branch; take its value if present
                        if (Option.isSome(nibbled.value)) {
                            values[i].value = TrieDB.load(
                                nodes,
                                nibbled.value.value
                            );
                        }
                        break;
                    } else {
                        // descend into the child selected by the nibble after the partial key
                        uint256 index = NibbleSliceOps.at(
                            keyNibbles,
                            nibbledBranchKeyLength
                        );
                        NodeHandleOption memory handle = nibbled.children[
                            index
                        ];
                        if (Option.isSome(handle)) {
                            // consume the partial key plus the one child-index nibble
                            keyNibbles = NibbleSliceOps.mid(
                                keyNibbles,
                                nibbledBranchKeyLength + 1
                            );
                            nextNode = handle.value;
                        } else {
                            break;
                        }
                    }
                } else if (TrieDB.isEmpty(node)) {
                    break;
                }
                // NOTE(review): if `node` is any other kind (e.g. extension), `nextNode`
                // stays zero-initialized and the load below yields empty bytes — the
                // subsequent decode is presumably expected to revert; confirm.
                node = SubstrateTrieDB.decodeNodeKind(
                    TrieDB.load(nodes, nextNode)
                );
            }
        }
        return values;
    }

    /**
     * @notice Verify child trie keys
     * @dev substrate specific method in order to verify keys in the child trie.
     *      The same proof set is expected to contain both the main-trie nodes
     *      (proving the child root) and the child-trie nodes (proving the keys).
     * @param root hash of the merkle root
     * @param proof a list of proof nodes
     * @param keys a list of keys to verify
     * @param childInfo data that can be used to compute the root of the child trie
     * @return StorageValue[], a list of values corresponding to the supplied keys.
     */
    function ReadChildProofCheck(
        bytes32 root,
        bytes[] memory proof,
        bytes[] memory keys,
        bytes memory childInfo
    ) public pure returns (StorageValue[] memory) {
        // fetch the child trie root hash;
        // it lives in the main trie under the key ":child_storage:default:" ++ childInfo
        bytes memory prefix = bytes(":child_storage:default:");
        bytes memory key = bytes.concat(prefix, childInfo);
        bytes[] memory _keys = new bytes[](1);
        _keys[0] = key;
        StorageValue[] memory values = VerifySubstrateProof(root, proof, _keys);
        // bytes -> bytes32 takes the first 32 bytes, zero-padded when shorter (solidity >= 0.8.5)
        bytes32 childRoot = bytes32(values[0].value);
        require(childRoot != bytes32(0), "Invalid child trie proof");
        return VerifySubstrateProof(childRoot, proof, keys);
    }

    /**
     * @notice Verifies ethereum specific merkle patricia proofs as described by EIP-1188.
     * @param root hash of the merkle patricia trie
     * @param proof a list of proof nodes
     * @param keys a list of keys to verify
     * @return StorageValue[] a list of values corresponding to the supplied keys.
     */
    function VerifyEthereumProof(
        bytes32 root,
        bytes[] memory proof,
        bytes[] memory keys
    ) public pure returns (StorageValue[] memory) {
        StorageValue[] memory values = new StorageValue[](keys.length);
        // index every proof node by its keccak256 hash so hash-handles can be resolved
        TrieNode[] memory nodes = new TrieNode[](proof.length);
        for (uint256 i = 0; i < proof.length; i++) {
            nodes[i] = TrieNode(keccak256(proof[i]), proof[i]);
        }
        for (uint256 i = 0; i < keys.length; i++) {
            values[i].key = keys[i];
            NibbleSlice memory keyNibbles = NibbleSlice(keys[i], 0);
            NodeKind memory node = EthereumTrieDB.decodeNodeKind(
                TrieDB.get(nodes, root)
            );
            /*
             * This loop is unbounded so that an adversary cannot insert a deeply nested key in the trie
             * and successfully convince us of its non-existence; if we consume the block gas limit while
             * traversing the trie, then the transaction should revert.
             */
            for (uint256 j = 1; j > 0; j++) {
                NodeHandle memory nextNode;
                if (TrieDB.isLeaf(node)) {
                    Leaf memory leaf = EthereumTrieDB.decodeLeaf(node);
                    // Let's retrieve the offset to be used:
                    // round the consumed nibble offset up to a whole byte.
                    // NOTE(review): an odd offset discards the half-consumed nibble
                    // before the comparison — presumably compensated by the leaf's
                    // HP key encoding; confirm against EthereumTrieDB.decodeLeaf.
                    uint256 offset = keyNibbles.offset % 2 == 0
                        ? keyNibbles.offset / 2
                        : keyNibbles.offset / 2 + 1;
                    // Let's cut the key passed as input
                    keyNibbles = NibbleSlice(
                        NibbleSliceOps.bytesSlice(keyNibbles.data, offset),
                        0
                    );
                    if (NibbleSliceOps.eq(leaf.key, keyNibbles)) {
                        values[i].value = TrieDB.load(nodes, leaf.value);
                    }
                    break;
                } else if (TrieDB.isExtension(node)) {
                    Extension memory extension = EthereumTrieDB.decodeExtension(
                        node
                    );
                    if (NibbleSliceOps.startsWith(keyNibbles, extension.key)) {
                        // Let's cut the key passed as input:
                        // advance past the extension's shared path, re-basing the
                        // slice on the byte boundary and carrying the odd nibble
                        // in the new offset
                        uint256 cutNibble = keyNibbles.offset +
                            NibbleSliceOps.len(extension.key);
                        keyNibbles = NibbleSlice(
                            NibbleSliceOps.bytesSlice(
                                keyNibbles.data,
                                cutNibble / 2
                            ),
                            cutNibble % 2
                        );
                        nextNode = extension.node;
                    } else {
                        break;
                    }
                } else if (TrieDB.isBranch(node)) {
                    Branch memory branch = EthereumTrieDB.decodeBranch(node);
                    if (NibbleSliceOps.isEmpty(keyNibbles)) {
                        // key terminates at this branch; take its value if present
                        if (Option.isSome(branch.value)) {
                            values[i].value = TrieDB.load(
                                nodes,
                                branch.value.value
                            );
                        }
                        break;
                    } else {
                        // descend into the child slot selected by the next nibble
                        NodeHandleOption memory handle = branch.children[
                            NibbleSliceOps.at(keyNibbles, 0)
                        ];
                        if (Option.isSome(handle)) {
                            keyNibbles = NibbleSliceOps.mid(keyNibbles, 1);
                            nextNode = handle.value;
                        } else {
                            break;
                        }
                    }
                } else if (TrieDB.isEmpty(node)) {
                    break;
                }
                node = EthereumTrieDB.decodeNodeKind(
                    TrieDB.load(nodes, nextNode)
                );
            }
        }
        return values;
    }
}
================================================
FILE: src/trie/Bytes.sol
================================================
// Copyright (C) Polytope Labs Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pragma solidity ^0.8.20;
import {Memory} from "./Memory.sol";
// A cursor over a byte array: `offset` tracks how many bytes have already
// been consumed from `data` by the reader functions in the Bytes library.
struct ByteSlice {
    // the underlying buffer
    bytes data;
    // current read position within `data`, in bytes
    uint256 offset;
}
library Bytes {
    // Size of the length word that prefixes a `bytes memory` payload.
    uint256 internal constant BYTES_HEADER_SIZE = 32;

    /*
     * Checks if two `bytes memory` variables are equal. This is done using hashing,
     * which is much more gas efficient then comparing each byte individually.
     * Equality means that:
     *  - 'self.length == other.length'
     *  - For 'n' in '[0, self.length)', 'self[n] == other[n]'
     */
    function equals(
        bytes memory self,
        bytes memory other
    ) internal pure returns (bool equal) {
        if (self.length != other.length) {
            return false;
        }
        uint256 addr;
        uint256 addr2;
        assembly {
            addr := add(self, /*BYTES_HEADER_SIZE*/ 32)
            addr2 := add(other, /*BYTES_HEADER_SIZE*/ 32)
        }
        equal = Memory.equals(addr, addr2, self.length);
    }

    // Reads one byte at the slice's cursor and advances the cursor.
    // Reverts with "Out of range" when the slice is exhausted.
    function readByte(ByteSlice memory self) internal pure returns (uint8) {
        if (self.offset + 1 > self.data.length) {
            revert("Out of range");
        }
        uint8 b = uint8(self.data[self.offset]);
        self.offset += 1;
        return b;
    }

    /*
     * Copies 'len' bytes from the slice's cursor into a new array and advances
     * the cursor by 'len'.
     * Requires that:
     *  - 'self.offset + len <= self.data.length'
     * The length of the returned array is 'len'.
     */
    function read(
        ByteSlice memory self,
        uint256 len
    ) internal pure returns (bytes memory) {
        require(self.offset + len <= self.data.length);
        if (len == 0) {
            return "";
        }
        uint256 addr = Memory.dataPtr(self.data);
        bytes memory slice = Memory.toBytes(addr + self.offset, len);
        self.offset += len;
        return slice;
    }

    /*
     * Copies a section of 'self' into a new array, starting at the provided 'startIndex'.
     * Returns the new copy.
     * Requires that 'startIndex <= self.length'
     * The length of the substring is: 'self.length - startIndex'
     */
    function substr(
        bytes memory self,
        uint256 startIndex
    ) internal pure returns (bytes memory) {
        require(startIndex <= self.length);
        uint256 len = self.length - startIndex;
        uint256 addr = Memory.dataPtr(self);
        return Memory.toBytes(addr + startIndex, len);
    }

    /*
     * Copies 'len' bytes from 'self' into a new array, starting at the provided 'startIndex'.
     * Returns the new copy.
     * Requires that:
     *  - 'startIndex + len <= self.length'
     * The length of the substring is: 'len'
     */
    function substr(
        bytes memory self,
        uint256 startIndex,
        uint256 len
    ) internal pure returns (bytes memory) {
        require(startIndex + len <= self.length);
        if (len == 0) {
            return "";
        }
        uint256 addr = Memory.dataPtr(self);
        return Memory.toBytes(addr + startIndex, len);
    }

    /*
     * Combines 'self' and 'other' into a single array.
     * Returns the concatenated arrays:
     *  [self[0], ..., self[self.length - 1], other[0], ..., other[other.length - 1]]
     * The length of the new array is 'self.length + other.length'
     */
    function concat(
        bytes memory self,
        bytes memory other
    ) internal pure returns (bytes memory) {
        bytes memory ret = new bytes(self.length + other.length);
        uint256 src;
        uint256 srcLen;
        (src, srcLen) = Memory.fromBytes(self);
        uint256 src2;
        uint256 src2Len;
        (src2, src2Len) = Memory.fromBytes(other);
        uint256 dest;
        (dest, ) = Memory.fromBytes(ret);
        uint256 dest2 = dest + srcLen;
        Memory.copy(src, dest, srcLen);
        Memory.copy(src2, dest2, src2Len);
        return ret;
    }

    // Interprets the first 32 bytes of 'self' as a bytes32 (big-endian, as stored).
    function toBytes32(bytes memory self) internal pure returns (bytes32 out) {
        require(self.length >= 32, "Bytes:: toBytes32: data is to short.");
        assembly {
            out := mload(add(self, 32))
        }
    }

    // Assembles a bytes16 from 16 consecutive bytes starting at 'offset'.
    // Reverts via the array bounds check when the range is out of bounds.
    function toBytes16(
        bytes memory self,
        uint256 offset
    ) internal pure returns (bytes16 out) {
        for (uint256 i = 0; i < 16; i++) {
            out |= bytes16(bytes1(self[offset + i]) & 0xFF) >> (i * 8);
        }
    }

    // Assembles a bytes8 from 8 consecutive bytes starting at 'offset'.
    function toBytes8(
        bytes memory self,
        uint256 offset
    ) internal pure returns (bytes8 out) {
        for (uint256 i = 0; i < 8; i++) {
            out |= bytes8(bytes1(self[offset + i]) & 0xFF) >> (i * 8);
        }
    }

    // Assembles a bytes4 from 4 consecutive bytes starting at 'offset'.
    function toBytes4(
        bytes memory self,
        uint256 offset
    ) internal pure returns (bytes4) {
        bytes4 out;
        for (uint256 i = 0; i < 4; i++) {
            out |= bytes4(self[offset + i] & 0xFF) >> (i * 8);
        }
        return out;
    }

    // Assembles a bytes2 from 2 consecutive bytes starting at 'offset'.
    function toBytes2(
        bytes memory self,
        uint256 offset
    ) internal pure returns (bytes2) {
        bytes2 out;
        for (uint256 i = 0; i < 2; i++) {
            out |= bytes2(self[offset + i] & 0xFF) >> (i * 8);
        }
        return out;
    }

    /*
     * Strips leading zero bytes up to (but not including) the first non-zero byte.
     * NOTE: when every byte is zero (or the input is empty) the input is returned
     * unchanged — existing behavior, preserved here in case callers rely on it.
     */
    function removeLeadingZero(
        bytes memory data
    ) internal pure returns (bytes memory) {
        uint256 length = data.length;
        uint256 startIndex = 0;
        for (uint256 i = 0; i < length; i++) {
            if (data[i] != 0) {
                startIndex = i;
                break;
            }
        }
        return substr(data, startIndex);
    }

    /*
     * Strips all zero bytes after the last non-zero byte.
     * Fixed: previously an empty input underflowed on `length - 1` and an
     * all-zero input underflowed the unsigned loop counter — both reverted with
     * an arithmetic panic. Empty and all-zero inputs now return empty bytes.
     */
    function removeEndingZero(
        bytes memory data
    ) internal pure returns (bytes memory) {
        uint256 length = data.length;
        // scan from the end; `i` is offset by one so the counter stays unsigned-safe
        for (uint256 i = length; i > 0; i--) {
            if (data[i - 1] != 0) {
                // data[i - 1] is the last non-zero byte; keep bytes [0, i)
                return substr(data, 0, i);
            }
        }
        return "";
    }

    /*
     * Returns a new array containing the bytes of 'inbytes' in reverse order.
     * Fixed: previously an empty input underflowed on `inlength - 1` in the loop
     * condition and reverted with an arithmetic panic; it now returns empty bytes.
     */
    function reverse(
        bytes memory inbytes
    ) internal pure returns (bytes memory) {
        uint256 inlength = inbytes.length;
        bytes memory outbytes = new bytes(inlength);
        for (uint256 i = 0; i < inlength; i++) {
            outbytes[i] = inbytes[inlength - i - 1];
        }
        return outbytes;
    }
}
================================================
FILE: src/trie/Memory.sol
================================================
// Copyright (C) Polytope Labs Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pragma solidity ^0.8.20;
library Memory {
    // EVM word size in bytes.
    uint256 internal constant WORD_SIZE = 32;

    /*
     * Compares the 'len' bytes starting at address 'addr' in memory with the 'len'
     * bytes starting at 'addr2'.
     * Returns 'true' if the bytes are the same, otherwise 'false'.
     * Implemented by comparing keccak256 digests of the two regions, which is
     * cheaper than a byte-by-byte loop.
     */
    function equals(
        uint256 addr,
        uint256 addr2,
        uint256 len
    ) internal pure returns (bool equal) {
        assembly {
            equal := eq(keccak256(addr, len), keccak256(addr2, len))
        }
    }

    /*
     * Compares the 'len' bytes starting at address 'addr' in memory with the bytes stored in
     * 'bts'. It is allowed to set 'len' to a lower value then 'bts.length', in which case only
     * the first 'len' bytes will be compared.
     * Requires that 'bts.length >= len'
     */
    function equals(
        uint256 addr,
        uint256 len,
        bytes memory bts
    ) internal pure returns (bool equal) {
        require(bts.length >= len);
        uint256 addr2;
        assembly {
            // skip the 32-byte length word to reach the payload
            addr2 := add(bts, /*BYTES_HEADER_SIZE*/ 32)
        }
        return equals(addr, addr2, len);
    }

    // Returns a memory pointer to the data portion of the provided bytes array.
    function dataPtr(bytes memory bts) internal pure returns (uint256 addr) {
        assembly {
            addr := add(bts, /*BYTES_HEADER_SIZE*/ 32)
        }
    }

    /*
     * Creates a 'bytes memory' variable from the memory address 'addr', with the
     * length 'len'. The function will allocate new memory for the bytes array, and
     * the 'len' bytes starting at 'addr' will be copied into that new memory.
     */
    function toBytes(
        uint256 addr,
        uint256 len
    ) internal pure returns (bytes memory bts) {
        bts = new bytes(len);
        uint256 btsptr;
        assembly {
            btsptr := add(bts, /*BYTES_HEADER_SIZE*/ 32)
        }
        copy(addr, btsptr, len);
    }

    /*
     * Copies 'self' into a new 'bytes memory'.
     * Returns the newly created 'bytes memory'
     * The returned bytes will be of length '32'.
     */
    function toBytes(bytes32 self) internal pure returns (bytes memory bts) {
        bts = new bytes(32);
        assembly {
            mstore(add(bts, /*BYTES_HEADER_SIZE*/ 32), self)
        }
    }

    /*
     * Copy 'len' bytes from memory address 'src', to address 'dest'.
     * This function does not check the source or destination, it only copies
     * the bytes.
     */
    function copy(uint256 src, uint256 dest, uint256 len) internal pure {
        // Copy word-length chunks while possible
        for (; len >= WORD_SIZE; len -= WORD_SIZE) {
            assembly {
                mstore(dest, mload(src))
            }
            dest += WORD_SIZE;
            src += WORD_SIZE;
        }
        // Copy remaining bytes: blend the top 'len' bytes of the source word
        // with the bottom bytes already at the destination.
        // NOTE(review): when 'len' ends up 0 here (input length a multiple of 32,
        // including 0) the mask keeps the destination word intact, so the final
        // mstore rewrites 'dest' with its own contents — harmless, but it still
        // touches (and may expand) memory at 'dest'.
        uint256 mask = len == 0
            ? 0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
            : 256 ** (WORD_SIZE - len) - 1;
        assembly {
            let srcpart := and(mload(src), not(mask))
            let destpart := and(mload(dest), mask)
            mstore(dest, or(destpart, srcpart))
        }
    }

    /*
     * This function does the same as 'dataPtr(bytes memory)', but will also return the
     * length of the provided bytes array.
     */
    function fromBytes(
        bytes memory bts
    ) internal pure returns (uint256 addr, uint256 len) {
        len = bts.length;
        assembly {
            addr := add(bts, /*BYTES_HEADER_SIZE*/ 32)
        }
    }
}
================================================
FILE: src/trie/NibbleSlice.sol
================================================
// Copyright (C) Polytope Labs Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pragma solidity ^0.8.20;
// A view over a byte array interpreted as a sequence of 4-bit nibbles.
// `offset` counts nibbles (not bytes) already consumed from the front of `data`.
struct NibbleSlice {
    // the underlying bytes, two nibbles per byte (high nibble first)
    bytes data;
    // number of leading nibbles to skip
    uint256 offset;
}
library NibbleSliceOps {
    // Two 4-bit nibbles fit in one byte.
    uint256 internal constant NIBBLE_PER_BYTE = 2;
    uint256 internal constant BITS_PER_NIBBLE = 4;

    // @dev Number of nibbles remaining in the slice, counted from its offset.
    function len(NibbleSlice memory nibble) internal pure returns (uint256) {
        return nibble.data.length * NIBBLE_PER_BYTE - nibble.offset;
    }

    // @dev A view of `self` advanced by `i` nibbles; the backing bytes are shared.
    function mid(
        NibbleSlice memory self,
        uint256 i
    ) internal pure returns (NibbleSlice memory) {
        return NibbleSlice(self.data, self.offset + i);
    }

    // @dev True when the slice contains no nibbles.
    function isEmpty(NibbleSlice memory self) internal pure returns (bool) {
        return len(self) == 0;
    }

    // @dev Nibble-wise equality: same length and same contents.
    function eq(
        NibbleSlice memory self,
        NibbleSlice memory other
    ) internal pure returns (bool) {
        return len(self) == len(other) && startsWith(self, other);
    }

    // @dev Value (0-15) of the i-th nibble past the slice's offset.
    function at(
        NibbleSlice memory self,
        uint256 i
    ) internal pure returns (uint256) {
        uint256 ix = (self.offset + i) / NIBBLE_PER_BYTE;
        uint256 pad = (self.offset + i) % NIBBLE_PER_BYTE;
        uint8 data = uint8(self.data[ix]);
        // even logical position = high nibble, odd = low nibble
        return (pad == 1) ? data & 0x0F : data >> BITS_PER_NIBBLE;
    }

    // @dev True when `self` begins with the entire contents of `other`.
    function startsWith(
        NibbleSlice memory self,
        NibbleSlice memory other
    ) internal pure returns (bool) {
        return commonPrefix(self, other) == len(other);
    }

    /*
     * @dev Length (in nibbles) of the longest common prefix of the two slices.
     * When both offsets have the same alignment the comparison is done byte-wise
     * after an optional half-byte head check (fast path); otherwise it falls
     * back to a nibble-by-nibble comparison.
     */
    function commonPrefix(
        NibbleSlice memory self,
        NibbleSlice memory other
    ) internal pure returns (uint256) {
        uint256 self_align = self.offset % NIBBLE_PER_BYTE;
        uint256 other_align = other.offset % NIBBLE_PER_BYTE;
        if (self_align == other_align) {
            uint256 self_start = self.offset / NIBBLE_PER_BYTE;
            uint256 other_start = other.offset / NIBBLE_PER_BYTE;
            uint256 first = 0;
            if (self_align != 0) {
                // both slices begin mid-byte: compare the low nibble first
                if (
                    (self.data[self_start] & 0x0F) !=
                    (other.data[other_start] & 0x0F)
                ) {
                    return 0;
                }
                ++self_start;
                ++other_start;
                ++first;
            }
            bytes memory selfSlice = bytesSlice(self.data, self_start);
            bytes memory otherSlice = bytesSlice(other.data, other_start);
            return biggestDepth(selfSlice, otherSlice) + first;
        } else {
            // mismatched alignment: compare nibble by nibble
            uint256 s = min(len(self), len(other));
            uint256 i = 0;
            while (i < s) {
                if (at(self, i) != at(other, i)) {
                    break;
                }
                ++i;
            }
            return i;
        }
    }

    /*
     * @dev Length (in nibbles) of the common prefix of two byte arrays, compared
     * from their first byte. A mismatching byte may still share its high nibble,
     * which `leftCommon` accounts for.
     */
    function biggestDepth(
        bytes memory a,
        bytes memory b
    ) internal pure returns (uint256) {
        uint256 upperBound = min(a.length, b.length);
        uint256 i = 0;
        while (i < upperBound) {
            if (a[i] != b[i]) {
                return i * NIBBLE_PER_BYTE + leftCommon(a[i], b[i]);
            }
            ++i;
        }
        return i * NIBBLE_PER_BYTE;
    }

    // @dev Number of leading nibbles (0, 1 or 2) that two bytes have in common.
    function leftCommon(bytes1 a, bytes1 b) internal pure returns (uint256) {
        if (a == b) {
            return 2;
        } else if (uint8(a) & 0xF0 == uint8(b) & 0xF0) {
            return 1;
        } else {
            return 0;
        }
    }

    /*
     * @dev Copies `_bytes[_start:]` into a freshly allocated bytes array.
     * Fixed: the bounds check now runs *before* `bytesLength - _start` is
     * computed, so an out-of-range `_start` reverts with "slice_outOfBounds"
     * instead of an arithmetic-underflow panic that made the message dead code.
     */
    function bytesSlice(
        bytes memory _bytes,
        uint256 _start
    ) internal pure returns (bytes memory) {
        uint256 bytesLength = _bytes.length;
        require(bytesLength >= _start, "slice_outOfBounds");
        uint256 _length = bytesLength - _start;
        bytes memory tempBytes;
        assembly {
            switch iszero(_length)
            case 0 {
                tempBytes := mload(0x40) // load free memory pointer
                let lengthmod := and(_length, 31)
                let mc := add(
                    add(tempBytes, lengthmod),
                    mul(0x20, iszero(lengthmod))
                )
                let end := add(mc, _length)
                for {
                    let cc := add(
                        add(
                            add(_bytes, lengthmod),
                            mul(0x20, iszero(lengthmod))
                        ),
                        _start
                    )
                } lt(mc, end) {
                    mc := add(mc, 0x20)
                    cc := add(cc, 0x20)
                } {
                    mstore(mc, mload(cc))
                }
                mstore(tempBytes, _length)
                mstore(0x40, and(add(mc, 31), not(31)))
            }
            default {
                tempBytes := mload(0x40)
                mstore(tempBytes, 0)
                mstore(0x40, add(tempBytes, 0x20))
            }
        }
        return tempBytes;
    }

    // @dev Minimum of two unsigned integers.
    function min(uint256 a, uint256 b) private pure returns (uint256) {
        return (a < b) ? a : b;
    }
}
================================================
FILE: src/trie/Node.sol
================================================
// Copyright (C) Polytope Labs Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pragma solidity ^0.8.20;
import {NibbleSlice} from "./NibbleSlice.sol";
import {ByteSlice} from "./Bytes.sol";
// This is an enum for the different node types.
// (Modeled as mutually-exclusive boolean flags rather than a Solidity enum so
// decoders can also carry the undecoded payload along in `data`.)
struct NodeKind {
    bool isEmpty;
    bool isLeaf;
    bool isHashedLeaf;
    bool isNibbledValueBranch;
    bool isNibbledHashedValueBranch;
    bool isNibbledBranch;
    bool isExtension;
    bool isBranch;
    // number of nibbles in the node's partial key (set by the decoders)
    uint256 nibbleSize;
    // cursor over the raw node encoding, for further decoding
    ByteSlice data;
}

// Reference to another trie node: either a 32-byte hash to look up in the
// proof set, or the node's encoding embedded inline.
struct NodeHandle {
    bool isHash;
    bytes32 hash;
    bool isInline;
    bytes inLine;
}

// Ethereum extension node: a shared key path and the single child it leads to.
struct Extension {
    NibbleSlice key;
    NodeHandle node;
}

// Ethereum branch node: an optional value plus one child slot per nibble.
struct Branch {
    NodeHandleOption value;
    NodeHandleOption[16] children;
}

// Substrate branch node: partial key, optional value, one child slot per nibble.
struct NibbledBranch {
    NibbleSlice key;
    NodeHandleOption value;
    NodeHandleOption[16] children;
}

// Optional byte value (`value` is meaningful only when isSome is true).
struct ValueOption {
    bool isSome;
    bytes value;
}

// Optional node handle (`value` is meaningful only when isSome is true).
struct NodeHandleOption {
    bool isSome;
    NodeHandle value;
}

// Leaf node: the remaining key path and the stored value.
struct Leaf {
    NibbleSlice key;
    NodeHandle value;
}

// A proof node paired with its keccak256 hash, used as the lookup table entry
// for resolving hash handles during traversal.
struct TrieNode {
    bytes32 hash;
    bytes node;
}
================================================
FILE: src/trie/Option.sol
================================================
// Copyright (C) Polytope Labs Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pragma solidity ^0.8.20;
import {ValueOption, NodeHandleOption} from "./Node.sol";
// Helpers for reading the Option-style structs declared in Node.sol.
library Option {
    // @dev True when the byte-value option holds a value.
    function isSome(ValueOption memory val) internal pure returns (bool) {
        return val.isSome;
    }

    // @dev True when the node-handle option holds a value.
    function isSome(NodeHandleOption memory val) internal pure returns (bool) {
        return val.isSome;
    }
}
================================================
FILE: src/trie/TrieDB.sol
================================================
// Copyright (C) Polytope Labs Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pragma solidity ^0.8.20;
import {TrieNode, NodeHandle, NodeKind} from "./Node.sol";
library TrieDB {
    /*
     * @dev Find the preimage of `hash` among the supplied proof nodes by
     * linear scan. Reverts when no node with that hash is present.
     */
    function get(
        TrieNode[] memory nodes,
        bytes32 hash
    ) internal pure returns (bytes memory) {
        uint256 total = nodes.length;
        for (uint256 i = 0; i < total; i++) {
            if (hash == nodes[i].hash) {
                return nodes[i].node;
            }
        }
        revert("Incomplete Proof!");
    }

    /*
     * @dev Resolve a node handle to its raw encoding: inline handles carry
     * the bytes directly, hash handles are looked up in the proof set, and a
     * handle that is neither yields empty bytes.
     */
    function load(
        TrieNode[] memory nodes,
        NodeHandle memory node
    ) internal pure returns (bytes memory) {
        if (node.isInline) return node.inLine;
        if (node.isHash) return get(nodes, node.hash);
        return bytes("");
    }

    // @dev True for any of the three nibbled-branch variants.
    function isNibbledBranch(
        NodeKind memory node
    ) internal pure returns (bool) {
        return
            node.isNibbledBranch ||
            node.isNibbledHashedValueBranch ||
            node.isNibbledValueBranch;
    }

    // @dev True for extension nodes.
    function isExtension(NodeKind memory node) internal pure returns (bool) {
        return node.isExtension;
    }

    // @dev True for plain branch nodes.
    function isBranch(NodeKind memory node) internal pure returns (bool) {
        return node.isBranch;
    }

    // @dev True for both inline-value and hashed-value leaves.
    function isLeaf(NodeKind memory node) internal pure returns (bool) {
        return node.isLeaf || node.isHashedLeaf;
    }

    // @dev True for the empty node.
    function isEmpty(NodeKind memory node) internal pure returns (bool) {
        return node.isEmpty;
    }

    // @dev True when the handle refers to a node by hash.
    function isHash(NodeHandle memory node) internal pure returns (bool) {
        return node.isHash;
    }

    // @dev True when the handle embeds the node encoding inline.
    function isInline(NodeHandle memory node) internal pure returns (bool) {
        return node.isInline;
    }
}
================================================
FILE: src/trie/ethereum/EthereumTrieDB.sol
================================================
/*
* Copyright (C) Polytope Labs Ltd.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
pragma solidity ^0.8.20;
import {NodeKind, Leaf, Extension, Branch, NodeHandle, NodeHandleOption, ByteSlice} from "../Node.sol";
import {Bytes} from "../Bytes.sol";
import {NibbleSlice, NibbleSliceOps} from "../NibbleSlice.sol";
import {RLPReader} from "./RLPReader.sol";
// Decoder for Ethereum's hex-prefix-encoded Merkle-Patricia trie nodes (RLP format).
library EthereumTrieDB {
using RLPReader for bytes;
using RLPReader for RLPReader.RLPItem;
using RLPReader for RLPReader.Iterator;
// keccak256 of the RLP encoding of the empty string (0x80): the canonical
// root hash of an empty Ethereum trie.
bytes constant HASHED_NULL_NODE =
hex"56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421";
// Classifies an encoded node as empty, leaf, extension, or branch without
// fully decoding it. The raw bytes are retained in `node.data` for the
// decodeLeaf/decodeExtension/decodeBranch functions below.
function decodeNodeKind(
bytes memory encoded
) external pure returns (NodeKind memory) {
NodeKind memory node;
ByteSlice memory input = ByteSlice(encoded, 0);
if (Bytes.equals(encoded, HASHED_NULL_NODE)) {
node.isEmpty = true;
return node;
}
RLPReader.RLPItem[] memory itemList = encoded.toRlpItem().toList();
uint256 numItems = itemList.length;
if (numItems == 0) {
node.isEmpty = true;
return node;
} else if (numItems == 2) {
// It may be a leaf or extension
bytes memory key = itemList[0].toBytes();
uint256 prefix;
assembly {
// Load the first byte of the key payload and take its high nibble:
// this is the hex-prefix flag nibble.
let first := shr(248, mload(add(key, 32)))
prefix := shr(4, first)
}
// Hex-prefix flags: 0/1 => extension (even/odd), 2/3 => leaf (even/odd).
if (prefix == 2 || prefix == 3) {
node.isLeaf = true;
} else {
node.isExtension = true;
}
} else if (numItems == 17) {
// 16 children + 1 value slot.
node.isBranch = true;
} else {
revert("Invalid data");
}
node.data = input;
return node;
}
// Decodes a 2-item leaf node into its key nibbles and inline value.
function decodeLeaf(
NodeKind memory node
) external pure returns (Leaf memory) {
Leaf memory leaf;
RLPReader.RLPItem[] memory decoded = node
.data
.data
.toRlpItem()
.toList();
bytes memory data = decoded[1].toBytes();
// Remove the first byte, which is the prefix and not present in the user provided key
// NOTE(review): this always drops one full byte, i.e. it assumes an
// even-length (flag 2) leaf key — confirm odd-length leaf keys (flag 3,
// where the first byte already carries a key nibble) cannot occur here.
leaf.key = NibbleSlice(Bytes.substr(decoded[0].toBytes(), 1), 0);
leaf.value = NodeHandle(false, bytes32(0), true, data);
return leaf;
}
// Decodes a 2-item extension node into its key nibbles and a hash handle
// pointing at the child node.
function decodeExtension(
NodeKind memory node
) external pure returns (Extension memory) {
Extension memory extension;
RLPReader.RLPItem[] memory decoded = node
.data
.data
.toRlpItem()
.toList();
bytes memory data = decoded[1].toBytes();
// Low bit of the hex-prefix flag nibble: 1 when the key has an odd
// number of nibbles (first byte carries one key nibble).
uint8 isOdd = uint8(decoded[0].toBytes()[0] >> 4) & 0x01;
// Remove the first byte, which is the prefix and not present in the user provided key
// For odd keys keep the prefix byte (offset 1 skips only the flag nibble);
// for even keys drop the whole prefix byte.
extension.key = NibbleSlice(
Bytes.substr(decoded[0].toBytes(), (isOdd + 1) % 2),
isOdd
);
// Extension children are referenced by their 32-byte hash.
extension.node = NodeHandle(
true,
Bytes.toBytes32(data),
false,
new bytes(0)
);
return extension;
}
// Decodes a 17-item branch node: children[0..16] plus an optional value.
function decodeBranch(
NodeKind memory node
) external pure returns (Branch memory) {
Branch memory branch;
RLPReader.RLPItem[] memory decoded = node
.data
.data
.toRlpItem()
.toList();
NodeHandleOption[16] memory childrens;
for (uint256 i = 0; i < 16; i++) {
bytes memory dataAsBytes = decoded[i].toBytes();
// NOTE(review): only 32-byte payloads are treated as child hashes;
// anything else (including RLP-inlined child nodes shorter than 32
// bytes) is treated as "no child". Confirm proofs never contain
// inline children, otherwise such children would be skipped.
if (dataAsBytes.length != 32) {
childrens[i] = NodeHandleOption(
false,
NodeHandle(false, bytes32(0), false, new bytes(0))
);
} else {
bytes32 data = Bytes.toBytes32(dataAsBytes);
childrens[i] = NodeHandleOption(
true,
NodeHandle(true, data, false, new bytes(0))
);
}
}
// Slot 16 holds the branch's own value, if any.
if (isEmpty(decoded[16].toBytes())) {
branch.value = NodeHandleOption(
false,
NodeHandle(false, bytes32(0), false, new bytes(0))
);
} else {
branch.value = NodeHandleOption(
true,
NodeHandle(false, bytes32(0), true, decoded[16].toBytes())
);
}
branch.children = childrens;
return branch;
}
// Heuristic emptiness test on an RLP payload: treats a payload whose first
// byte is the empty-list (0xc0) or empty-string (0x80) marker as empty.
// NOTE(review): `toBytes` already strips the RLP prefix, so a truly empty
// value arrives here as zero-length bytes and this returns false — verify
// that branch-value absence is represented the way callers expect.
function isEmpty(bytes memory item) internal pure returns (bool) {
return item.length > 0 && (item[0] == 0xc0 || item[0] == 0x80);
}
}
================================================
FILE: src/trie/ethereum/RLPReader.sol
================================================
// SPDX-License-Identifier: Apache-2.0
/*
* @author Hamdi Allam hamdi.allam97@gmail.com
* Please reach out with any questions or concerns
*/
pragma solidity >=0.5.10 <0.9.0;
// Zero-copy RLP (Recursive Length Prefix) decoder. An RLPItem is a
// (length, memory-pointer) view into an already-allocated bytes buffer.
library RLPReader {
uint8 constant STRING_SHORT_START = 0x80;
uint8 constant STRING_LONG_START = 0xb8;
uint8 constant LIST_SHORT_START = 0xc0;
uint8 constant LIST_LONG_START = 0xf8;
uint8 constant WORD_SIZE = 32;
// View over one RLP-encoded item: total encoded length (prefix included)
// and the memory address of its first byte.
struct RLPItem {
uint256 len;
uint256 memPtr;
}
struct Iterator {
RLPItem item; // Item that's being iterated over.
uint256 nextPtr; // Position of the next item in the list.
}
/*
* @dev Returns the next element in the iteration. Reverts if it has not next element.
* @param self The iterator.
* @return The next element in the iteration.
*/
function next(Iterator memory self) internal pure returns (RLPItem memory) {
require(hasNext(self));
uint256 ptr = self.nextPtr;
uint256 itemLength = _itemLength(ptr);
self.nextPtr = ptr + itemLength;
return RLPItem(itemLength, ptr);
}
/*
* @dev Returns true if the iteration has more elements.
* @param self The iterator.
* @return true if the iteration has more elements.
*/
function hasNext(Iterator memory self) internal pure returns (bool) {
RLPItem memory item = self.item;
return self.nextPtr < item.memPtr + item.len;
}
/*
* @param item RLP encoded bytes
*/
function toRlpItem(bytes memory item) internal pure returns (RLPItem memory) {
uint256 memPtr;
assembly {
// Skip the 32-byte length word of the `bytes` value to reach its data.
memPtr := add(item, 0x20)
}
return RLPItem(item.length, memPtr);
}
/*
* @dev Create an iterator. Reverts if item is not a list.
* @param self The RLP item.
* @return An 'Iterator' over the item.
*/
function iterator(RLPItem memory self) internal pure returns (Iterator memory) {
require(isList(self));
uint256 ptr = self.memPtr + _payloadOffset(self.memPtr);
return Iterator(self, ptr);
}
/*
* @param the RLP item.
*/
function rlpLen(RLPItem memory item) internal pure returns (uint256) {
return item.len;
}
/*
* @param the RLP item.
* @return (memPtr, len) pair: location of the item's payload in memory.
*/
function payloadLocation(RLPItem memory item) internal pure returns (uint256, uint256) {
uint256 offset = _payloadOffset(item.memPtr);
uint256 memPtr = item.memPtr + offset;
uint256 len = item.len - offset; // data length
return (memPtr, len);
}
/*
* @param the RLP item.
*/
function payloadLen(RLPItem memory item) internal pure returns (uint256) {
(, uint256 len) = payloadLocation(item);
return len;
}
/*
* @param the RLP item containing the encoded list.
* @dev Splits a list item into views over its top-level elements; does not
*      recurse into nested lists.
*/
function toList(RLPItem memory item) internal pure returns (RLPItem[] memory) {
require(isList(item));
uint256 items = numItems(item);
RLPItem[] memory result = new RLPItem[](items);
uint256 memPtr = item.memPtr + _payloadOffset(item.memPtr);
uint256 dataLen;
for (uint256 i = 0; i < items; i++) {
dataLen = _itemLength(memPtr);
result[i] = RLPItem(dataLen, memPtr);
memPtr = memPtr + dataLen;
}
return result;
}
// @return indicator whether encoded payload is a list. negate this function call for isData.
function isList(RLPItem memory item) internal pure returns (bool) {
if (item.len == 0) return false;
uint8 byte0;
uint256 memPtr = item.memPtr;
assembly {
byte0 := byte(0, mload(memPtr))
}
// All list encodings have a first byte >= 0xc0.
if (byte0 < LIST_SHORT_START) return false;
return true;
}
/*
* @dev A cheaper version of keccak256(toRlpBytes(item)) that avoids copying memory.
* @return keccak256 hash of RLP encoded bytes.
*/
function rlpBytesKeccak256(RLPItem memory item) internal pure returns (bytes32) {
uint256 ptr = item.memPtr;
uint256 len = item.len;
bytes32 result;
assembly {
result := keccak256(ptr, len)
}
return result;
}
/*
* @dev A cheaper version of keccak256(toBytes(item)) that avoids copying memory.
* @return keccak256 hash of the item payload.
*/
function payloadKeccak256(RLPItem memory item) internal pure returns (bytes32) {
(uint256 memPtr, uint256 len) = payloadLocation(item);
bytes32 result;
assembly {
result := keccak256(memPtr, len)
}
return result;
}
/**
* RLPItem conversions into data types *
*/
// @returns raw rlp encoding in bytes
function toRlpBytes(RLPItem memory item) internal pure returns (bytes memory) {
bytes memory result = new bytes(item.len);
if (result.length == 0) return result;
uint256 ptr;
assembly {
ptr := add(0x20, result)
}
copy(item.memPtr, ptr, item.len);
return result;
}
// any non-zero byte except "0x80" is considered true
function toBoolean(RLPItem memory item) internal pure returns (bool) {
require(item.len == 1);
uint256 result;
uint256 memPtr = item.memPtr;
assembly {
result := byte(0, mload(memPtr))
}
/*
* SEE Github Issue #5.
* Summary: Most commonly used RLP libraries (i.e Geth) will encode
* "0" as "0x80" instead of as "0". We handle this edge case explicitly
* here.
*/
if (result == 0 || result == STRING_SHORT_START) {
return false;
} else {
return true;
}
}
function toAddress(RLPItem memory item) internal pure returns (address) {
// 1 byte for the length prefix
// Only accepts the canonical 20-byte address encoding (0x94 prefix).
require(item.len == 21);
return address(uint160(toUint(item)));
}
function toUint(RLPItem memory item) internal pure returns (uint256) {
// At most a 1-byte prefix plus a 32-byte big-endian integer.
require(item.len > 0 && item.len <= 33);
(uint256 memPtr, uint256 len) = payloadLocation(item);
uint256 result;
assembly {
result := mload(memPtr)
// shift to the correct location if neccesary
if lt(len, 32) { result := div(result, exp(256, sub(32, len))) }
}
return result;
}
// enforces 32 byte length
function toUintStrict(RLPItem memory item) internal pure returns (uint256) {
// one byte prefix
require(item.len == 33);
uint256 result;
uint256 memPtr = item.memPtr + 1;
assembly {
result := mload(memPtr)
}
return result;
}
// Copies the item's payload (prefix stripped) into a fresh bytes array.
function toBytes(RLPItem memory item) internal pure returns (bytes memory) {
require(item.len > 0);
(uint256 memPtr, uint256 len) = payloadLocation(item);
bytes memory result = new bytes(len);
uint256 destPtr;
assembly {
destPtr := add(0x20, result)
}
copy(memPtr, destPtr, len);
return result;
}
/*
* Private Helpers
*/
// @return number of payload items inside an encoded list.
function numItems(RLPItem memory item) private pure returns (uint256) {
if (item.len == 0) return 0;
uint256 count = 0;
uint256 currPtr = item.memPtr + _payloadOffset(item.memPtr);
uint256 endPtr = item.memPtr + item.len;
while (currPtr < endPtr) {
currPtr = currPtr + _itemLength(currPtr); // skip over an item
count++;
}
return count;
}
// @return entire rlp item byte length
function _itemLength(uint256 memPtr) private pure returns (uint256) {
uint256 itemLen;
uint256 byte0;
assembly {
byte0 := byte(0, mload(memPtr))
}
if (byte0 < STRING_SHORT_START) {
// Single byte [0x00, 0x7f]: the byte is its own encoding.
itemLen = 1;
} else if (byte0 < STRING_LONG_START) {
// Short string: prefix encodes payload length directly.
itemLen = byte0 - STRING_SHORT_START + 1;
} else if (byte0 < LIST_SHORT_START) {
// Long string: prefix encodes the length-of-length.
assembly {
let byteLen := sub(byte0, 0xb7) // # of bytes the actual length is
memPtr := add(memPtr, 1) // skip over the first byte
// 32 byte word size
let dataLen := div(mload(memPtr), exp(256, sub(32, byteLen))) // right shifting to get the len
itemLen := add(dataLen, add(byteLen, 1))
}
} else if (byte0 < LIST_LONG_START) {
// Short list: prefix encodes payload length directly.
itemLen = byte0 - LIST_SHORT_START + 1;
} else {
// Long list: prefix encodes the length-of-length.
assembly {
let byteLen := sub(byte0, 0xf7)
memPtr := add(memPtr, 1)
let dataLen := div(mload(memPtr), exp(256, sub(32, byteLen))) // right shifting to the correct length
itemLen := add(dataLen, add(byteLen, 1))
}
}
return itemLen;
}
// @return number of bytes until the data
function _payloadOffset(uint256 memPtr) private pure returns (uint256) {
uint256 byte0;
assembly {
byte0 := byte(0, mload(memPtr))
}
if (byte0 < STRING_SHORT_START) {
// Single byte: no prefix at all.
return 0;
} else if (byte0 < STRING_LONG_START || (byte0 >= LIST_SHORT_START && byte0 < LIST_LONG_START)) {
// Short string or short list: one prefix byte.
return 1;
} else if (byte0 < LIST_SHORT_START) {
// being explicit
// Long string: 1 prefix byte + length-of-length bytes.
return byte0 - (STRING_LONG_START - 1) + 1;
} else {
// Long list: 1 prefix byte + length-of-length bytes.
return byte0 - (LIST_LONG_START - 1) + 1;
}
}
/*
* @param src Pointer to source
* @param dest Pointer to destination
* @param len Amount of memory to copy from the source
*/
function copy(uint256 src, uint256 dest, uint256 len) private pure {
if (len == 0) return;
// copy as many word sizes as possible
for (; len >= WORD_SIZE; len -= WORD_SIZE) {
assembly {
mstore(dest, mload(src))
}
src += WORD_SIZE;
dest += WORD_SIZE;
}
if (len > 0) {
// left over bytes. Mask is used to remove unwanted bytes from the word
uint256 mask = 256 ** (WORD_SIZE - len) - 1;
assembly {
let srcpart := and(mload(src), not(mask)) // zero out src
let destpart := and(mload(dest), mask) // retrieve the bytes
mstore(dest, or(destpart, srcpart))
}
}
}
}
================================================
FILE: src/trie/substrate/ScaleCodec.sol
================================================
// Copyright (C) Polytope Labs Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pragma solidity ^0.8.20;
import {Bytes, ByteSlice} from "../Bytes.sol";
library ScaleCodec {
    /// @notice Decodes a little-endian byte array (big endian per byte, SCALE
    ///         layout) into a uint256.
    /// @dev `data` must be at most 32 bytes long; longer inputs would overflow
    ///      the shift amount and revert.
    function decodeUint256(bytes memory data) internal pure returns (uint256) {
        uint256 number;
        for (uint256 i = data.length; i > 0; i--) {
            number =
                number +
                uint256(uint8(data[i - 1])) *
                (2 ** (8 * (i - 1)));
        }
        return number;
    }

    /// @notice Decodes a SCALE compact-encoded unsigned integer from a slice,
    ///         advancing the slice's offset past the consumed bytes.
    /// @dev The two low bits of the first byte select the encoding mode.
    function decodeUintCompact(
        ByteSlice memory data
    ) internal pure returns (uint256 v) {
        uint8 b = Bytes.readByte(data); // read the first byte
        uint8 mode = b % 4; // low two bits select the mode
        uint256 value;
        if (mode == 0) {
            // single-byte mode: [0, 63]
            value = b >> 2; // right shift to remove mode bits
        } else if (mode == 1) {
            // two-byte mode: [64, 16383]
            uint8 bb = Bytes.readByte(data); // read the second byte
            uint64 r = bb; // convert to uint64
            r <<= 6; // multiply by 2^6
            r += b >> 2; // right shift to remove mode bits
            value = r;
        } else if (mode == 2) {
            // four-byte mode: [16384, 1073741823]
            uint8 b2 = Bytes.readByte(data); // read the next 3 bytes
            uint8 b3 = Bytes.readByte(data);
            uint8 b4 = Bytes.readByte(data);
            uint32 x1 = uint32(b) | (uint32(b2) << 8); // assemble little endian
            uint32 x2 = x1 | (uint32(b3) << 16);
            uint32 x3 = x2 | (uint32(b4) << 24);
            x3 >>= 2; // remove the 2 mode bits
            value = uint256(x3);
        } else if (mode == 3) {
            // big-integer mode: upper 6 bits of the prefix byte encode the
            // payload byte-length minus 4.
            uint8 l = (b >> 2) + 4; // remove mode bits, add length bias
            // NOTE(review): this overload caps payloads at 8 bytes (uint64
            // range) — sufficient for trie length prefixes, narrower than the
            // bytes-overload below.
            require(l <= 8, "unexpected prefix decoding Compact");
            return decodeUint256(Bytes.read(data, l));
        } else {
            revert("Code should be unreachable");
        }
        return value;
    }

    /// @notice Decodes a SCALE compact-encoded unsigned integer from raw bytes.
    /// @param data the encoded bytes, starting with the compact prefix byte
    /// @return v the decoded value
    /// @return m the encoding mode (0..3) taken from the prefix byte
    function decodeUintCompact(
        bytes memory data
    ) internal pure returns (uint256 v, uint8 m) {
        uint8 b = readByteAtIndex(data, 0); // read the first byte
        uint8 mode = b & 3; // low two bits select the mode
        uint256 value;
        if (mode == 0) {
            // single-byte mode: [0, 63]
            value = b >> 2; // right shift to remove mode bits
        } else if (mode == 1) {
            // two-byte mode: [64, 16383]
            uint8 bb = readByteAtIndex(data, 1); // read the second byte
            uint64 r = bb; // convert to uint64
            r <<= 6; // multiply by 2^6
            r += b >> 2; // right shift to remove mode bits
            value = r;
        } else if (mode == 2) {
            // four-byte mode: [16384, 1073741823]
            uint8 b2 = readByteAtIndex(data, 1); // read the next 3 bytes
            uint8 b3 = readByteAtIndex(data, 2);
            uint8 b4 = readByteAtIndex(data, 3);
            uint32 x1 = uint32(b) | (uint32(b2) << 8); // assemble little endian
            uint32 x2 = x1 | (uint32(b3) << 16);
            uint32 x3 = x2 | (uint32(b4) << 24);
            x3 >>= 2; // remove the 2 mode bits
            value = uint256(x3);
        } else if (mode == 3) {
            // big-integer mode: upper 6 bits of the prefix byte encode the
            // payload byte-length minus 4 (mirrors the ByteSlice overload and
            // encodeUintCompact below).
            // Previous code inverted this require (passing only when the
            // length exceeded 32 bytes) and never assigned `value`.
            uint8 l = (b >> 2) + 4;
            require(
                l <= 32,
                "Not supported: number cannot be greater than 32 bytes"
            );
            uint256 result;
            for (uint8 i = 0; i < l; i++) {
                // payload bytes follow the prefix byte, little-endian
                result |= uint256(readByteAtIndex(data, i + 1)) << (8 * i);
            }
            value = result;
        } else {
            revert("Code should be unreachable");
        }
        return (value, mode);
    }

    /*
     * The biggest compact supported uint is 2 ** 536 - 1.
     * But the biggest value supported by this method is 2 ** 256 - 1 (max of uint256)
     */
    function encodeUintCompact(uint256 v) internal pure returns (bytes memory) {
        if (v < 64) {
            // single-byte mode
            return abi.encodePacked(uint8(v << 2));
        } else if (v < 2 ** 14) {
            // two-byte mode
            return abi.encodePacked(reverse16(uint16(((v << 2) + 1))));
        } else if (v < 2 ** 30) {
            // four-byte mode
            return abi.encodePacked(reverse32(uint32(((v << 2) + 2))));
        } else {
            // big-integer mode: little-endian payload with trailing zeros trimmed
            bytes memory valueBytes = Bytes.removeEndingZero(
                abi.encodePacked(reverse256(v))
            );
            uint256 length = valueBytes.length;
            uint8 prefix = uint8(((length - 4) << 2) + 3);
            return abi.encodePacked(prefix, valueBytes);
        }
    }

    // Read a byte at a specific index and return it as type uint8
    function readByteAtIndex(
        bytes memory data,
        uint8 index
    ) internal pure returns (uint8) {
        return uint8(data[index]);
    }

    /*
     * Byte-order reversal helpers (big endian <-> little endian).
     * Sources:
     * * https://ethereum.stackexchange.com/questions/15350/how-to-convert-an-bytes-to-address-in-solidity/50528
     * * https://graphics.stanford.edu/~seander/bithacks.html#ReverseParallel
     */
    function reverse256(uint256 input) internal pure returns (uint256 v) {
        v = input;
        // swap bytes
        v =
            ((v &
                0xFF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00) >>
                8) |
            ((v &
                0x00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF) <<
                8);
        // swap 2-byte long pairs
        v =
            ((v &
                0xFFFF0000FFFF0000FFFF0000FFFF0000FFFF0000FFFF0000FFFF0000FFFF0000) >>
                16) |
            ((v &
                0x0000FFFF0000FFFF0000FFFF0000FFFF0000FFFF0000FFFF0000FFFF0000FFFF) <<
                16);
        // swap 4-byte long pairs
        v =
            ((v &
                0xFFFFFFFF00000000FFFFFFFF00000000FFFFFFFF00000000FFFFFFFF00000000) >>
                32) |
            ((v &
                0x00000000FFFFFFFF00000000FFFFFFFF00000000FFFFFFFF00000000FFFFFFFF) <<
                32);
        // swap 8-byte long pairs
        v =
            ((v &
                0xFFFFFFFFFFFFFFFF0000000000000000FFFFFFFFFFFFFFFF0000000000000000) >>
                64) |
            ((v &
                0x0000000000000000FFFFFFFFFFFFFFFF0000000000000000FFFFFFFFFFFFFFFF) <<
                64);
        // swap 16-byte long pairs
        v = (v >> 128) | (v << 128);
    }

    function reverse128(uint128 input) internal pure returns (uint128 v) {
        v = input;
        // swap bytes
        v =
            ((v & 0xFF00FF00FF00FF00FF00FF00FF00FF00) >> 8) |
            ((v & 0x00FF00FF00FF00FF00FF00FF00FF00FF) << 8);
        // swap 2-byte long pairs
        v =
            ((v & 0xFFFF0000FFFF0000FFFF0000FFFF0000) >> 16) |
            ((v & 0x0000FFFF0000FFFF0000FFFF0000FFFF) << 16);
        // swap 4-byte long pairs
        v =
            ((v & 0xFFFFFFFF00000000FFFFFFFF00000000) >> 32) |
            ((v & 0x00000000FFFFFFFF00000000FFFFFFFF) << 32);
        // swap 8-byte long pairs
        v = (v >> 64) | (v << 64);
    }

    function reverse64(uint64 input) internal pure returns (uint64 v) {
        v = input;
        // swap bytes
        v = ((v & 0xFF00FF00FF00FF00) >> 8) | ((v & 0x00FF00FF00FF00FF) << 8);
        // swap 2-byte long pairs
        v = ((v & 0xFFFF0000FFFF0000) >> 16) | ((v & 0x0000FFFF0000FFFF) << 16);
        // swap 4-byte long pairs
        v = (v >> 32) | (v << 32);
    }

    function reverse32(uint32 input) internal pure returns (uint32 v) {
        v = input;
        // swap bytes
        v = ((v & 0xFF00FF00) >> 8) | ((v & 0x00FF00FF) << 8);
        // swap 2-byte long pairs
        v = (v >> 16) | (v << 16);
    }

    function reverse16(uint16 input) internal pure returns (uint16 v) {
        v = input;
        // swap bytes
        v = (v >> 8) | (v << 8);
    }

    // SCALE fixed-width encodings: little-endian byte order.
    function encode256(uint256 input) internal pure returns (bytes32) {
        return bytes32(reverse256(input));
    }

    function encode128(uint128 input) internal pure returns (bytes16) {
        return bytes16(reverse128(input));
    }

    function encode64(uint64 input) internal pure returns (bytes8) {
        return bytes8(reverse64(input));
    }

    function encode32(uint32 input) internal pure returns (bytes4) {
        return bytes4(reverse32(input));
    }

    function encode16(uint16 input) internal pure returns (bytes2) {
        return bytes2(reverse16(input));
    }

    // SCALE encoding of a byte vector: compact length prefix followed by the bytes.
    function encodeBytes(
        bytes memory input
    ) internal pure returns (bytes memory) {
        return abi.encodePacked(encodeUintCompact(input.length), input);
    }
}
================================================
FILE: src/trie/substrate/SubstrateTrieDB.sol
================================================
// Copyright (C) Polytope Labs Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pragma solidity ^0.8.20;
import {NodeKind, NibbledBranch, NodeHandleOption, Leaf, NodeHandle} from "../Node.sol";
import {Bytes, ByteSlice} from "../Bytes.sol";
import {NibbleSlice, NibbleSliceOps} from "../NibbleSlice.sol";
import {ScaleCodec} from "./ScaleCodec.sol";
// Decoder for Substrate's trie node format (SCALE-based, prefix-byte tagged).
library SubstrateTrieDB {
// Node header prefixes: the top bits of the first byte select the node kind.
uint8 public constant FIRST_PREFIX = 0x00 << 6;
uint8 public constant PADDING_BITMASK = 0x0F;
uint8 public constant EMPTY_TRIE = FIRST_PREFIX | (0x00 << 4);
uint8 public constant LEAF_PREFIX_MASK = 0x01 << 6;
uint8 public constant BRANCH_WITH_MASK = 0x03 << 6;
uint8 public constant BRANCH_WITHOUT_MASK = 0x02 << 6;
uint8 public constant ALT_HASHING_LEAF_PREFIX_MASK =
FIRST_PREFIX | (0x01 << 5);
uint8 public constant ALT_HASHING_BRANCH_WITH_MASK =
FIRST_PREFIX | (0x01 << 4);
uint8 public constant NIBBLE_PER_BYTE = 2;
// Maximum key nibble count accepted by decodeSize.
uint256 public constant NIBBLE_SIZE_BOUND = uint256(type(uint16).max);
uint256 public constant BITMAP_LENGTH = 2;
// NOTE: lowercase 'l' is a typo, but the constant is public (part of the
// library ABI) so the spelling is kept.
uint256 public constant HASH_lENGTH = 32;
// Classifies an encoded node by its first byte and decodes the key nibble
// count. Remaining bytes are kept in `node.data` for the decoders below.
function decodeNodeKind(
bytes memory encoded
) internal pure returns (NodeKind memory) {
NodeKind memory node;
ByteSlice memory input = ByteSlice(encoded, 0);
uint8 i = Bytes.readByte(input);
if (i == EMPTY_TRIE) {
node.isEmpty = true;
return node;
}
// Top two bits select the main node variants.
uint8 mask = i & (0x03 << 6);
if (mask == LEAF_PREFIX_MASK) {
node.nibbleSize = decodeSize(i, input, 2);
node.isLeaf = true;
} else if (mask == BRANCH_WITH_MASK) {
node.nibbleSize = decodeSize(i, input, 2);
node.isNibbledValueBranch = true;
} else if (mask == BRANCH_WITHOUT_MASK) {
node.nibbleSize = decodeSize(i, input, 2);
node.isNibbledBranch = true;
} else if (mask == EMPTY_TRIE) {
// Top bits are 00: check the alternative-hashing (state-version-1) prefixes.
if (i & (0x07 << 5) == ALT_HASHING_LEAF_PREFIX_MASK) {
node.nibbleSize = decodeSize(i, input, 3);
node.isHashedLeaf = true;
} else if (i & (0x0F << 4) == ALT_HASHING_BRANCH_WITH_MASK) {
node.nibbleSize = decodeSize(i, input, 4);
node.isNibbledHashedValueBranch = true;
} else {
// do not allow any special encoding
revert("Unallowed encoding");
}
}
node.data = input;
return node;
}
// Decodes a nibbled branch: partial key, children bitmap, optional value,
// and up to 16 child handles (hash or inline, chosen by encoded length).
function decodeNibbledBranch(
NodeKind memory node
) internal pure returns (NibbledBranch memory) {
NibbledBranch memory nibbledBranch;
ByteSlice memory input = node.data;
// An odd nibble count means the first key byte is half padding, which
// must be zero.
bool padding = node.nibbleSize % NIBBLE_PER_BYTE != 0;
if (padding && (padLeft(uint8(input.data[input.offset])) != 0)) {
revert("Bad Format!");
}
// ceil(nibbleSize / 2) bytes hold the partial key.
uint256 nibbleLen = ((node.nibbleSize +
(NibbleSliceOps.NIBBLE_PER_BYTE - 1)) /
NibbleSliceOps.NIBBLE_PER_BYTE);
nibbledBranch.key = NibbleSlice(
Bytes.read(input, nibbleLen),
node.nibbleSize % NIBBLE_PER_BYTE
);
// 16-bit bitmap: bit i set => child i is present.
bytes memory bitmapBytes = Bytes.read(input, BITMAP_LENGTH);
uint16 bitmap = uint16(ScaleCodec.decodeUint256(bitmapBytes));
NodeHandleOption memory valueHandle;
if (node.isNibbledHashedValueBranch) {
// Value stored out-of-line as a 32-byte hash.
valueHandle.isSome = true;
valueHandle.value.isHash = true;
valueHandle.value.hash = Bytes.toBytes32(
Bytes.read(input, HASH_lENGTH)
);
} else if (node.isNibbledValueBranch) {
// Value stored inline, compact-length-prefixed.
uint256 len = ScaleCodec.decodeUintCompact(input);
valueHandle.isSome = true;
valueHandle.value.isInline = true;
valueHandle.value.inLine = Bytes.read(input, len);
}
nibbledBranch.value = valueHandle;
for (uint256 i = 0; i < 16; i++) {
NodeHandleOption memory childHandle;
if (valueAt(bitmap, i)) {
childHandle.isSome = true;
uint256 len = ScaleCodec.decodeUintCompact(input);
// revert(string.concat("node index: ", Strings.toString(len)));
// A 32-byte child encoding is a hash reference; anything else is
// an inline node.
if (len == HASH_lENGTH) {
childHandle.value.isHash = true;
childHandle.value.hash = Bytes.toBytes32(
Bytes.read(input, HASH_lENGTH)
);
} else {
childHandle.value.isInline = true;
childHandle.value.inLine = Bytes.read(input, len);
}
}
nibbledBranch.children[i] = childHandle;
}
return nibbledBranch;
}
// Decodes a leaf: partial key followed by either a 32-byte value hash
// (hashed leaf) or a compact-length-prefixed inline value.
function decodeLeaf(
NodeKind memory node
) internal pure returns (Leaf memory) {
Leaf memory leaf;
ByteSlice memory input = node.data;
// Odd nibble count => first key byte's high nibble is padding, must be zero.
bool padding = node.nibbleSize % NIBBLE_PER_BYTE != 0;
if (padding && padLeft(uint8(input.data[input.offset])) != 0) {
revert("Bad Format!");
}
// ceil(nibbleSize / 2) bytes hold the partial key.
uint256 nibbleLen = (node.nibbleSize +
(NibbleSliceOps.NIBBLE_PER_BYTE - 1)) /
NibbleSliceOps.NIBBLE_PER_BYTE;
bytes memory nibbleBytes = Bytes.read(input, nibbleLen);
leaf.key = NibbleSlice(nibbleBytes, node.nibbleSize % NIBBLE_PER_BYTE);
NodeHandle memory handle;
if (node.isHashedLeaf) {
handle.isHash = true;
handle.hash = Bytes.toBytes32(Bytes.read(input, HASH_lENGTH));
} else {
uint256 len = ScaleCodec.decodeUintCompact(input);
handle.isInline = true;
handle.inLine = Bytes.read(input, len);
}
leaf.value = handle;
return leaf;
}
// Decodes the key nibble count from the header byte. The low
// (8 - prefixMask) bits of `first` hold the count; the saturating value
// means "continue reading 255-increment bytes until one is < 255".
function decodeSize(
uint8 first,
ByteSlice memory encoded,
uint8 prefixMask
) internal pure returns (uint256) {
uint8 maxValue = uint8(255 >> prefixMask);
uint256 result = uint256(first & maxValue);
if (result < maxValue) {
return result;
}
result -= 1;
while (result <= NIBBLE_SIZE_BOUND) {
uint256 n = uint256(Bytes.readByte(encoded));
if (n < 255) {
return result + n + 1;
}
result += 255;
}
// Saturate at the bound rather than reading unbounded continuation bytes.
return NIBBLE_SIZE_BOUND;
}
// Returns the padding (high) nibble of a key byte; must be zero for
// well-formed odd-length keys.
function padLeft(uint8 b) internal pure returns (uint8) {
return b & ~PADDING_BITMASK;
}
// Returns true if bit `i` of the children bitmap is set.
function valueAt(uint16 bitmap, uint256 i) internal pure returns (bool) {
return bitmap & (uint16(1) << uint16(i)) != 0;
}
}
================================================
FILE: tests/foundry/MerkleMountainRange.t.sol
================================================
// Copyright (C) Polytope Labs Ltd.
// SPDX-License-Identifier: Apache-2.0
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
pragma solidity ^0.8.17;
import {Test, console} from "forge-std/Test.sol";
import {MerkleMountainRange} from "../../src/MerkleMountainRange.sol";
contract MerkleMountainRangeTest is Test {
// No per-test state to initialize; present to satisfy the forge-std Test harness.
function setUp() public {}
/*
 * End-to-end usage of the MMR library: leaf hashes are pushed through a
 * HashIterator, then a proof produced by the counterpart Rust implementation
 * (`merkle_mountain_range.rs`) is checked against the known root.
 */
function testMerkleMountainRange() public {
    // Iterator with room for five leaf hashes, starting at offset 0.
    MerkleMountainRange.HashIterator memory iterator =
        MerkleMountainRange.HashIterator(0, new bytes32[](5));

    // Push the five leaf values in order.
    uint256[5] memory rawValues = [uint256(2), 5, 8, 10, 12];
    for (uint256 i = 0; i < rawValues.length; i++) {
        MerkleMountainRange._push(iterator, bytes32(rawValues[i]));
    }
    assertEq(iterator.offset, 5);

    uint256 leafCount = 14;

    // Proof generated by `merkle_mountain_range.rs` implementation and converted to bytes32:
    bytes32[] memory proof = new bytes32[](7);
    proof[0] = 0xa4a7208a40e95acaf2fe1a3c675b1b5d8c341060e4f179b76ba79493582a95a6;
    proof[1] = 0x989a7025bda9312b19569d9e84e33a624e7fc007e54db23b6758d5f819647071;
    proof[2] = 0xfc5b56233029d71e7e9aff8e230ff491475dee2d8074b27d5fecf8f5154d7c8d;
    proof[3] = 0x37db026959b7bafb26c0d292ecd69c24df5eab845d9625ac5301324402938f25;
    proof[4] = 0x754310be011a7a378b07fa7cbac39dbedcadf645c518ddec58deeaa8c29e0634;
    proof[5] = 0x06be3c46e5a06d7b3e438a9d698f4319dc628624a63e484d97f00b92d09edce7;
    proof[6] = 0x7463c9b814b5d9081938e21346fe8bf81a9a9a0dcfa7bcc03b644a361e395a3b;

    // Leaves: Leaf(leafIndex, hash) — heap position is derived on-chain
    MerkleMountainRange.Leaf[] memory leaves = new MerkleMountainRange.Leaf[](5);
    leaves[0] = MerkleMountainRange.Leaf(2, 0x2b97a4b75a93aa1ac8581fac0f7d4ab42406569409a737bdf9de584903b372c5);
    leaves[1] = MerkleMountainRange.Leaf(5, 0xd279eb4bf22b2aeded31e65a126516215a9d93f83e3e425fdcd1a05ab347e535);
    leaves[2] = MerkleMountainRange.Leaf(8, 0x38e18ac9b4d78020e0f164d6da9ea61b962ab1975bcf6e8e80e9a9fc2ae509f8);
    leaves[3] = MerkleMountainRange.Leaf(10, 0x1a3930f70948f7eb1ceab07ecdb0967986091fd8b4b4f447406045431abd9795);
    leaves[4] = MerkleMountainRange.Leaf(12, 0xe54ccfb12a140c2dddb6cf78d1c6121610260412c66d00658ed1267863427ab9);

    // Calculated root must match the Rust-generated root.
    bytes32 root = CalculateRoot(proof, leaves, leafCount);
    console.logBytes32(root);
    bytes32 expectedRoot = 0x5aac4bad5c6a9014429b7e19ec0e5cd059d28d697c9cdd3f71e78cb6bfbd2600;
    assertEq(root, expectedRoot);

    // The full verification entry point must accept the same proof.
    assertTrue(MerkleMountainRange.VerifyProof(expectedRoot, proof, leaves, leafCount));
}
/**
* @notice Regression test: leaf index=1 with leafCount=1 previously bypassed the early-exit
* check, allowing arbitrary leaf hashes to pass verification. Now reverts with
* OutOfBoundsLeaves() because the leaf is never assigned to any peak subtree.
*/
function testExploit_LeafIndexBypass() public {
bytes32 knownRoot = 0x466dddba7e9a84a0f2632b59be71b8bd489e3334a1314a61253f8b827c9d3a36;
// A single-element proof equal to the root itself — the shape the exploit used.
bytes32[] memory proof = new bytes32[](1);
proof[0] = knownRoot;
MerkleMountainRange.Leaf[] memory leaves = new MerkleMountainRange.Leaf[](1);
leaves[0] = MerkleMountainRange.Leaf(
1,
0xb870f0ca830d738b546237c54a050b892a1657766fae0786a412632ef318a99d
);
uint256 leafCount = 1;
// After fix: CalculateRoot reverts because the leaf at index=1 is never
// consumed by any peak subtree when leafCount=1
// (external self-call so expectRevert can observe the revert).
vm.expectRevert(MerkleMountainRange.OutOfBoundsLeaves.selector);
this.CalculateRoot(proof, leaves, leafCount);
}
/**
 * @notice Leaf index exceeds leafCount — leaf belongs to no subtree.
 * Uses real leaf hash from testMerkleMountainRange but with index 100 (out of range for leafCount=14).
 */
function testUnconsumedLeaves_IndexExceedsLeafCount() public {
    bytes32[7] memory proofNodes = [
        bytes32(0xa4a7208a40e95acaf2fe1a3c675b1b5d8c341060e4f179b76ba79493582a95a6),
        0x989a7025bda9312b19569d9e84e33a624e7fc007e54db23b6758d5f819647071,
        0xfc5b56233029d71e7e9aff8e230ff491475dee2d8074b27d5fecf8f5154d7c8d,
        0x37db026959b7bafb26c0d292ecd69c24df5eab845d9625ac5301324402938f25,
        0x754310be011a7a378b07fa7cbac39dbedcadf645c518ddec58deeaa8c29e0634,
        0x06be3c46e5a06d7b3e438a9d698f4319dc628624a63e484d97f00b92d09edce7,
        0x7463c9b814b5d9081938e21346fe8bf81a9a9a0dcfa7bcc03b644a361e395a3b
    ];
    bytes32[] memory proof = new bytes32[](7);
    for (uint256 i = 0; i < proofNodes.length; i++) {
        proof[i] = proofNodes[i];
    }
    MerkleMountainRange.Leaf[] memory leaves = new MerkleMountainRange.Leaf[](1);
    leaves[0] = MerkleMountainRange.Leaf(100, 0x2b97a4b75a93aa1ac8581fac0f7d4ab42406569409a737bdf9de584903b372c5);
    vm.expectRevert(MerkleMountainRange.OutOfBoundsLeaves.selector);
    this.CalculateRoot(proof, leaves, 14);
}
/**
 * @notice Leaf index equals leafCount — off by one, still out of range.
 * Index 14 is the first index past a 14-leaf MMR.
 */
function testUnconsumedLeaves_IndexEqualsLeafCount() public {
    bytes32[7] memory proofNodes = [
        bytes32(0xa4a7208a40e95acaf2fe1a3c675b1b5d8c341060e4f179b76ba79493582a95a6),
        0x989a7025bda9312b19569d9e84e33a624e7fc007e54db23b6758d5f819647071,
        0xfc5b56233029d71e7e9aff8e230ff491475dee2d8074b27d5fecf8f5154d7c8d,
        0x37db026959b7bafb26c0d292ecd69c24df5eab845d9625ac5301324402938f25,
        0x754310be011a7a378b07fa7cbac39dbedcadf645c518ddec58deeaa8c29e0634,
        0x06be3c46e5a06d7b3e438a9d698f4319dc628624a63e484d97f00b92d09edce7,
        0x7463c9b814b5d9081938e21346fe8bf81a9a9a0dcfa7bcc03b644a361e395a3b
    ];
    bytes32[] memory proof = new bytes32[](7);
    for (uint256 i = 0; i < proofNodes.length; i++) {
        proof[i] = proofNodes[i];
    }
    MerkleMountainRange.Leaf[] memory leaves = new MerkleMountainRange.Leaf[](1);
    leaves[0] = MerkleMountainRange.Leaf(14, 0xd279eb4bf22b2aeded31e65a126516215a9d93f83e3e425fdcd1a05ab347e535);
    vm.expectRevert(MerkleMountainRange.OutOfBoundsLeaves.selector);
    this.CalculateRoot(proof, leaves, 14);
}
/**
 * @notice Leaf index is max uint256 — never passes any subtree boundary check.
 */
function testUnconsumedLeaves_MaxUint256Index() public {
    bytes32[7] memory proofNodes = [
        bytes32(0xa4a7208a40e95acaf2fe1a3c675b1b5d8c341060e4f179b76ba79493582a95a6),
        0x989a7025bda9312b19569d9e84e33a624e7fc007e54db23b6758d5f819647071,
        0xfc5b56233029d71e7e9aff8e230ff491475dee2d8074b27d5fecf8f5154d7c8d,
        0x37db026959b7bafb26c0d292ecd69c24df5eab845d9625ac5301324402938f25,
        0x754310be011a7a378b07fa7cbac39dbedcadf645c518ddec58deeaa8c29e0634,
        0x06be3c46e5a06d7b3e438a9d698f4319dc628624a63e484d97f00b92d09edce7,
        0x7463c9b814b5d9081938e21346fe8bf81a9a9a0dcfa7bcc03b644a361e395a3b
    ];
    bytes32[] memory proof = new bytes32[](7);
    for (uint256 i = 0; i < proofNodes.length; i++) {
        proof[i] = proofNodes[i];
    }
    MerkleMountainRange.Leaf[] memory leaves = new MerkleMountainRange.Leaf[](1);
    leaves[0] = MerkleMountainRange.Leaf(type(uint256).max, 0x38e18ac9b4d78020e0f164d6da9ea61b962ab1975bcf6e8e80e9a9fc2ae509f8);
    vm.expectRevert(MerkleMountainRange.OutOfBoundsLeaves.selector);
    this.CalculateRoot(proof, leaves, 14);
}
/**
 * @notice Valid leaves from testMerkleMountainRange with one extra
 * out-of-range leaf appended. The valid leaves compute correctly but the
 * leaf at index 50 is never consumed, so the call must revert.
 */
function testUnconsumedLeaves_MixedValidAndInvalid() public {
    // Five in-range leaves plus one past the 14-leaf boundary (index 50).
    MerkleMountainRange.Leaf[] memory claimed = new MerkleMountainRange.Leaf[](6);
    claimed[0] = MerkleMountainRange.Leaf(2, 0x2b97a4b75a93aa1ac8581fac0f7d4ab42406569409a737bdf9de584903b372c5);
    claimed[1] = MerkleMountainRange.Leaf(5, 0xd279eb4bf22b2aeded31e65a126516215a9d93f83e3e425fdcd1a05ab347e535);
    claimed[2] = MerkleMountainRange.Leaf(8, 0x38e18ac9b4d78020e0f164d6da9ea61b962ab1975bcf6e8e80e9a9fc2ae509f8);
    claimed[3] = MerkleMountainRange.Leaf(10, 0x1a3930f70948f7eb1ceab07ecdb0967986091fd8b4b4f447406045431abd9795);
    claimed[4] = MerkleMountainRange.Leaf(12, 0xe54ccfb12a140c2dddb6cf78d1c6121610260412c66d00658ed1267863427ab9);
    claimed[5] = MerkleMountainRange.Leaf(50, 0xe54ccfb12a140c2dddb6cf78d1c6121610260412c66d00658ed1267863427ab9);

    // Sibling hashes for the 14-leaf MMR used across these tests.
    bytes32[] memory siblings = new bytes32[](7);
    siblings[0] = 0xa4a7208a40e95acaf2fe1a3c675b1b5d8c341060e4f179b76ba79493582a95a6;
    siblings[1] = 0x989a7025bda9312b19569d9e84e33a624e7fc007e54db23b6758d5f819647071;
    siblings[2] = 0xfc5b56233029d71e7e9aff8e230ff491475dee2d8074b27d5fecf8f5154d7c8d;
    siblings[3] = 0x37db026959b7bafb26c0d292ecd69c24df5eab845d9625ac5301324402938f25;
    siblings[4] = 0x754310be011a7a378b07fa7cbac39dbedcadf645c518ddec58deeaa8c29e0634;
    siblings[5] = 0x06be3c46e5a06d7b3e438a9d698f4319dc628624a63e484d97f00b92d09edce7;
    siblings[6] = 0x7463c9b814b5d9081938e21346fe8bf81a9a9a0dcfa7bcc03b644a361e395a3b;

    vm.expectRevert(MerkleMountainRange.OutOfBoundsLeaves.selector);
    this.CalculateRoot(siblings, claimed, 14);
}
/// @dev External wrapper around the library call so tests can invoke it via
/// `this.` and use `vm.expectRevert` against it.
function VerifyProof(
    bytes32 root,
    bytes32[] memory proof,
    MerkleMountainRange.Leaf[] memory leaves,
    uint256 leafCount
) public pure returns (bool) {
    bool verified = MerkleMountainRange.VerifyProof(root, proof, leaves, leafCount);
    return verified;
}
/// @dev External wrapper around the library call so tests can invoke it via
/// `this.` and use `vm.expectRevert` against it.
function CalculateRoot(
    bytes32[] memory proof,
    MerkleMountainRange.Leaf[] memory leaves,
    uint256 leafCount
) public pure returns (bytes32) {
    bytes32 computedRoot = MerkleMountainRange.CalculateRoot(proof, leaves, leafCount);
    return computedRoot;
}
}
================================================
FILE: tests/foundry/MerkleMultiProof.t.sol
================================================
// Copyright (C) Polytope Labs Ltd.
// SPDX-License-Identifier: Apache-2.0
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
pragma solidity ^0.8.17;
import {Test, console} from "forge-std/Test.sol";
import {MerkleMultiProof} from "../../src/MerkleMultiProof.sol";
contract MerkleMultiProofTest is Test {
    /// @notice Computes the multi-proof root via the library and logs the gas
    /// consumed by the library call (read from the rust harness output).
    function CalculateRoot(
        bytes32[] memory proof,
        MerkleMultiProof.Leaf[] memory leaves,
        uint256 numLeaves
    ) public view returns (bytes32) {
        uint256 gasBefore = gasleft();
        bytes32 computedRoot = MerkleMultiProof.CalculateRoot(proof, leaves, numLeaves);
        // Log gas spent inside the library call only.
        console.log(gasBefore - gasleft());
        return computedRoot;
    }
}
================================================
FILE: tests/foundry/MerklePatricia.t.sol
================================================
// SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.20;
import {Test} from "forge-std/Test.sol";
import {MerklePatricia} from "../../src/MerklePatricia.sol";
import {SubstrateTrieDB} from "../../src/trie/substrate/SubstrateTrieDB.sol";
import {NodeKind, NibbledBranch, Leaf} from "../../src/trie/Node.sol";
import {ScaleCodec} from "../../src/trie/substrate/ScaleCodec.sol";
import {NibbleSlice, NibbleSliceOps} from "../../src/trie/NibbleSlice.sol";
import {ByteSlice} from "../../src/trie/Bytes.sol";
// Tests for MerklePatricia proof verification (substrate + ethereum tries),
// plus thin external wrappers over library internals so the rust fuzz/EVM
// harness can call them through the contract ABI.
contract MerklePatriciaTest is Test {
// Verifies a substrate state proof for the pallet_timestamp::Now storage key
// and checks the SCALE-decoded value against the expected timestamp.
function testSubstrateMerklePatricia() public pure {
bytes[] memory keys = new bytes[](1);
// trie key for pallet_timestamp::Now
keys[
0
] = hex"f0c365c3cf59d671eb72da0e7a4113c49f1f0515f462cdcf84e0f1d6045dfcbb";
// Two trie nodes: a branch node and the leaf carrying the value.
bytes[] memory proof = new bytes[](2);
proof[
0
] = hex"802e98809b03c6ae83e3b70aa89acfe0947b3a18b5d35569662335df7127ab8fcb88c88780e5d1b21c5ecc2891e3467f6273f27ce2e73a292d6b8306197edfa97b3d965bd080c51e5f53a03d92ea8b2792218f152da738b9340c6eeb08581145825348bbdba480ad103a9320581c7747895a01d79d2fa5f103c4b83c5af10b0a13bc1749749523806eea23c0854ced8445a3338833e2401753fdcfadb3b56277f8f1af4004f73719806d990657a5b5c3c97b8a917d9f153cafc463acd90592f881bc071d6ba64e90b380346031472f91f7c44631224cb5e61fb29d530a9fafd5253551cbf43b7e97e79a";
proof[
1
] = hex"9f00c365c3cf59d671eb72da0e7a4113c41002505f0e7b9012096b41c4eb3aaf947f6ea429080000685f0f1f0515f462cdcf84e0f1d6045dfcbb2035e90c7f86010000";
bytes32 root = hex"6b5710000eccbd59b6351fc2eb53ff2c1df8e0f816f7186ddd309ca85e8798dd";
bytes memory value = MerklePatricia
.VerifySubstrateProof(root, proof, keys)[0].value;
// SCALE-decode the proven value and compare with the expected timestamp.
uint256 timestamp = ScaleCodec.decodeUint256(value);
assert(timestamp == 1677168798005);
}
// Exercises verification when the entire proof is a single (root) node.
function testSubstrateMerklePatriciaSingleNode() public {
bytes[] memory keys = new bytes[](1);
// trie key for pallet_timestamp::Now
keys[0] = hex"00";
bytes[] memory proof = new bytes[](1);
proof[
0
] = hex"8100110034402c280401000b5db899138701804f1dc18c0729c67df638dcb17ff86372be663d0d85339a845510498c6c42fc3b";
bytes32 root = hex"9ec7b55dd538898d95dec220abf8f60e8c626bdb4a348d117d1ecaa564cb565c";
bytes memory value = MerklePatricia
.VerifySubstrateProof(root, proof, keys)[0].value;
// Compact-decode starting at byte offset 4 of the proven value.
// NOTE(review): the 4-byte skip presumably jumps over a preceding encoded
// field in the value — confirm against the runtime's value layout.
assertEq(
ScaleCodec.decodeUintCompact(ByteSlice(value, 4)),
1679661054045
);
}
// Verifies an Ethereum Merkle-Patricia storage proof (7 nodes deep) against
// a mainnet storage root and checks the RLP-encoded slot value.
function testEthereumMerklePatricia() public {
bytes[] memory keys = new bytes[](1);
// slot at 0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc
keys[
0
] = hex"75b20eef8615de99c108b05f0dbda081c91897128caa336d75dffb97c4132b4d";
// proof from account 0xbEb5Fc579115071764c7423A4f12eDde41f106Ed on Ethreum mainnet at block 19458609
bytes[] memory proof = new bytes[](7);
proof[
0
] = hex"f90211a0e36cd4c05239f5c535a7840f9f14b74bf287328e3fb87d09f02355e644f4378fa0fa16c734775c7d81b5d459addf9cf6ef8c8e20b71a4df4754c6be3895cf20977a0ec900883398b84efcb12fee51e991e878fac4ce09c6cc07c8d0c0941398a3664a01de14c6cc89ac9ba2e5294a300c5497bb6817f7481e68febf3975a8b017e5c15a010765286a0f030df2b4e13c82eb2c95fb2f865e1e5ae67fb19fbadc4c554771da0f5cac85a41291eb814b14706fa3d27c6f8890ce2fed6f92ef03b6e8d22e093f8a0ff187925da588f90250b7680887c3d8d155b51f58112a0cfdc6356884c60d4eaa06b82977e5bb3457a89b79f252f3f448817761c03e14fdd99988d0d71a914fb96a0d0ed46c3bf3a217660177d3c0f3744aa40d3a67ed4dabe5371dcd101ebcb71d7a02d642b6a1035a17fca9ea16138d6ede364da0af4b2c5c362360e3b3669e96a7da0768a6012c6be41406374fa24604b803906c2dc3421d6aa9752fddcd7042ddb94a09c139cf4bbd79b32c255928ff267ee22948d06976efbc0ac869cb5a674857917a06e617a3e1bc92d939a0b6feff4c8d688f74315af5cecfb9a2869267b326a8ba6a078368124e1131b2743d0fbdda3b1b0c0a8adedb1b8839bb99fd31e34d50d1922a0dccc4ddd77c9580f1443dfd64c2d6a22014ba2dccd81281e6663f488c3447e21a0dfaeb0b02df962296d9e0645c2ae2961e15d0d1d92392164624a502cad96fd9480";
proof[
1
] = hex"f90211a0486fcee760e354cec03ba3a431a9f035417f63c957ea05b33cf49e3afaef8741a08ef7adc166a1423cb4f035d5a83583726cd5803405a4b8684ad0c023203ce6a7a098d08b44d840f34fa68397f2a5453c4eb89e86195f96ccadfe14de7925664561a0817d070ecb8e3451a505c6f9d008db91763c68f186346dd4555962bbba4abe03a0115d9190622849e9205a96f863707032e29621447c34506145efc434dac91321a0298e4f7519bea373ceb5f94cb34d78383c4664e2f5beac262b01b428c3b2a280a0c0ff7e3638c51912db91d92a6d637a2c8bbcae73fe33bf3738db40369763e5aba0fde04f5dc892f5156e5b1fd7985e90d9c64330d439645b80311561a370338ef9a09bd1e2ef9548cacf1ce0057f13c93344300849fbca2638f3055bdf638d59948ea078a2c1a862906b603291741138bee56444fd3f34e6a4f3ca1f0e318429a94862a08ecc44cc4a06c9a7ac2e21860942750ada47e14e2f2efd561d03104525ba27aaa08d407dec2d5527e80d6d73e8ee8c1ef8cd8819bd82628faf93abe454f48b8a7ca0bf759b30dbbf0e5e6a3c5bc258a91b7c6778e6f0b474e7fdb4117c1659ea5774a06b30bcd113d0d3ea46f1134654f7172ccf7253d17397ed0e57847cb14f1a4a64a0c2b47985c05a2eb85463fc687d7e09bb8aafddf1c57a323652b04a9b791d76c9a0d65a53fe9963a9086bc3c7796aa30286f029b01814f561ff98f1f4b898ad264380";
proof[
2
] = hex"f90211a0496f94fb20f86b96949c1e8ff21646543893848e9ba0fc88c29989993d6f4733a0346f2b92631b4ccec787638be96dce0f9082650f6839f951b4003282f0ea2934a0ddd5e6839f84a978019b8e076dc6057c579e3951ef766dcfb3342b251c424ee4a04feb7109266fd467e79012fada3159bad47d478612b15c812d853b5bbfb038cea083181482a9c3349b408b2cb957b78dd71aa9faf842d727e889afbf8d2e8f5fe2a0d417081e6c2f8b46bfa50ada78eb44bd87d620e834d5107055567f383ef04427a001d74013d6ab118109e0b4f6d3be65dbf5d16857cdd7e7f2698ef28a06177b9aa075bb8b6d0cffe800afd4dc0df14cffe73140cce5c716e75fb56bb65db4b403f4a055db1b1c8e3760650d578218fb0787d80b56295ce5e302515d5c300217f5179ba0c338b90051d671575d53c2cee1bd88fc422e7b84d7fa7fb8e0ae9c414382b1e0a03c2f2bb41a5f16e318785c263dc7811b17f4efaee0c6ca2645364259cbcb8487a0bdb8ff1b3ab9c0ef2cb6d22eb12cb0883a040b5a331cb3e178310bf052753552a00d8dcb4eab5db9a22b7a31d5cdf9a67c02b959f4f4ae49d325a6597622586859a0db07ac1cc177e0b96f40fda37b035e3954f0890942d6286fb18b6a9ca9e0a7dda033db3178750e7f06f808461292ddf275697492a5df3a13f25e7e6afb6d31ca70a0a81061197b9e888f84b6e647eebddee898f41c7056e8f878e9a4cd1c27c65e5980";
proof[
3
] = hex"f8f1a0b9bbfd6ce22f4ae072ef3d74361ee4264c93d20f2db91820d80e7241a2fad769a021681b13bd0228de9fc4094fe565dfe7edd4904bac9aad26ed2a7f2bf8acea0fa00944ff72f3186a894ecbd5e4990ae7cf28d3014cfa2dfebb42a703218c5b5e808080808080a05500178fdbdbd39144f93e0bb3c5b1304337ed6b72a0898e4e951c4f2565637d808080a0e7b6274092da49eb53015db452d82ba172bd098ef90653d812dbcc32df8afb72a08a93d8b68d7ec95d2fe990bb6503309cc116a435f05ee710a24fc82f68c2c00f80a076be6f941a705872dfb43bca24baaa291663ad1332f60b9f28a7c111c941592c80";
proof[
4
] = hex"e210a0b798089c35eabfa9992866d0ff2d19040e85326547b79dad85be810b5482bfb2";
proof[
5
] = hex"f851808080808080808080a03b00a9adfccdbcb4252a987ba894a37829d4d2d5bf4f30740ac23f93a22510ab80808080a07904f9b847c710858697e144376dab844c380807ecf4b6b7364c57fc22a86ed18080";
proof[
6
] = hex"f59e20ef8615de99c108b05f0dbda081c91897128caa336d75dffb97c4132b4d9594ababe63514ddd6277356f8cc3d6518aa8bdeb4de";
bytes32 root = hex"c162613853b0d814a7aaaf9869f44cad1aab4cb151321da5c60ff3a2ddc14daf";
bytes memory value = VerifyEthereum(root, proof, keys)[0].value;
bytes
memory expectedOutput = hex"94ababe63514ddd6277356f8cc3d6518aa8bdeb4de";
assertEq(value, expectedOutput);
}
// ABI-visible wrapper over VerifySubstrateProof (called by the rust fuzzer).
function VerifyKeys(
bytes32 root,
bytes[] memory proof,
bytes[] memory keys
) public pure returns (MerklePatricia.StorageValue[] memory) {
return MerklePatricia.VerifySubstrateProof(root, proof, keys);
}
// ABI-visible wrapper over VerifyEthereumProof.
function VerifyEthereum(
bytes32 root,
bytes[] memory proof,
bytes[] memory keys
) public pure returns (MerklePatricia.StorageValue[] memory) {
return MerklePatricia.VerifyEthereumProof(root, proof, keys);
}
// --- Wrappers exposing SubstrateTrieDB decoding for unit tests ---
function decodeNodeKind(
bytes memory node
) public pure returns (NodeKind memory) {
return SubstrateTrieDB.decodeNodeKind(node);
}
function decodeNibbledBranch(
bytes memory node
) external pure returns (NibbledBranch memory) {
return
SubstrateTrieDB.decodeNibbledBranch(
SubstrateTrieDB.decodeNodeKind(node)
);
}
function decodeLeaf(bytes memory node) external pure returns (Leaf memory) {
return SubstrateTrieDB.decodeLeaf(SubstrateTrieDB.decodeNodeKind(node));
}
// --- Wrappers exposing NibbleSliceOps for unit tests ---
function nibbleLen(
NibbleSlice memory nibble
) public pure returns (uint256) {
return NibbleSliceOps.len(nibble);
}
function mid(
NibbleSlice memory self,
uint256 i
) public pure returns (NibbleSlice memory) {
return NibbleSliceOps.mid(self, i);
}
function isNibbleEmpty(NibbleSlice memory self) public pure returns (bool) {
return NibbleSliceOps.isEmpty(self);
}
function eq(
NibbleSlice memory self,
NibbleSlice memory other
) public pure returns (bool) {
return NibbleSliceOps.eq(self, other);
}
function nibbleAt(
NibbleSlice memory self,
uint256 i
) public pure returns (uint256) {
return NibbleSliceOps.at(self, i);
}
function startsWith(
NibbleSlice memory self,
NibbleSlice memory other
) public pure returns (bool) {
return NibbleSliceOps.startsWith(self, other);
}
function commonPrefix(
NibbleSlice memory self,
NibbleSlice memory other
) public pure returns (uint256) {
return NibbleSliceOps.commonPrefix(self, other);
}
}
================================================
FILE: tests/rust/Cargo.toml
================================================
[package]
name = "solidity-merkle-trees-test"
version = "0.1.0"
edition = "2021"
description = "Integration tests for solidity-merkle-trees"
authors = ["Polytope Labs "]
[dependencies]
once_cell = "1.17.0"
hex-literal = "0.4.1"
hex = "0.4.3"
trie-db = "0.28.0"
sp-trie = "29.0.0"
sp-core = "28.0.0"
primitive-types = "0.12.1"
codec = { package = "parity-scale-codec", version = "3.4.0" }
proptest = "=1.5.0"
rand = "0.8.5"
sp-runtime = "31.0.1"
binary-merkle-tree = "13.0.0"
# EVM execution
revm = { version = "19", default-features = false, features = ["std"] }
alloy-sol-types = "0.8"
alloy-primitives = "0.8"
serde_json = "1"
indexmap = "=2.7.1"
ruint = "=1.12.3"
# Merkle libraries
ckb-merkle-mountain-range = { git = "https://github.com/polytope-labs/merkle-mountain-range", branch = "seun/simplified-mmr" }
rs_merkle = { git = "https://github.com/polytope-labs/rs-merkle", branch = "seun/2d-merkle-proofs" }
# Prevent this from interfering with workspaces
[workspace]
members = [".", "fuzz"]
================================================
FILE: tests/rust/fuzz/Cargo.toml
================================================
[package]
name = "solidity-merkle-trees-fuzz"
version = "0.1.0"
edition = "2021"
[package.metadata]
cargo-fuzz = true
[dependencies]
solidity-merkle-trees-test = { path = "../" }
alloy-primitives = "0.8"
alloy-sol-types = "0.8"
# Trie deps
trie-db = "0.28.0"
sp-trie = "29.0.0"
sp-core = "28.0.0"
libfuzzer-sys = "0.4"
# Pin for Rust 1.81 compat
indexmap = "=2.7.1"
ruint = "=1.12.3"
[[bin]]
name = "trie_proof_invalid"
path = "fuzz_targets/trie_proof_invalid.rs"
[[bin]]
name = "trie_proof_valid"
path = "fuzz_targets/trie_proof_valid.rs"
================================================
FILE: tests/rust/fuzz/fuzz_targets/trie_proof_invalid.rs
================================================
#![no_main]
use libfuzzer_sys::fuzz_target;
use solidity_merkle_trees_fuzz::fuzz_that_verify_rejects_invalid_proofs;
// libFuzzer entrypoint: arbitrary bytes are decoded into trie data, every
// expected value is corrupted, and the solidity verifier must reject each one.
fuzz_target!(|data: &[u8]| {
fuzz_that_verify_rejects_invalid_proofs(data);
});
================================================
FILE: tests/rust/fuzz/fuzz_targets/trie_proof_valid.rs
================================================
#![no_main]
use libfuzzer_sys::fuzz_target;
use solidity_merkle_trees_fuzz::fuzz_that_verify_accepts_valid_proofs;
// libFuzzer entrypoint: arbitrary bytes are decoded into trie data, valid
// proofs are generated, and the solidity verifier must accept each one.
fuzz_target!(|data: &[u8]| {
fuzz_that_verify_accepts_valid_proofs(data);
});
================================================
FILE: tests/rust/fuzz/src/lib.rs
================================================
#![allow(dead_code, unused_imports)]
use alloy_primitives::FixedBytes;
use alloy_sol_types::{sol, SolCall};
use solidity_merkle_trees_test::evm_runner::{project_root, EvmRunner};
use sp_core::KeccakHasher;
use sp_trie::{LayoutV0, MemoryDB, StorageProof};
use std::collections::HashSet;
use trie_db::{
DBValue, Hasher, Recorder, Trie, TrieDBBuilder, TrieDBMutBuilder, TrieLayout, TrieMut,
};
// ABI bindings for the `VerifyKeys` wrapper exposed by the deployed
// MerklePatriciaTest contract (see tests/foundry/MerklePatricia.t.sol).
sol! {
struct StorageValue {
bytes key;
bytes value;
}
function VerifyKeys(bytes32 root, bytes[] proof, bytes[] keys) external pure returns (StorageValue[]);
}
/// Sort key/value pairs by key and drop duplicate keys, keeping the last
/// value seen for each key — the same semantics as inserting the pairs into
/// a trie in order, where a later insert overwrites an earlier one.
///
/// Fix: restores the `Vec<u8>` type parameters that were stripped from the
/// signature (`Vec<(Vec, Vec)>` does not compile).
fn data_sorted_unique(input: Vec<(Vec<u8>, Vec<u8>)>) -> Vec<(Vec<u8>, Vec<u8>)> {
    let mut m = std::collections::BTreeMap::new();
    for (k, v) in input.into_iter() {
        // Later duplicates overwrite earlier ones, like repeated trie inserts.
        let _ = m.insert(k, v);
    }
    // BTreeMap iteration yields entries in sorted key order.
    m.into_iter().collect()
}
/// Decode raw fuzz bytes into key/value pairs.
///
/// Layout: byte 0 and byte 1 give min/max key length (each `(b & 31) + 1`,
/// swapped if out of order); the remainder is consumed as
/// `[keylen-selector, key…, 2-byte value]` records until input runs out.
/// Note the reads deliberately overlap (the keylen selector byte is also the
/// first key byte, and value bytes overlap the next record) — this mirrors
/// the upstream trie-db fuzzer and is fine for fuzz input generation.
///
/// Fix: restores the `Vec<u8>` type parameters stripped from the return type.
fn fuzz_to_data(input: &[u8]) -> Vec<(Vec<u8>, Vec<u8>)> {
    let mut result = Vec::new();
    let mut minkeylen = if let Some(v) = input.get(0) {
        let mut v = *v & 31u8;
        v = v + 1;
        v
    } else {
        return result;
    };
    let mut maxkeylen = if let Some(v) = input.get(1) {
        let mut v = *v & 31u8;
        v = v + 1;
        v
    } else {
        return result;
    };
    if maxkeylen < minkeylen {
        let v = minkeylen;
        minkeylen = maxkeylen;
        maxkeylen = v;
    }
    let mut ix = 2;
    loop {
        // Key length is derived from the current byte, clamped to [min, max].
        let keylen = if let Some(v) = input.get(ix) {
            let mut v = *v & 31u8;
            v = v + 1;
            v = std::cmp::max(minkeylen, v);
            v = std::cmp::min(maxkeylen, v);
            v as usize
        } else {
            break;
        };
        let key = if input.len() > ix + keylen { input[ix..ix + keylen].to_vec() } else { break };
        ix += keylen;
        // Fixed two-byte value; `ix` is intentionally not advanced past it.
        let val = if input.len() > ix + 2 { input[ix..ix + 2].to_vec() } else { break };
        result.push((key, val));
    }
    result
}
fn test_generate_proof(
entries: Vec<(Vec, Vec)>,
keys: Vec>,
) -> (::Out, Vec>, Vec<(Vec, Option)>) {
let (db, root) = {
let mut db = >::default();
let mut root = Default::default();
{
let mut trie = TrieDBMutBuilder::::new(&mut db, &mut root).build();
for (key, value) in &entries {
trie.insert(key, value).unwrap();
}
}
(db, root)
};
let proof = {
let mut recorder = Recorder::::new();
let trie_db = TrieDBBuilder::::new(&db, &root).with_recorder(&mut recorder).build();
for (key, expected) in &entries {
let value = trie_db.get(key).unwrap().unwrap();
assert_eq!(&value, expected);
}
let proof = recorder.drain().into_iter().map(|f| f.data).collect::>();
{
let mdb = StorageProof::new(proof.clone()).into_memory_db::();
let trie_db = TrieDBBuilder::::new(&mdb, &root).build();
for (key, expected) in &entries {
let value = trie_db.get(key).unwrap().unwrap();
assert_eq!(&value, expected);
}
}
proof.into_iter().collect::>()
};
let trie = TrieDBBuilder::::new(&db, &root).build();
let items = keys
.into_iter()
.map(|key| {
let value = trie.get(&key).unwrap();
(key, value)
})
.collect();
(root, proof, items)
}
pub fn fuzz_that_verify_rejects_invalid_proofs(input: &[u8]) {
if input.len() < 4 {
return;
}
let random_int = u32::from_le_bytes(input[0..4].try_into().expect("slice is 4 bytes")) as usize;
let mut data = fuzz_to_data(&input[4..]);
let mut keys = data[(data.len() / 3)..].iter().map(|(key, _)| key.clone()).collect::>();
data.truncate(data.len() * 2 / 3);
let data = data_sorted_unique(data);
keys.sort();
keys.dedup();
if keys.is_empty() {
return;
}
let (root, proof, mut items) = test_generate_proof::>(data, keys);
if proof.is_empty() {
return;
}
// Make all items incorrect.
for i in 0..items.len() {
match &mut items[i] {
(_, Some(value)) if random_int % 2 == 0 => value.push(0),
(_, value) if value.is_some() => *value = None,
(_, value) => *value = Some(DBValue::new()),
}
}
let project = project_root();
let mut runner = EvmRunner::new();
let addr = runner.deploy(&project, "MerklePatriciaTest");
for (key, value) in items {
let call = VerifyKeysCall {
root: FixedBytes(root.into()),
proof: proof.clone().into_iter().map(Into::into).collect(),
keys: vec![key.into()],
};
let result_bytes = runner.call_raw(addr, call.abi_encode());
let decoded = VerifyKeysCall::abi_decode_returns(&result_bytes, true).unwrap();
let result =
if decoded._0[0].value.is_empty() { None } else { Some(decoded._0[0].value.to_vec()) };
assert_ne!(result, value);
}
}
pub fn fuzz_that_verify_accepts_valid_proofs(input: &[u8]) {
let mut data = fuzz_to_data(input);
let mut keys = data[(data.len() / 3)..].iter().map(|(key, _)| key.clone()).collect::>();
data.truncate(data.len() * 2 / 3);
let data = data_sorted_unique(data);
keys.sort();
keys.dedup();
let (root, proof, items) = test_generate_proof::>(data, keys);
if proof.is_empty() {
return;
}
let project = project_root();
let mut runner = EvmRunner::new();
let addr = runner.deploy(&project, "MerklePatriciaTest");
for (key, value) in items {
let call = VerifyKeysCall {
root: FixedBytes(root.into()),
proof: proof.clone().into_iter().map(Into::into).collect(),
keys: vec![key.into()],
};
let result_bytes = runner.call_raw(addr, call.abi_encode());
let decoded = VerifyKeysCall::abi_decode_returns(&result_bytes, true).unwrap();
let result =
if decoded._0[0].value.is_empty() { None } else { Some(decoded._0[0].value.to_vec()) };
assert_eq!(result, value);
}
}
================================================
FILE: tests/rust/proptest-regressions/merkle_mountain_range.txt
================================================
# Seeds for failure cases proptest has generated in the past. It is
# automatically read and these particular cases re-run before any
# novel cases are generated.
#
# It is recommended to check this file in to source control so that
# everyone who runs the test benefits from these saved cases.
cc 786a0e8756a4c1cc24c6eb3efb1a657707317d591e85b2281189ece37748aca5 # shrinks to count = 10
cc 760da80270e89498620305cdbaf0ffd6a979861dfafd990077e2cc4845341a52 # shrinks to count = 10
cc bc6092f077a4ef04cfd7f8a90053a5803ff86077af4fd33dd4c9ef39303ff74c # shrinks to count = 419
cc b6acdac8a7e3dad5786abcc9c45da828c30c9758cde9fcc71f490e07c9b5d016 # shrinks to count = 433
================================================
FILE: tests/rust/proptest-regressions/merkle_multi_proof.txt
================================================
# Seeds for failure cases proptest has generated in the past. It is
# automatically read and these particular cases re-run before any
# novel cases are generated.
#
# It is recommended to check this file in to source control so that
# everyone who runs the test benefits from these saved cases.
cc 8e11c95cc32d31490d02c792495052b00168f3d41817fdfeb10e7137b6b74252 # shrinks to num_leaves = 72, leaf_idx_raw = 64
cc fdabdbfc23c9998c8ebd4f6c887f616f1750e968ca096bc4f3a7589495c93c29 # shrinks to num_leaves = 6, leaf_idx_raw = 40, delta = 2
cc cff38b2870eeba958311cccd1f2d0c4695101c764e4e4c21d0b44cff8909f347 # shrinks to num_leaves = 14, leaf_idx_raw = 194, offset = 24
cc 477a0d84d51dbba35d5491a3b5ff6805bc1dedabb820eb97ab027eedf5aa39ed # shrinks to num_leaves = 2, leaf_idx_raw = 91, offset = 29
================================================
FILE: tests/rust/rustfmt.toml
================================================
# Basic
hard_tabs = false
max_width = 100
use_small_heuristics = "Max"
# Imports
imports_granularity = "Crate"
reorder_imports = true
# Consistency
newline_style = "Unix"
# Format comments
comment_width = 100
wrap_comments = true
# Misc
chain_width = 80
spaces_around_ranges = false
binop_separator = "Back"
reorder_impl_items = false
match_arm_leading_pipes = "Preserve"
match_arm_blocks = false
match_block_trailing_comma = true
trailing_comma = "Vertical"
trailing_semicolon = false
use_field_init_shorthand = true
================================================
FILE: tests/rust/src/evm_runner.rs
================================================
use alloy_primitives::{Address, Bytes, U256};
use revm::{
db::{CacheDB, EmptyDB},
primitives::{AccountInfo, ExecutionResult, Output, TransactTo},
Evm,
};
/// Disable the contract size limit for deployments (test contracts can be large)
const DISABLE_CONTRACT_SIZE_LIMIT: bool = true;
use std::path::Path;
pub struct EvmRunner {
db: CacheDB,
caller: Address,
}
impl EvmRunner {
pub fn new() -> Self {
let mut db = CacheDB::new(EmptyDB::default());
let caller = Address::repeat_byte(0x01);
db.insert_account_info(caller, AccountInfo { balance: U256::MAX, ..Default::default() });
Self { db, caller }
}
pub fn deploy(&mut self, project_root: &Path, contract_name: &str) -> Address {
let bytecode = load_bytecode(self, project_root, contract_name);
let result = {
let mut evm = Evm::builder()
.with_ref_db(&mut self.db)
.modify_cfg_env(|cfg| {
cfg.limit_contract_code_size =
if DISABLE_CONTRACT_SIZE_LIMIT { Some(usize::MAX) } else { None };
})
.modify_tx_env(|tx| {
tx.caller = self.caller;
tx.transact_to = TransactTo::Create;
tx.data = Bytes::from(bytecode);
tx.value = U256::ZERO;
tx.gas_limit = 30_000_000;
})
.build();
evm.transact_commit().unwrap()
};
match result {
ExecutionResult::Success { output: Output::Create(_, Some(addr)), .. } => addr,
other => panic!("deployment of {contract_name} failed: {other:?}"),
}
}
fn deploy_raw(&mut self, bytecode: Vec) -> Address {
let result = {
let mut evm = Evm::builder()
.with_ref_db(&mut self.db)
.modify_cfg_env(|cfg| {
cfg.limit_contract_code_size =
if DISABLE_CONTRACT_SIZE_LIMIT { Some(usize::MAX) } else { None };
})
.modify_tx_env(|tx| {
tx.caller = self.caller;
tx.transact_to = TransactTo::Create;
tx.data = Bytes::from(bytecode);
tx.value = U256::ZERO;
tx.gas_limit = 30_000_000;
})
.build();
evm.transact_commit().unwrap()
};
match result {
ExecutionResult::Success { output: Output::Create(_, Some(addr)), .. } => addr,
other => panic!("deployment of library failed: {other:?}"),
}
}
pub fn call_raw(&mut self, to: Address, calldata: Vec) -> Vec {
let result = {
let mut evm = Evm::builder()
.with_ref_db(&mut self.db)
.modify_tx_env(|tx| {
tx.caller = self.caller;
tx.transact_to = TransactTo::Call(to);
tx.data = Bytes::from(calldata);
tx.value = U256::ZERO;
tx.gas_limit = 30_000_000;
})
.build();
evm.transact_commit().unwrap()
};
match result {
ExecutionResult::Success { output: Output::Call(data), .. } => data.to_vec(),
other => panic!("call failed: {other:?}"),
}
}
pub fn call_with_gas(&mut self, to: Address, calldata: Vec) -> (Vec, u64) {
let result = {
let mut evm = Evm::builder()
.with_ref_db(&mut self.db)
.modify_tx_env(|tx| {
tx.caller = self.caller;
tx.transact_to = TransactTo::Call(to);
tx.data = Bytes::from(calldata);
tx.value = U256::ZERO;
tx.gas_limit = 30_000_000;
})
.build();
evm.transact_commit().unwrap()
};
match result {
ExecutionResult::Success { output: Output::Call(data), gas_used, .. } =>
(data.to_vec(), gas_used),
other => panic!("call failed: {other:?}"),
}
}
pub fn call_may_revert(&mut self, to: Address, calldata: Vec) -> Result, String> {
let result = {
let mut evm = Evm::builder()
.with_ref_db(&mut self.db)
.modify_tx_env(|tx| {
tx.caller = self.caller;
tx.transact_to = TransactTo::Call(to);
tx.data = Bytes::from(calldata);
tx.value = U256::ZERO;
tx.gas_limit = 30_000_000;
})
.build();
evm.transact_commit().unwrap()
};
match result {
ExecutionResult::Success { output: Output::Call(data), .. } => Ok(data.to_vec()),
ExecutionResult::Revert { output, .. } => Err(format!("reverted: {}", output)),
other => Err(format!("failed: {other:?}")),
}
}
}
/// Load bytecode from a foundry artifact, deploying and linking any libraries.
fn load_bytecode(runner: &mut EvmRunner, project_root: &Path, contract_name: &str) -> Vec {
let out_dir = project_root.join("out");
load_and_link_artifact(runner, &out_dir, contract_name)
}
/// Load and link a library, deploying any transitive library dependencies first.
fn load_and_link_artifact(runner: &mut EvmRunner, out_dir: &Path, artifact_name: &str) -> Vec {
// Find the artifact
for entry in std::fs::read_dir(out_dir).unwrap() {
let entry = entry.unwrap();
if entry.file_type().unwrap().is_dir() {
let json_path = entry.path().join(format!("{artifact_name}.json"));
if json_path.exists() {
let content = std::fs::read_to_string(&json_path).unwrap();
let artifact: serde_json::Value = serde_json::from_str(&content).unwrap();
let mut bytecode_hex = artifact["bytecode"]["object"]
.as_str()
.expect("missing bytecode.object in artifact")
.to_string();
// Recursively link any library dependencies
if let Some(link_refs) = artifact["bytecode"]["linkReferences"].as_object() {
for (_source_file, libs) in link_refs {
for (lib_name, offsets) in libs.as_object().unwrap() {
// Recursively load and deploy the library
let lib_bytecode = load_and_link_artifact(runner, out_dir, lib_name);
let lib_addr = runner.deploy_raw(lib_bytecode);
let addr_hex = hex::encode(lib_addr.as_slice());
for offset_info in offsets.as_array().unwrap() {
let start = offset_info["start"].as_u64().unwrap() as usize;
let length = offset_info["length"].as_u64().unwrap() as usize;
assert_eq!(length, 20);
let hex_start = if bytecode_hex.starts_with("0x") {
2 + start * 2
} else {
start * 2
};
let hex_end = hex_start + length * 2;
bytecode_hex.replace_range(hex_start..hex_end, &addr_hex);
}
}
}
}
let hex_str = bytecode_hex.strip_prefix("0x").unwrap_or(&bytecode_hex);
return hex::decode(hex_str).expect("invalid hex in bytecode after linking");
}
}
}
panic!("Artifact for '{artifact_name}' not found in {out_dir:?}");
}
/// Get the project root: two directory levels above the current working
/// directory (tests are run from `tests/rust/`).
pub fn project_root() -> std::path::PathBuf {
    let cwd = std::env::current_dir().unwrap();
    // ancestors() yields cwd, parent, grandparent, … — take the grandparent.
    cwd.ancestors().nth(2).unwrap().to_path_buf()
}
================================================
FILE: tests/rust/src/lib.rs
================================================
#![allow(unused_parens, dead_code)]
pub mod evm_runner;
pub mod merkle_mountain_range;
pub mod merkle_multi_proof;
pub mod merkle_patricia;
pub mod multi_proof_utils;
use alloy_primitives::keccak256;
use ckb_merkle_mountain_range::{Error, Merge};
use rs_merkle::Hasher;
/// Keccak-256 hasher used with `rs_merkle` trees in the tests.
#[derive(Clone)]
pub struct Keccak256;

impl Hasher for Keccak256 {
    type Hash = [u8; 32];

    /// Hash `data` with keccak-256 and return the raw 32-byte digest.
    fn hash(data: &[u8]) -> [u8; 32] {
        let digest = keccak256(data);
        digest.0
    }
}
pub struct MergeKeccak;
impl Merge for MergeKeccak {
type Item = NumberHash;
fn merge(lhs: &Self::Item, rhs: &Self::Item) -> Result {
let mut concat = vec![];
concat.extend(&lhs.0);
concat.extend(&rhs.0);
let hash = keccak256(&concat);
Ok(NumberHash(hash.0.to_vec()))
}
}
#[derive(Eq, PartialEq, Clone, Debug, Default)]
pub struct NumberHash(pub Vec);
impl From for NumberHash {
fn from(num: u32) -> Self {
let hash = keccak256(&num.to_le_bytes());
NumberHash(hash.0.to_vec())
}
}
================================================
FILE: tests/rust/src/merkle_mountain_range.rs
================================================
#![cfg(test)]
use crate::{
evm_runner::{project_root, EvmRunner},
MergeKeccak, NumberHash,
};
use alloy_primitives::{FixedBytes, U256};
use alloy_sol_types::{sol, SolCall};
use ckb_merkle_mountain_range::{util::MemStore, MMR};
use proptest::{prop_assert, proptest};
// ABI bindings matching the external wrappers on MerkleMountainRangeTest
// (see tests/foundry/MerkleMountainRange.t.sol).
sol! {
struct MmrLeaf {
uint256 index;
bytes32 hash;
}
function CalculateRoot(bytes32[] proof, MmrLeaf[] leaves, uint256 leafCount) external pure returns (bytes32);
function VerifyProof(bytes32 root, bytes32[] proof, MmrLeaf[] leaves, uint256 leafCount) external pure returns (bool);
}
fn solidity_calculate_root(
runner: &mut EvmRunner,
contract: alloy_primitives::Address,
custom_leaves: Vec<(u32, [u8; 32])>,
proof_items: Vec>,
leaf_count: u64,
) -> [u8; 32] {
let leaves: Vec = custom_leaves
.into_iter()
.map(|(index, hash)| MmrLeaf { index: U256::from(index), hash: FixedBytes(hash) })
.collect();
let proof: Vec> = proof_items
.into_iter()
.map(|p| {
let mut bytes = [0u8; 32];
bytes.copy_from_slice(&p);
FixedBytes(bytes)
})
.collect();
let call = CalculateRootCall { proof, leaves, leafCount: U256::from(leaf_count) };
let result = runner.call_raw(contract, call.abi_encode());
let decoded = CalculateRootCall::abi_decode_returns(&result, true).unwrap();
decoded._0.0
}
fn test_mmr(
runner: &mut EvmRunner,
contract: alloy_primitives::Address,
count: u32,
mut proof_elem: Vec,
) {
proof_elem.sort();
let store = MemStore::default();
let mut mmr = MMR::<_, MergeKeccak, _>::new(0, &store);
let positions: Vec =
(0u32..count).map(|i| mmr.push(NumberHash::from(i)).unwrap()).collect();
let root = mmr.get_root().expect("get root");
let proof = mmr
.gen_proof(proof_elem.iter().map(|elem| positions[*elem as usize]).collect())
.expect("gen proof");
mmr.commit().expect("commit changes");
let leaves = proof_elem
.iter()
.map(|elem| (positions[*elem as usize], NumberHash::from(*elem)))
.collect::>();
let result = proof.verify(root.clone(), leaves.clone()).unwrap();
assert!(result);
let mut custom_leaves = leaves
.into_iter()
.zip(proof_elem.clone().into_iter())
.map(|((_pos, leaf), index)| {
let mut hash = [0u8; 32];
hash.copy_from_slice(&leaf.0);
(index, hash)
})
.collect::>();
custom_leaves.dedup_by(|a, b| a.0 == b.0);
custom_leaves.sort_by(|a, b| a.0.cmp(&b.0));
let calculated = solidity_calculate_root(
runner,
contract,
custom_leaves,
proof.proof_items().to_vec().into_iter().map(|n| n.0).collect(),
count as u64,
);
let mut root_hash = [0u8; 32];
root_hash.copy_from_slice(&root.0);
assert_eq!(root_hash, calculated);
}
/// Spin up a fresh EVM and deploy the `MerkleMountainRangeTest` contract,
/// returning the runner together with the deployed address.
fn setup() -> (EvmRunner, alloy_primitives::Address) {
    let project = project_root();
    let mut evm = EvmRunner::new();
    let contract = evm.deploy(&project, "MerkleMountainRangeTest");
    (evm, contract)
}
// MMR shape cases (per the test names): leaf counts chosen so the range has
// three, two, or one peak(s), plus proofs of the very first and last leaves.
#[test]
fn test_mmr_3_peaks() {
    let (mut runner, addr) = setup();
    test_mmr(&mut runner, addr, 11, vec![5]);
}
#[test]
fn test_mmr_2_peaks() {
    let (mut runner, addr) = setup();
    test_mmr(&mut runner, addr, 10, vec![5]);
}
#[test]
fn test_mmr_1_peak() {
    let (mut runner, addr) = setup();
    test_mmr(&mut runner, addr, 8, vec![5]);
}
#[test]
fn test_mmr_first_elem_proof() {
    let (mut runner, addr) = setup();
    test_mmr(&mut runner, addr, 11, vec![0]);
}
#[test]
fn test_mmr_last_elem_proof() {
    let (mut runner, addr) = setup();
    test_mmr(&mut runner, addr, 11, vec![10]);
}
// Regression fixture: a previously-failing set of leaf indices over a
// 127-leaf MMR, kept verbatim so the exact case stays covered.
#[test]
fn test_failing_case() {
    let (mut runner, addr) = setup();
    let elem = vec![
        85, 120, 113, 104, 109, 6, 101, 97, 41, 95, 15, 52, 19, 82, 33, 102, 114, 70, 53, 32, 107,
        65, 59, 80, 72, 36, 64, 22, 16, 38, 57, 106, 74, 76, 28, 81, 117, 83, 61, 122, 1, 12, 14,
        63, 20, 46, 4, 24, 111, 90, 2, 29, 126,
    ];
    test_mmr(&mut runner, addr, 127, elem);
}
// Degenerate sizes: a single-leaf MMR, and a two-leaf MMR proving each leaf.
#[test]
fn test_mmr_1_elem() {
    let (mut runner, addr) = setup();
    test_mmr(&mut runner, addr, 1, vec![0]);
}
#[test]
fn test_mmr_2_elems() {
    let (mut runner, addr) = setup();
    test_mmr(&mut runner, addr, 2, vec![0]);
    test_mmr(&mut runner, addr, 2, vec![1]);
}
// Multi-leaf proofs: non-adjacent pairs, sibling pairs, triples, and a
// request containing duplicate indices (deduplicated inside `test_mmr`).
#[test]
fn test_mmr_2_leaves_merkle_proof() {
    let (mut runner, addr) = setup();
    test_mmr(&mut runner, addr, 11, vec![3, 7]);
    test_mmr(&mut runner, addr, 11, vec![3, 4]);
}
#[test]
fn test_mmr_2_sibling_leaves_merkle_proof() {
    let (mut runner, addr) = setup();
    test_mmr(&mut runner, addr, 11, vec![4, 5]);
    test_mmr(&mut runner, addr, 11, vec![5, 6]);
    test_mmr(&mut runner, addr, 11, vec![6, 7]);
}
#[test]
fn test_mmr_3_leaves_merkle_proof() {
    let (mut runner, addr) = setup();
    test_mmr(&mut runner, addr, 11, vec![4, 5, 6]);
    test_mmr(&mut runner, addr, 11, vec![3, 5, 7]);
    test_mmr(&mut runner, addr, 11, vec![3, 4, 5]);
    test_mmr(&mut runner, addr, 100, vec![3, 5, 13]);
}
#[test]
fn test_gen_proof_with_duplicate_leaves() {
    let (mut runner, addr) = setup();
    test_mmr(&mut runner, addr, 10, vec![5, 5]);
}
fn solidity_verify_proof(
runner: &mut EvmRunner,
contract: alloy_primitives::Address,
root: [u8; 32],
proof: Vec>,
leaves: Vec,
leaf_count: u64,
) -> Result {
let call = VerifyProofCall {
root: FixedBytes(root),
proof,
leaves,
leafCount: U256::from(leaf_count),
};
match runner.call_may_revert(contract, call.abi_encode()) {
Ok(result) => {
let decoded = VerifyProofCall::abi_decode_returns(&result, true)
.map_err(|e| format!("decode error: {e}"))?;
Ok(decoded._0)
},
Err(e) => Err(e),
}
}
/// Build a valid MMR proof and return all the pieces needed for Solidity verification.
fn build_mmr_proof(
count: u32,
leaf_idx: u32,
) -> (
[u8; 32], // root
Vec>, // proof items
Vec, // leaves
[u8; 32], // leaf hash
) {
let store = MemStore::default();
let mut mmr = MMR::<_, MergeKeccak, _>::new(0, &store);
let positions: Vec = (0..count).map(|i| mmr.push(NumberHash::from(i)).unwrap()).collect();
let root = mmr.get_root().unwrap();
let proof = mmr.gen_proof(vec![positions[leaf_idx as usize]]).unwrap();
mmr.commit().unwrap();
let leaf = NumberHash::from(leaf_idx);
let mut leaf_hash = [0u8; 32];
leaf_hash.copy_from_slice(&leaf.0);
let mut root_hash = [0u8; 32];
root_hash.copy_from_slice(&root.0);
let sol_proof: Vec> = proof
.proof_items()
.iter()
.map(|p| {
let mut b = [0u8; 32];
b.copy_from_slice(&p.0);
FixedBytes(b)
})
.collect();
let sol_leaves = vec![MmrLeaf { index: U256::from(leaf_idx), hash: FixedBytes(leaf_hash) }];
(root_hash, sol_proof, sol_leaves, leaf_hash)
}
// Property tests: a valid random proof must verify, and every mutation of a
// valid proof must either revert or fail verification.
proptest! {
    #[test]
    fn test_random_mmr(count in 10u32..500u32) {
        use rand::seq::SliceRandom;
        use rand::Rng;
        // Pick a random, non-empty, strict subset of the leaves to prove.
        let mut leaves: Vec<u32> = (0..count).collect();
        let mut rng = rand::thread_rng();
        leaves.shuffle(&mut rng);
        let leaves_count = rng.gen_range(1..count - 1);
        leaves.truncate(leaves_count as usize);
        let (mut runner, addr) = setup();
        test_mmr(&mut runner, addr, count, leaves);
    }
    /// Corrupting a proof element must not verify.
    #[test]
    fn test_corrupt_proof_element(
        count in 2u32..200u32,
        leaf_idx_raw in 0u32..200u32,
        byte_idx in 0usize..32,
    ) {
        let leaf_idx = leaf_idx_raw % count;
        let (root_hash, mut sol_proof, sol_leaves, _) = build_mmr_proof(count, leaf_idx);
        if sol_proof.is_empty() { return Ok(()); }
        sol_proof[0].0[byte_idx] ^= 0xff;
        let (mut runner, addr) = setup();
        match solidity_verify_proof(&mut runner, addr, root_hash, sol_proof, sol_leaves, count as u64) {
            Ok(verified) => prop_assert!(!verified, "corrupted proof verified for count={count}, leaf={leaf_idx}"),
            Err(_) => {} // revert is fine
        }
    }
    /// Corrupting the leaf hash must not verify.
    #[test]
    fn test_corrupt_leaf_hash(
        count in 2u32..200u32,
        leaf_idx_raw in 0u32..200u32,
        byte_idx in 0usize..32,
    ) {
        let leaf_idx = leaf_idx_raw % count;
        let (root_hash, sol_proof, mut sol_leaves, _) = build_mmr_proof(count, leaf_idx);
        sol_leaves[0].hash.0[byte_idx] ^= 0xff;
        let (mut runner, addr) = setup();
        match solidity_verify_proof(&mut runner, addr, root_hash, sol_proof, sol_leaves, count as u64) {
            Ok(verified) => prop_assert!(!verified, "forged leaf hash verified for count={count}, leaf={leaf_idx}"),
            Err(_) => {}
        }
    }
    /// Wrong root must not verify.
    #[test]
    fn test_wrong_root(
        count in 2u32..200u32,
        leaf_idx_raw in 0u32..200u32,
        byte_idx in 0usize..32,
    ) {
        let leaf_idx = leaf_idx_raw % count;
        let (mut root_hash, sol_proof, sol_leaves, _) = build_mmr_proof(count, leaf_idx);
        root_hash[byte_idx] ^= 0xff;
        let (mut runner, addr) = setup();
        match solidity_verify_proof(&mut runner, addr, root_hash, sol_proof, sol_leaves, count as u64) {
            Ok(verified) => prop_assert!(!verified, "wrong root verified for count={count}, leaf={leaf_idx}"),
            Err(_) => {}
        }
    }
    /// Out-of-bounds leaf index must not verify.
    #[test]
    fn test_oob_leaf_index(
        count in 2u32..200u32,
        leaf_idx_raw in 0u32..200u32,
        offset in 1u64..256u64,
    ) {
        let leaf_idx = leaf_idx_raw % count;
        let (root_hash, sol_proof, mut sol_leaves, _) = build_mmr_proof(count, leaf_idx);
        sol_leaves[0].index = U256::from(count as u64 + offset);
        let (mut runner, addr) = setup();
        match solidity_verify_proof(&mut runner, addr, root_hash, sol_proof, sol_leaves, count as u64) {
            Ok(verified) => prop_assert!(!verified, "OOB leaf index verified for count={count}"),
            Err(_) => {}
        }
    }
    /// Random replacement hash must not verify.
    #[test]
    fn test_random_leaf_hash(
        count in 2u32..200u32,
        leaf_idx_raw in 0u32..200u32,
        fake_hash in proptest::array::uniform32(0u8..),
    ) {
        let leaf_idx = leaf_idx_raw % count;
        let (root_hash, sol_proof, mut sol_leaves, real_hash) = build_mmr_proof(count, leaf_idx);
        if fake_hash == real_hash { return Ok(()); }
        sol_leaves[0].hash = FixedBytes(fake_hash);
        let (mut runner, addr) = setup();
        match solidity_verify_proof(&mut runner, addr, root_hash, sol_proof, sol_leaves, count as u64) {
            Ok(verified) => prop_assert!(!verified, "random hash verified for count={count}, leaf={leaf_idx}"),
            Err(_) => {}
        }
    }
}
/// Gas benchmark: for a range of MMR sizes, prove roughly a third of the
/// leaves, run `CalculateRoot` on-chain, assert the root matches the Rust
/// MMR, and print gas usage per size.
#[test]
fn test_mmr_gas_benchmark() {
    use rand::Rng;
    let (mut runner, contract) = setup();
    for count in [8u32, 32, 64, 128, 256, 512, 1024] {
        let store = MemStore::default();
        let mut mmr = MMR::<_, MergeKeccak, _>::new(0, &store);
        let positions: Vec<u64> =
            (0..count).map(|i| mmr.push(NumberHash::from(i)).unwrap()).collect();
        let root = mmr.get_root().unwrap();
        // Prove ~1/3 of the leaves, at least one.
        let threshold = std::cmp::max(1, count / 3);
        let mut rng = rand::thread_rng();
        let mut indices_set = std::collections::HashSet::new();
        while indices_set.len() < threshold as usize {
            indices_set.insert(rng.gen_range(0..count));
        }
        let mut indices: Vec<u32> = indices_set.into_iter().collect();
        indices.sort();
        let proof =
            mmr.gen_proof(indices.iter().map(|&i| positions[i as usize]).collect()).unwrap();
        mmr.commit().unwrap();
        let mut custom_leaves: Vec<(u32, [u8; 32])> = indices
            .iter()
            .map(|&i| {
                let leaf = NumberHash::from(i);
                let mut hash = [0u8; 32];
                hash.copy_from_slice(&leaf.0);
                (i, hash)
            })
            .collect();
        custom_leaves.dedup_by(|a, b| a.0 == b.0);
        let sol_leaves: Vec<MmrLeaf> = custom_leaves
            .iter()
            .map(|&(idx, hash)| MmrLeaf { index: U256::from(idx), hash: FixedBytes(hash) })
            .collect();
        let sol_proof: Vec<FixedBytes<32>> = proof
            .proof_items()
            .iter()
            .map(|p| {
                let mut b = [0u8; 32];
                b.copy_from_slice(&p.0);
                FixedBytes(b)
            })
            .collect();
        let call = CalculateRootCall {
            proof: sol_proof.clone(),
            leaves: sol_leaves,
            leafCount: U256::from(count),
        };
        let (result, gas) = runner.call_with_gas(contract, call.abi_encode());
        let decoded = CalculateRootCall::abi_decode_returns(&result, true).unwrap();
        let mut root_hash = [0u8; 32];
        root_hash.copy_from_slice(&root.0);
        assert_eq!(decoded._0.0, root_hash);
        println!(
            "leaves={:>4} proving={:>4} proof_elements={:>4} gas={:>8}",
            count,
            indices.len(),
            sol_proof.len(),
            gas
        );
    }
}
================================================
FILE: tests/rust/src/merkle_multi_proof.rs
================================================
#![cfg(test)]
#![allow(dead_code, unused_imports)]
use crate::{
evm_runner::{project_root, EvmRunner},
multi_proof_utils::{Leaf, RsMerkleProof, SolidityProof},
Keccak256,
};
use alloy_primitives::{keccak256, FixedBytes, U256};
use alloy_sol_types::{sol, SolCall};
use primitive_types::H256;
use proptest::{prop_assert, prop_assert_eq, prop_assert_ne, proptest};
use rand::Rng;
use rs_merkle::MerkleTree;
use std::collections::HashSet;
// ABI bindings for the `MerkleMultiProofTest` Solidity harness.
sol! {
    // Leaf representation for the multi-proof contract: index plus hash.
    struct MpLeaf {
        uint256 index;
        bytes32 hash;
    }
    function CalculateRoot(bytes32[] proof, MpLeaf[] leaves, uint256 numLeaves) external view returns (bytes32);
}
fn leaves_to_abi(leaves: &[Leaf]) -> Vec {
leaves
.iter()
.map(|l| MpLeaf { index: U256::from(l.index), hash: FixedBytes(l.hash.0) })
.collect()
}
fn proof_to_abi(proof_hashes: &[H256]) -> Vec> {
proof_hashes.iter().map(|h| FixedBytes(h.0)).collect()
}
/// Execute `CalculateRoot` on the deployed contract and return the computed
/// root as `H256`. Panics if the return data cannot be ABI-decoded.
fn solidity_calculate_root(
    runner: &mut EvmRunner,
    contract: alloy_primitives::Address,
    proof: &SolidityProof,
    num_leaves: usize,
) -> H256 {
    let encoded = CalculateRootCall {
        proof: proof_to_abi(&proof.proof_hashes),
        leaves: leaves_to_abi(&proof.leaves),
        numLeaves: U256::from(num_leaves),
    }
    .abi_encode();
    let raw = runner.call_raw(contract, encoded);
    let returns = CalculateRootCall::abi_decode_returns(&raw, true).unwrap();
    H256(returns._0.0)
}
/// Like `solidity_calculate_root`, but surfaces EVM reverts and ABI-decoding
/// failures as `Err(String)` instead of panicking.
fn solidity_calc_root_raw(
    runner: &mut EvmRunner,
    contract: alloy_primitives::Address,
    proof: &SolidityProof,
    num_leaves: usize,
) -> Result<[u8; 32], String> {
    let encoded = CalculateRootCall {
        proof: proof_to_abi(&proof.proof_hashes),
        leaves: leaves_to_abi(&proof.leaves),
        numLeaves: U256::from(num_leaves),
    }
    .abi_encode();
    // Propagate the revert reason directly; decode failures get their own tag.
    let raw = runner.call_may_revert(contract, encoded)?;
    let returns =
        CalculateRootCall::abi_decode_returns(&raw, true).map_err(|e| format!("decode: {e}"))?;
    Ok(returns._0.0)
}
/// Prove ~1/3 of 600 random leaves with rs_merkle, then assert the Solidity
/// multi-proof root matches both the rs_merkle root and the root computed by
/// Substrate's `binary_merkle_tree`.
#[test]
fn test_calculate_root() {
    let num_leaves = 600;
    let threshold = ((num_leaves * 1) / 3) - 1;
    let leaves = (0..num_leaves).map(|_| H256::random().as_bytes().to_vec()).collect::<Vec<_>>();
    let leaf_hashes = leaves.iter().map(|l| keccak256(l).0).collect::<Vec<_>>();
    let tree = MerkleTree::<Keccak256>::from_leaves(&leaf_hashes);
    // Random, unique, sorted subset of leaf indices to prove.
    let mut rng = rand::thread_rng();
    let mut indices = HashSet::new();
    while indices.len() < threshold {
        indices.insert(rng.gen_range(0..num_leaves));
    }
    let mut indices: Vec<usize> = indices.into_iter().collect();
    indices.sort();
    let rs_proof = tree.proof(&indices);
    let leaves_to_prove: Vec<[u8; 32]> = indices.iter().map(|&i| leaf_hashes[i]).collect();
    assert!(rs_proof.verify(tree.root().unwrap(), &indices, &leaves_to_prove, num_leaves));
    let sol_proof = SolidityProof::from(RsMerkleProof {
        proof: &rs_proof,
        leaf_indices: &indices,
        leaf_hashes: &leaves_to_prove,
    });
    let project = project_root();
    let mut runner = EvmRunner::new();
    let contract = runner.deploy(&project, "MerkleMultiProofTest");
    let calculated = solidity_calculate_root(&mut runner, contract, &sol_proof, leaves.len());
    assert_eq!(H256(tree.root().unwrap()), calculated);
    // Cross-check against Substrate's binary merkle tree implementation.
    let beefy_root = binary_merkle_tree::merkle_root::<Keccak256, _>(leaves.clone());
    assert_eq!(beefy_root, calculated);
}
/// Same flow as `test_calculate_root`, focused on the `RsMerkleProof` ->
/// `SolidityProof` conversion round-tripping through the contract.
#[test]
fn test_rs_merkle_proof_conversion() {
    let num_leaves = 600;
    let threshold = ((num_leaves * 1) / 3) - 1;
    let leaves = (0..num_leaves).map(|_| H256::random().as_bytes().to_vec()).collect::<Vec<_>>();
    let leaf_hashes = leaves.iter().map(|l| keccak256(l).0).collect::<Vec<_>>();
    let tree = MerkleTree::<Keccak256>::from_leaves(&leaf_hashes);
    let mut rng = rand::thread_rng();
    let mut indices_set = HashSet::new();
    while indices_set.len() < threshold {
        indices_set.insert(rng.gen_range(0..num_leaves));
    }
    let mut indices: Vec<usize> = indices_set.into_iter().collect();
    indices.sort();
    let rs_proof = tree.proof(&indices);
    let leaves_to_prove: Vec<[u8; 32]> = indices.iter().map(|&i| leaf_hashes[i]).collect();
    assert!(rs_proof.verify(tree.root().unwrap(), &indices, &leaves_to_prove, num_leaves));
    let sol_proof = SolidityProof::from(RsMerkleProof {
        proof: &rs_proof,
        leaf_indices: &indices,
        leaf_hashes: &leaves_to_prove,
    });
    let project = project_root();
    let mut runner = EvmRunner::new();
    let contract = runner.deploy(&project, "MerkleMultiProofTest");
    let calculated = solidity_calculate_root(&mut runner, contract, &sol_proof, num_leaves);
    assert_eq!(H256(tree.root().unwrap()), calculated);
}
/// Build a tree and single-leaf proof, return everything needed for Solidity verification.
fn build_multi_proof(
num_leaves: usize,
leaf_idx: usize,
) -> (
[u8; 32], // root
SolidityProof, // converted proof
[u8; 32], // leaf hash
) {
let leaf_hashes: Vec<[u8; 32]> =
(0..num_leaves).map(|i| keccak256(&(i as u32).to_le_bytes()).0).collect();
let tree = MerkleTree::::from_leaves(&leaf_hashes);
let root = tree.root().unwrap();
let proof = tree.proof(&[leaf_idx]);
let sol_proof = SolidityProof::from(RsMerkleProof {
proof: &proof,
leaf_indices: &[leaf_idx],
leaf_hashes: &[leaf_hashes[leaf_idx]],
});
(root, sol_proof, leaf_hashes[leaf_idx])
}
// Property tests: the valid case must reproduce the rs_merkle root exactly;
// every mutated case must either revert or yield a different root.
proptest! {
    /// Random tree sizes and leaf selections must produce matching roots.
    #[test]
    fn test_random_multi_proof(
        num_leaves in 2usize..200,
        leaf_idx_raw in 0usize..200,
    ) {
        let leaf_idx = leaf_idx_raw % num_leaves;
        let (root, sol_proof, _) = build_multi_proof(num_leaves, leaf_idx);
        let project = project_root();
        let mut runner = EvmRunner::new();
        let contract = runner.deploy(&project, "MerkleMultiProofTest");
        let calculated = solidity_calc_root_raw(&mut runner, contract, &sol_proof, num_leaves)
            .expect("CalculateRoot should not revert for valid proof");
        prop_assert_eq!(calculated, root);
    }
    /// Corrupted proof hash must produce different root.
    #[test]
    fn test_corrupt_proof_node(
        num_leaves in 2usize..200,
        leaf_idx_raw in 0usize..200,
        byte_idx in 0usize..32,
    ) {
        let leaf_idx = leaf_idx_raw % num_leaves;
        let (root, mut sol_proof, _) = build_multi_proof(num_leaves, leaf_idx);
        if sol_proof.proof_hashes.is_empty() { return Ok(()); }
        sol_proof.proof_hashes[0].0[byte_idx] ^= 0xff;
        let project = project_root();
        let mut runner = EvmRunner::new();
        let contract = runner.deploy(&project, "MerkleMultiProofTest");
        match solidity_calc_root_raw(&mut runner, contract, &sol_proof, num_leaves) {
            Ok(calc) => prop_assert_ne!(calc, root, "corrupted proof matched root"),
            Err(_) => {} // revert is fine
        }
    }
    /// Corrupted leaf hash must produce different root.
    #[test]
    fn test_corrupt_leaf_hash(
        num_leaves in 2usize..200,
        leaf_idx_raw in 0usize..200,
        byte_idx in 0usize..32,
    ) {
        let leaf_idx = leaf_idx_raw % num_leaves;
        let (root, mut sol_proof, _) = build_multi_proof(num_leaves, leaf_idx);
        sol_proof.leaves[0].hash.0[byte_idx] ^= 0xff;
        let project = project_root();
        let mut runner = EvmRunner::new();
        let contract = runner.deploy(&project, "MerkleMultiProofTest");
        match solidity_calc_root_raw(&mut runner, contract, &sol_proof, num_leaves) {
            Ok(calc) => prop_assert_ne!(calc, root, "forged leaf hash matched root"),
            Err(_) => {}
        }
    }
    /// Random replacement hash must produce different root.
    #[test]
    fn test_random_leaf_hash(
        num_leaves in 2usize..200,
        leaf_idx_raw in 0usize..200,
        fake_hash in proptest::array::uniform32(0u8..),
    ) {
        let leaf_idx = leaf_idx_raw % num_leaves;
        let (root, mut sol_proof, real_hash) = build_multi_proof(num_leaves, leaf_idx);
        if fake_hash == real_hash { return Ok(()); }
        sol_proof.leaves[0].hash = H256(fake_hash);
        let project = project_root();
        let mut runner = EvmRunner::new();
        let contract = runner.deploy(&project, "MerkleMultiProofTest");
        match solidity_calc_root_raw(&mut runner, contract, &sol_proof, num_leaves) {
            Ok(calc) => prop_assert_ne!(calc, root, "random hash matched root"),
            Err(_) => {}
        }
    }
    /// OOB leaf index must not produce matching root.
    #[test]
    fn test_oob_leaf_index(
        num_leaves in 2usize..200,
        leaf_idx_raw in 0usize..200,
        offset in 1usize..256,
    ) {
        let leaf_idx = leaf_idx_raw % num_leaves;
        let (root, mut sol_proof, _) = build_multi_proof(num_leaves, leaf_idx);
        sol_proof.leaves[0].index = num_leaves + offset;
        let project = project_root();
        let mut runner = EvmRunner::new();
        let contract = runner.deploy(&project, "MerkleMultiProofTest");
        match solidity_calc_root_raw(&mut runner, contract, &sol_proof, num_leaves) {
            Ok(calc) => prop_assert_ne!(calc, root, "OOB index matched root"),
            Err(_) => {}
        }
    }
    /// Shifted leaf index must not produce matching root.
    #[test]
    fn test_shifted_leaf_index(
        num_leaves in 2usize..200,
        leaf_idx_raw in 0usize..200,
        delta in 1usize..5,
    ) {
        let leaf_idx = leaf_idx_raw % num_leaves;
        let (root, mut sol_proof, _) = build_multi_proof(num_leaves, leaf_idx);
        let real_idx = sol_proof.leaves[0].index;
        let new_idx = if real_idx > delta { real_idx - delta } else { real_idx + delta };
        if new_idx == real_idx { return Ok(()); }
        sol_proof.leaves[0].index = new_idx;
        let project = project_root();
        let mut runner = EvmRunner::new();
        let contract = runner.deploy(&project, "MerkleMultiProofTest");
        match solidity_calc_root_raw(&mut runner, contract, &sol_proof, num_leaves) {
            Ok(calc) => prop_assert_ne!(calc, root, "shifted index matched root"),
            Err(_) => {}
        }
    }
}
/// Gas benchmark for the multi-proof `CalculateRoot`: proves ~1/3 of the
/// leaves for a range of tree sizes, asserts the root matches rs_merkle, and
/// prints gas usage per size.
#[test]
fn test_gas_benchmark() {
    let project = project_root();
    let mut runner = EvmRunner::new();
    let contract = runner.deploy(&project, "MerkleMultiProofTest");
    for num_leaves in [8, 32, 64, 128, 256, 512, 1024] {
        let leaf_hashes: Vec<[u8; 32]> =
            (0..num_leaves).map(|i| keccak256(&(i as u32).to_le_bytes()).0).collect();
        let tree = MerkleTree::<Keccak256>::from_leaves(&leaf_hashes);
        // Prove ~1/3 of leaves
        let threshold = std::cmp::max(1, num_leaves / 3);
        let mut rng = rand::thread_rng();
        let mut indices_set = HashSet::new();
        while indices_set.len() < threshold {
            indices_set.insert(rng.gen_range(0..num_leaves));
        }
        let mut indices: Vec<usize> = indices_set.into_iter().collect();
        indices.sort();
        let rs_proof = tree.proof(&indices);
        let leaves_to_prove: Vec<[u8; 32]> = indices.iter().map(|&i| leaf_hashes[i]).collect();
        let sol_proof = SolidityProof::from(RsMerkleProof {
            proof: &rs_proof,
            leaf_indices: &indices,
            leaf_hashes: &leaves_to_prove,
        });
        let call = CalculateRootCall {
            proof: proof_to_abi(&sol_proof.proof_hashes),
            leaves: leaves_to_abi(&sol_proof.leaves),
            numLeaves: U256::from(num_leaves),
        };
        let (result, gas) = runner.call_with_gas(contract, call.abi_encode());
        let decoded = CalculateRootCall::abi_decode_returns(&result, true).unwrap();
        assert_eq!(decoded._0.0, tree.root().unwrap());
        println!(
            "leaves={:>4} proving={:>4} proof_elements={:>4} gas={:>8}",
            num_leaves,
            indices.len(),
            sol_proof.proof_hashes.len(),
            gas
        );
    }
}
================================================
FILE: tests/rust/src/merkle_patricia.rs
================================================
#![cfg(test)]
#![allow(dead_code, unused_imports)]
use crate::evm_runner::{project_root, EvmRunner};
use alloy_primitives::{FixedBytes, U256};
use alloy_sol_types::{sol, SolCall, SolValue};
use codec::Decode;
use hex_literal::hex;
use primitive_types::H256;
use sp_core::KeccakHasher;
use sp_trie::{LayoutV0, MemoryDB, NodeCodec, StorageProof};
use std::collections::HashSet;
use trie_db::{
DBValue, Hasher, NodeCodec as NodeCodecT, Recorder, Trie, TrieDBBuilder, TrieDBMutBuilder,
TrieLayout, TrieMut,
};
// ABI bindings for the `MerklePatriciaTest` harness: verification entry
// points plus low-level node/nibble decoding helpers exercised directly.
sol! {
    // Key/value pair returned by the verification entry points.
    struct StorageValue {
        bytes key;
        bytes value;
    }
    // Nibble-aligned view over a byte string; `offset` counts nibbles
    // (see the nibble-slice tests below, e.g. offset 6 over 3 bytes == empty).
    struct SolNibbleSlice {
        bytes data;
        uint256 offset;
    }
    struct SolByteSlice {
        bytes data;
        uint256 offset;
    }
    // A child reference: either a 32-byte hash or an inline node body.
    struct SolNodeHandle {
        bool isHash;
        bytes32 hash;
        bool isInline;
        bytes inLine;
    }
    struct SolNodeHandleOption {
        bool isSome;
        SolNodeHandle value;
    }
    // Tagged union of trie node variants as decoded by the Solidity TrieDB.
    struct SolNodeKind {
        bool isEmpty;
        bool isLeaf;
        bool isHashedLeaf;
        bool isNibbledValueBranch;
        bool isNibbledHashedValueBranch;
        bool isNibbledBranch;
        bool isExtension;
        bool isBranch;
        uint256 nibbleSize;
        SolByteSlice data;
    }
    function VerifyKeys(bytes32 root, bytes[] proof, bytes[] keys) external pure returns (StorageValue[]);
    function VerifyEthereum(bytes32 root, bytes[] proof, bytes[] keys) external pure returns (StorageValue[]);
    function decodeNodeKind(bytes node) external pure returns (SolNodeKind);
    function decodeNibbledBranch(bytes node) external;
    function decodeLeaf(bytes node) external;
    function nibbleLen(SolNibbleSlice nibble) external pure returns (uint256);
    function isNibbleEmpty(SolNibbleSlice self_) external pure returns (bool);
    function nibbleAt(SolNibbleSlice self_, uint256 i) external pure returns (uint256);
    function mid(SolNibbleSlice self_, uint256 i) external pure returns (SolNibbleSlice);
    function commonPrefix(SolNibbleSlice self_, SolNibbleSlice other) external pure returns (uint256);
    function startsWith(SolNibbleSlice self_, SolNibbleSlice other) external pure returns (bool);
    function eq(SolNibbleSlice self_, SolNibbleSlice other) external pure returns (bool);
}
fn proof_data() -> ([u8; 32], Vec>, Vec) {
let key = hex!("f0c365c3cf59d671eb72da0e7a4113c49f1f0515f462cdcf84e0f1d6045dfcbb").to_vec();
let proof = vec![
hex!("802e98809b03c6ae83e3b70aa89acfe0947b3a18b5d35569662335df7127ab8fcb88c88780e5d1b21c5ecc2891e3467f6273f27ce2e73a292d6b8306197edfa97b3d965bd080c51e5f53a03d92ea8b2792218f152da738b9340c6eeb08581145825348bbdba480ad103a9320581c7747895a01d79d2fa5f103c4b83c5af10b0a13bc1749749523806eea23c0854ced8445a3338833e2401753fdcfadb3b56277f8f1af4004f73719806d990657a5b5c3c97b8a917d9f153cafc463acd90592f881bc071d6ba64e90b380346031472f91f7c44631224cb5e61fb29d530a9fafd5253551cbf43b7e97e79a").to_vec(),
hex!("9f00c365c3cf59d671eb72da0e7a4113c41002505f0e7b9012096b41c4eb3aaf947f6ea429080000685f0f1f0515f462cdcf84e0f1d6045dfcbb2035e90c7f86010000").to_vec(),
];
let root = hex!("6b5710000eccbd59b6351fc2eb53ff2c1df8e0f816f7186ddd309ca85e8798dd");
(root, proof, key)
}
/// Spin up a fresh EVM and deploy the `MerklePatriciaTest` contract,
/// returning the runner together with the deployed address.
fn setup() -> (EvmRunner, alloy_primitives::Address) {
    let project = project_root();
    let mut evm = EvmRunner::new();
    let contract = evm.deploy(&project, "MerklePatriciaTest");
    (evm, contract)
}
/// Wrap raw bytes plus a nibble offset into the ABI `SolNibbleSlice` struct.
fn make_nibble(data: &[u8], offset: u64) -> SolNibbleSlice {
    let bytes = data.to_vec().into();
    SolNibbleSlice { data: bytes, offset: U256::from(offset) }
}
/// Every node in the fixture proof must decode as a nibbled branch, both via
/// sp-trie's `NodeCodec` and via the Solidity decoder.
#[test]
fn test_decode_nibbled_branch() {
    let (mut runner, addr) = setup();
    let (_, proof, _) = proof_data();
    for item in proof {
        // Cross-check: sp-trie must also decode the node successfully.
        let _plan = NodeCodec::<KeccakHasher>::decode_plan(&mut &item[..]).unwrap().build(&item);
        let call = decodeNodeKindCall { node: item.clone().into() };
        let result = runner.call_raw(addr, call.abi_encode());
        let decoded = decodeNodeKindCall::abi_decode_returns(&result, true).unwrap();
        assert!(decoded._0.isNibbledBranch);
        // Just check decodeNibbledBranch doesn't revert
        let call = decodeNibbledBranchCall { node: item.clone().into() };
        runner.call_raw(addr, call.abi_encode());
    }
}
/// Known SCALE-encoded leaf nodes must decode as leaves, both via sp-trie's
/// `NodeCodec` and via the Solidity decoder.
#[test]
fn test_decode_leaf() {
    let leaves: Vec<Vec<u8>> = vec![
        vec![95, 14, 123, 144, 18, 9, 107, 65, 196, 235, 58, 175, 148, 127, 110, 164, 41, 8, 0, 0],
        vec![
            95, 15, 31, 5, 21, 244, 98, 205, 207, 132, 224, 241, 214, 4, 93, 252, 187, 32, 240,
            214, 144, 122, 134, 1, 0, 0,
        ],
    ];
    let (mut runner, addr) = setup();
    for leaf in leaves {
        // Cross-check: sp-trie must also decode the node successfully.
        let _plan = NodeCodec::<KeccakHasher>::decode_plan(&mut &leaf[..]).unwrap().build(&leaf);
        let call = decodeNodeKindCall { node: leaf.clone().into() };
        let result = runner.call_raw(addr, call.abi_encode());
        let decoded = decodeNodeKindCall::abi_decode_returns(&result, true).unwrap();
        assert!(decoded._0.isLeaf);
        // Just check decodeLeaf doesn't revert
        let call = decodeLeafCall { node: leaf.clone().into() };
        runner.call_raw(addr, call.abi_encode());
    }
}
// Shared fixture for the nibble-slice tests: three bytes = six nibbles 0..=5.
static D: &[u8; 3] = &[0x01u8, 0x23, 0x45];
/// Basic nibble-slice operations (`nibbleLen`, `isNibbleEmpty`, `nibbleAt`)
/// over the 6-nibble fixture `D`, at offsets 0, 3 and 6.
#[test]
fn test_nibble_slice_ops_basics() {
    let (mut runner, addr) = setup();
    // nibbleLen with offset 0
    let call = nibbleLenCall { nibble: make_nibble(D, 0) };
    let result = runner.call_raw(addr, call.abi_encode());
    let decoded = nibbleLenCall::abi_decode_returns(&result, true).unwrap();
    assert_eq!(decoded._0, U256::from(6));
    // isNibbleEmpty with offset 0
    let call = isNibbleEmptyCall { self_: make_nibble(D, 0) };
    let result = runner.call_raw(addr, call.abi_encode());
    let decoded = isNibbleEmptyCall::abi_decode_returns(&result, true).unwrap();
    assert!(!decoded._0);
    // isNibbleEmpty with offset 6 (offset == total nibbles, so the view is empty)
    let call = isNibbleEmptyCall { self_: make_nibble(D, 6) };
    let result = runner.call_raw(addr, call.abi_encode());
    let decoded = isNibbleEmptyCall::abi_decode_returns(&result, true).unwrap();
    assert!(decoded._0);
    // nibbleLen with offset 3
    let call = nibbleLenCall { nibble: make_nibble(D, 3) };
    let result = runner.call_raw(addr, call.abi_encode());
    let decoded = nibbleLenCall::abi_decode_returns(&result, true).unwrap();
    assert_eq!(decoded._0, U256::from(3));
    // nibbleAt with offset 3: D's nibble values equal their positions, so
    // position i of the offset view holds value i + 3.
    for i in 0u64..3 {
        let call = nibbleAtCall { self_: make_nibble(D, 3), i: U256::from(i) };
        let result = runner.call_raw(addr, call.abi_encode());
        let decoded = nibbleAtCall::abi_decode_returns(&result, true).unwrap();
        assert_eq!(decoded._0, U256::from(i + 3));
    }
}
/// `mid(slice, i)` must behave like advancing the offset by `i` nibbles:
/// position j of the result holds the fixture nibble value i + j.
#[test]
fn test_nibble_slice_ops_mid() {
    let (mut runner, addr) = setup();
    // mid(D, 2)
    let call = midCall { self_: make_nibble(D, 0), i: U256::from(2) };
    let result = runner.call_raw(addr, call.abi_encode());
    let nibble = midCall::abi_decode_returns(&result, true).unwrap()._0;
    for i in 0u64..4 {
        let call = nibbleAtCall { self_: nibble.clone(), i: U256::from(i) };
        let result = runner.call_raw(addr, call.abi_encode());
        let decoded = nibbleAtCall::abi_decode_returns(&result, true).unwrap();
        assert_eq!(decoded._0, U256::from(i + 2));
    }
    // mid(D, 3)
    let call = midCall { self_: make_nibble(D, 0), i: U256::from(3) };
    let result = runner.call_raw(addr, call.abi_encode());
    let nibble = midCall::abi_decode_returns(&result, true).unwrap()._0;
    for i in 0u64..3 {
        let call = nibbleAtCall { self_: nibble.clone(), i: U256::from(i) };
        let result = runner.call_raw(addr, call.abi_encode());
        let decoded = nibbleAtCall::abi_decode_returns(&result, true).unwrap();
        assert_eq!(decoded._0, U256::from(i + 3));
    }
}
/// Prefix-related operations (`commonPrefix`, `startsWith`) across two
/// overlapping fixtures, including views produced by `mid`.
#[test]
fn test_nibble_slice_ops_shared() {
    let (mut runner, addr) = setup();
    let n = make_nibble(D, 0);
    // `other` repeats D's first two bytes, then contains D followed by 0x67.
    let other = &[0x01u8, 0x23, 0x01, 0x23, 0x45, 0x67];
    let m = make_nibble(other, 0);
    // commonPrefix(n, m) == 4 — and it must be symmetric.
    let call = commonPrefixCall { self_: n.clone(), other: m.clone() };
    let result = runner.call_raw(addr, call.abi_encode());
    assert_eq!(commonPrefixCall::abi_decode_returns(&result, true).unwrap()._0, U256::from(4));
    // commonPrefix(m, n) == 4
    let call = commonPrefixCall { self_: m.clone(), other: n.clone() };
    let result = runner.call_raw(addr, call.abi_encode());
    assert_eq!(commonPrefixCall::abi_decode_returns(&result, true).unwrap()._0, U256::from(4));
    // m_mid_4 = mid(m, 4) — skips the repeated prefix, leaving D plus 0x67.
    let call = midCall { self_: m.clone(), i: U256::from(4) };
    let result = runner.call_raw(addr, call.abi_encode());
    let m_mid_4 = midCall::abi_decode_returns(&result, true).unwrap()._0;
    // startsWith(m_mid_4, n) == true
    let call = startsWithCall { self_: m_mid_4.clone(), other: n.clone() };
    let result = runner.call_raw(addr, call.abi_encode());
    assert!(startsWithCall::abi_decode_returns(&result, true).unwrap()._0);
    // startsWith(n, m_mid_4) == false — n is shorter than m_mid_4.
    let call = startsWithCall { self_: n.clone(), other: m_mid_4.clone() };
    let result = runner.call_raw(addr, call.abi_encode());
    assert!(!startsWithCall::abi_decode_returns(&result, true).unwrap()._0);
    // commonPrefix(n, m_mid_4) == 6
    let call = commonPrefixCall { self_: n.clone(), other: m_mid_4.clone() };
    let result = runner.call_raw(addr, call.abi_encode());
    assert_eq!(commonPrefixCall::abi_decode_returns(&result, true).unwrap()._0, U256::from(6));
    // n_mid_1 = mid(n, 1), m_mid_1 = mid(m, 1), m_mid_2 = mid(m, 2)
    let call = midCall { self_: n.clone(), i: U256::from(1) };
    let result = runner.call_raw(addr, call.abi_encode());
    let n_mid_1 = midCall::abi_decode_returns(&result, true).unwrap()._0;
    let call = midCall { self_: m.clone(), i: U256::from(1) };
    let result = runner.call_raw(addr, call.abi_encode());
    let m_mid_1 = midCall::abi_decode_returns(&result, true).unwrap()._0;
    let call = midCall { self_: m.clone(), i: U256::from(2) };
    let result = runner.call_raw(addr, call.abi_encode());
    let m_mid_2 = midCall::abi_decode_returns(&result, true).unwrap()._0;
    // commonPrefix(n_mid_1, m_mid_1) == 3
    let call = commonPrefixCall { self_: n_mid_1.clone(), other: m_mid_1.clone() };
    let result = runner.call_raw(addr, call.abi_encode());
    assert_eq!(commonPrefixCall::abi_decode_returns(&result, true).unwrap()._0, U256::from(3));
    // commonPrefix(n_mid_1, m_mid_2) == 0
    let call = commonPrefixCall { self_: n_mid_1.clone(), other: m_mid_2.clone() };
    let result = runner.call_raw(addr, call.abi_encode());
    assert_eq!(commonPrefixCall::abi_decode_returns(&result, true).unwrap()._0, U256::from(0));
}
/// Verify the fixed Substrate storage proof for the fixture key and decode
/// the returned value as a SCALE-encoded `u64`.
#[test]
fn test_merkle_patricia_trie() {
    let (root, proof, key) = proof_data();
    let (mut runner, addr) = setup();
    let call = VerifyKeysCall {
        root: FixedBytes(root),
        proof: proof.into_iter().map(Into::into).collect(),
        keys: vec![key.into()],
    };
    let result = runner.call_raw(addr, call.abi_encode());
    let decoded = VerifyKeysCall::abi_decode_returns(&result, true).unwrap();
    let value = &decoded._0[0].value;
    // The proven value is a SCALE-encoded u64 (presumably a block timestamp).
    let timestamp = <u64>::decode(&mut &value[..]).unwrap();
    assert_eq!(timestamp, 1_677_168_798_005);
}
fn generate_proof(
) -> (::Out, Vec>, Vec<(Vec, Option)>) {
let keys = (0..10).map(|_| H256::random().as_bytes().to_vec()).collect::>();
let values = (0..10).map(|_| H256::random().as_bytes().to_vec()).collect::>();
let entries = keys.clone().into_iter().zip(values.clone().into_iter()).collect::>();
let (db, root) = {
let mut db = >::default();
let mut root = Default::default();
{
let mut trie = TrieDBMutBuilder::::new(&mut db, &mut root).build();
for (key, value) in &entries {
trie.insert(key, value).unwrap();
}
}
(db, root)
};
let proof = {
let mut recorder = Recorder::::new();
let trie_db = TrieDBBuilder::::new(&db, &root).with_recorder(&mut recorder).build();
for (key, expected) in &entries {
let value = trie_db.get(key).unwrap().unwrap();
assert_eq!(&value, expected);
}
let proof = recorder.drain().into_iter().map(|f| f.data).collect::>();
{
let mdb = StorageProof::new(proof.clone()).into_memory_db::();
let trie_db = TrieDBBuilder::::new(&mdb, &root).build();
for (key, expected) in &entries {
let value = trie_db.get(key).unwrap().unwrap();
assert_eq!(&value, expected);
}
}
proof.into_iter().collect::>()
};
let trie = TrieDBBuilder::::new(&db, &root).build();
let items = keys
.into_iter()
.map(|key| {
let value = trie.get(&key).unwrap();
(key, value)
})
.collect();
(root, proof, items)
}
#[test]
fn test_merkle_patricia_trie_layout_v0() {
    let (root, proof, entries) = generate_proof::<LayoutV0<KeccakHasher>>();
    let (mut runner, addr) = setup();
    // Membership proofs: every inserted key must resolve to its value.
    for (key, value) in entries {
        let call = VerifyKeysCall {
            root: FixedBytes(root.into()),
            proof: proof.clone().into_iter().map(Into::into).collect(),
            keys: vec![key.into()],
        };
        let result = runner.call_raw(addr, call.abi_encode());
        let decoded = VerifyKeysCall::abi_decode_returns(&result, true).unwrap();
        assert_eq!(decoded._0[0].value.to_vec(), value.unwrap());
    }
    // Non-membership proof: a random key must come back with an empty value.
    let call = VerifyKeysCall {
        root: FixedBytes(root.into()),
        proof: proof.into_iter().map(Into::into).collect(),
        keys: vec![H256::random().as_bytes().to_vec().into()],
    };
    let result = runner.call_raw(addr, call.abi_encode());
    let decoded = VerifyKeysCall::abi_decode_returns(&result, true).unwrap();
    assert_eq!(decoded._0[0].value.len(), 0);
}
#[test]
fn test_merkle_patricia_trie_ethereum_verify_transaction_trie_single_node() {
    // Transaction trie made of a single node: the key resolves directly
    // against the root node and yields the raw transaction bytes.
    let (mut runner, addr) = setup();
    let trie_root = hex!("ecabc214ab6c55e1342e888fa677e2bcc29218a4b248a56fcebf7aa357807b60");
    let root_node = hex!("f89b822080b89601f89301808080808080f847f84580f842a00000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000080a08c7939f0e613736150a05565fcddda959b22c44ddac6c6aed8ec59e1462a0498a0166d30e3763829d64fca3d38601e65ba6f0e94f7e3c544381ae5e9e9b12dacd0").to_vec();
    let expected_value = hex!("01f89301808080808080f847f84580f842a00000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000080a08c7939f0e613736150a05565fcddda959b22c44ddac6c6aed8ec59e1462a0498a0166d30e3763829d64fca3d38601e65ba6f0e94f7e3c544381ae5e9e9b12dacd0").to_vec();
    let call = VerifyEthereumCall {
        root: FixedBytes(trie_root),
        proof: vec![root_node.into()],
        keys: vec![hex!("80").to_vec().into()],
    };
    let returned = runner.call_raw(addr, call.abi_encode());
    let decoded = VerifyEthereumCall::abi_decode_returns(&returned, true).unwrap();
    assert_eq!(decoded._0[0].value.to_vec(), expected_value);
}
// Verifies an Ethereum transaction-trie inclusion proof whose path traverses
// several trie nodes before reaching the leaf that carries the transaction.
#[test]
fn test_merkle_patricia_trie_ethereum_verify_transaction_trie_multi_node() {
// Test harness: in-memory EVM runner plus the deployed verifier's address.
let (mut runner, addr) = setup();
let call = VerifyEthereumCall {
// Root hash the proof nodes below must commit to.
root: FixedBytes(hex!("ac39df3a470f95659f9f6f30c4de252479ddd4e6083ba7a7be72d2505b4062e2")),
// RLP-encoded trie nodes along the lookup path, root first.
proof: vec![
hex!("f90131a0abd3b92264de818dd5c44b3212ee3d20de2478ca6a080d59b0f7eadc165aea33a014dd1fc275045e27cee26d6ec8395aa68681155e524550de255580f4181e7cd1a014dd1fc275045e27cee26d6ec8395aa68681155e524550de255580f4181e7cd1a014dd1fc275045e27cee26d6ec8395aa68681155e524550de255580f4181e7cd1a014dd1fc275045e27cee26d6ec8395aa68681155e524550de255580f4181e7cd1a014dd1fc275045e27cee26d6ec8395aa68681155e524550de255580f4181e7cd1a014dd1fc275045e27cee26d6ec8395aa68681155e524550de255580f4181e7cd1a014dd1fc275045e27cee26d6ec8395aa68681155e524550de255580f4181e7cd1a06bae3f278b07352be6e823e9c9584d2a8ba01e000cc7653b5b1d213888b841548080808080808080").to_vec().into(),
hex!("f871a036218dd16b29d804118d9bb961896fd3eb036e028af02d3638937cdcaa65036da060c25b7b04d23c401c874963020e932bcafdef5b9b5c4f25394e2c3bba644feca06cf2dabf824eee4758b57a77612e1f7a0d483e3656d9a76ea7b11a70a50dd4008080808080808080808080808080").to_vec().into(),
hex!("f8b1a0cb8fc8ea7d198bd1b6fb48a51e17932ad8333ef0b57b17326fbe3c4f6abf231ea018c89cbf38ef1aa86fc642cfc5eae17f49aef97c493c5ada614743d630de32fba018c89cbf38ef1aa86fc642cfc5eae17f49aef97c493c5ada614743d630de32fba018c89cbf38ef1aa86fc642cfc5eae17f49aef97c493c5ada614743d630de32fba0951955209df36420e52fc0961cba8318a288676262651f9e7cdafd0cf177c1e5808080808080808080808080").to_vec().into(),
hex!("f90211a03f4a0248cd0ff7ad4496f72fde07d31a0d46d7986af6505f0e4e85b72b3401aaa03f4a0248cd0ff7ad4496f72fde07d31a0d46d7986af6505f0e4e85b72b3401aaa03f4a0248cd0ff7ad4496f72fde07d31a0d46d7986af6505f0e4e85b72b3401aaa03f4a0248cd0ff7ad4496f72fde07d31a0d46d7986af6505f0e4e85b72b3401aaa03f4a0248cd0ff7ad4496f72fde07d31a0d46d7986af6505f0e4e85b72b3401aaa03f4a0248cd0ff7ad4496f72fde07d31a0d46d7986af6505f0e4e85b72b3401aaa03f4a0248cd0ff7ad4496f72fde07d31a0d46d7986af6505f0e4e85b72b3401aaa03f4a0248cd0ff7ad4496f72fde07d31a0d46d7986af6505f0e4e85b72b3401aaa03f4a0248cd0ff7ad4496f72fde07d31a0d46d7986af6505f0e4e85b72b3401aaa03f4a0248cd0ff7ad4496f72fde07d31a0d46d7986af6505f0e4e85b72b3401aaa03f4a0248cd0ff7ad4496f72fde07d31a0d46d7986af6505f0e4e85b72b3401aaa03f4a0248cd0ff7ad4496f72fde07d31a0d46d7986af6505f0e4e85b72b3401aaa03f4a0248cd0ff7ad4496f72fde07d31a0d46d7986af6505f0e4e85b72b3401aaa03f4a0248cd0ff7ad4496f72fde07d31a0d46d7986af6505f0e4e85b72b3401aaa03f4a0248cd0ff7ad4496f72fde07d31a0d46d7986af6505f0e4e85b72b3401aaa03f4a0248cd0ff7ad4496f72fde07d31a0d46d7986af6505f0e4e85b72b3401aa80").to_vec().into(),
hex!("f90211a014dd1fc275045e27cee26d6ec8395aa68681155e524550de255580f4181e7cd1a014dd1fc275045e27cee26d6ec8395aa68681155e524550de255580f4181e7cd1a014dd1fc275045e27cee26d6ec8395aa68681155e524550de255580f4181e7cd1a014dd1fc275045e27cee26d6ec8395aa68681155e524550de255580f4181e7cd1a014dd1fc275045e27cee26d6ec8395aa68681155e524550de255580f4181e7cd1a014dd1fc275045e27cee26d6ec8395aa68681155e524550de255580f4181e7cd1a014dd1fc275045e27cee26d6ec8395aa68681155e524550de255580f4181e7cd1a014dd1fc275045e27cee26d6ec8395aa68681155e524550de255580f4181e7cd1a014dd1fc275045e27cee26d6ec8395aa68681155e524550de255580f4181e7cd1a014dd1fc275045e27cee26d6ec8395aa68681155e524550de255580f4181e7cd1a014dd1fc275045e27cee26d6ec8395aa68681155e524550de255580f4181e7cd1a014dd1fc275045e27cee26d6ec8395aa68681155e524550de255580f4181e7cd1a014dd1fc275045e27cee26d6ec8395aa68681155e524550de255580f4181e7cd1a014dd1fc275045e27cee26d6ec8395aa68681155e524550de255580f4181e7cd1a014dd1fc275045e27cee26d6ec8395aa68681155e524550de255580f4181e7cd1a014dd1fc275045e27cee26d6ec8395aa68681155e524550de255580f4181e7cd180").to_vec().into(),
hex!("f90211a036218dd16b29d804118d9bb961896fd3eb036e028af02d3638937cdcaa65036da036218dd16b29d804118d9bb961896fd3eb036e028af02d3638937cdcaa65036da036218dd16b29d804118d9bb961896fd3eb036e028af02d3638937cdcaa65036da036218dd16b29d804118d9bb961896fd3eb036e028af02d3638937cdcaa65036da036218dd16b29d804118d9bb961896fd3eb036e028af02d3638937cdcaa65036da036218dd16b29d804118d9bb961896fd3eb036e028af02d3638937cdcaa65036da036218dd16b29d804118d9bb961896fd3eb036e028af02d3638937cdcaa65036da036218dd16b29d804118d9bb961896fd3eb036e028af02d3638937cdcaa65036da036218dd16b29d804118d9bb961896fd3eb036e028af02d3638937cdcaa65036da036218dd16b29d804118d9bb961896fd3eb036e028af02d3638937cdcaa65036da036218dd16b29d804118d9bb961896fd3eb036e028af02d3638937cdcaa65036da036218dd16b29d804118d9bb961896fd3eb036e028af02d3638937cdcaa65036da036218dd16b29d804118d9bb961896fd3eb036e028af02d3638937cdcaa65036da036218dd16b29d804118d9bb961896fd3eb036e028af02d3638937cdcaa65036da036218dd16b29d804118d9bb961896fd3eb036e028af02d3638937cdcaa65036da036218dd16b29d804118d9bb961896fd3eb036e028af02d3638937cdcaa65036d80").to_vec().into(),
hex!("f89920b89601f89301808080808080f847f84580f842a00000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000080a08c7939f0e613736150a05565fcddda959b22c44ddac6c6aed8ec59e1462a0498a0166d30e3763829d64fca3d38601e65ba6f0e94f7e3c544381ae5e9e9b12dacd0").to_vec().into(),
],
// Trie key being looked up (presumably the RLP-encoded transaction
// index — confirm against how the fixture was generated).
keys: vec![hex!("8232c8").to_vec().into()],
};
let result = runner.call_raw(addr, call.abi_encode());
let decoded = VerifyEthereumCall::abi_decode_returns(&result, true).unwrap();
// The recovered value must equal the raw transaction bytes stored at the key.
assert_eq!(
decoded._0[0].value.to_vec(),
hex!("01f89301808080808080f847f84580f842a00000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000080a08c7939f0e613736150a05565fcddda959b22c44ddac6c6aed8ec59e1462a0498a0166d30e3763829d64fca3d38601e65ba6f0e94f7e3c544381ae5e9e9b12dacd0").to_vec()
);
}
#[test]
fn test_merkle_patricia_trie_ethereum_verify_state_trie_single_node() {
    // State trie made of a single node: the account key resolves directly
    // against the root node and yields the RLP-encoded account.
    let (mut runner, addr) = setup();
    let state_root = hex!("0ce23f3c809de377b008a4a3ee94a0834aac8bec1f86e28ffe4fdb5a15b0c785");
    let root_node = hex!("f86aa1205380c7b7ae81a58eb98d9c78de4a1fd7fd9535fc953ed2be602daaa41767312ab846f8448080a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").to_vec();
    let account_key = hex!("5380c7b7ae81a58eb98d9c78de4a1fd7fd9535fc953ed2be602daaa41767312a").to_vec();
    let expected_account = hex!("f8448080a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").to_vec();
    let call = VerifyEthereumCall {
        root: FixedBytes(state_root),
        proof: vec![root_node.into()],
        keys: vec![account_key.into()],
    };
    let returned = runner.call_raw(addr, call.abi_encode());
    let decoded = VerifyEthereumCall::abi_decode_returns(&returned, true).unwrap();
    assert_eq!(decoded._0[0].value.to_vec(), expected_account);
}
// Verifies an Ethereum state-trie inclusion proof whose path traverses
// several trie nodes before reaching the leaf holding the account data.
#[test]
fn test_merkle_patricia_trie_ethereum_verify_state_trie_multi_node() {
// Test harness: in-memory EVM runner plus the deployed verifier's address.
let (mut runner, addr) = setup();
let call = VerifyEthereumCall {
// State root the proof nodes below must commit to.
root: FixedBytes(hex!("4dc3e58e944d713c36c6b9cc58df023b3e578093de16e175faefa8f91727ca6e")),
// RLP-encoded trie nodes along the lookup path, root first.
proof: vec![
hex!("f90211a07466452b9c24acc76f0c9a0a4d43f5b362a031626eb6a0d7e7409c9c2fe1ecdda00bc98d4eaa34347ecb42d4d716161612692c2ac599cf3c4f5eade18fbb07da4ca00146ab246da36aec011dcdf8fd4c6a505ec7caca75441268c7eab80e9aa96767a038a41d42f1edccf59f2a1d6e7887ab1f3dfcd0a26f3be309ee237ed4aecd9d38a05f542a7ffff85163015c7cfc8c7d947ac98114f1bc576ab74ec663dd97d9596fa0208cb7384b248a341c22ef52d1882f6045345c6435c38a5b4d382e7a02c53f48a086b590086c7e7738c59cd0bba1a136dc42cc099fa9ae8af77abaa76fa1f3f503a0ab69ef5e7d461a547675de48c30be16f2d297509f6d005325365cbebe8735104a0896573c4595ea56992cb4237091ce8f00a73988f80506ef7bde78de9cfddbfa9a06d78ae475034b4aec9afef58c3f93e997f2c50f2a4948a7214b0295c5ae1776ea0763c0ec3ea13b7cbfe139cf8a3cf76e75026b2d42854bf822a47a0497dabf679a028fd50ebf9eed4e9a0969a73682ea615cb1134510f80aa057c60acf657a13a05a00c9f1e12244dabf2db619f0ce1098dd6d19f7c9dd1b17da1ffd02b0e0f3d4d7ea0aa2a772e989b23bd7e2eba714f153031c79c03cb835539fc56debc7669b64148a07f6544adbc5e30eca006a050384d85df7a510bac66dde8c32b7741486b319610a0c859cc09be23308083a16f96c19dffd9b48770b715c150220a35e07ff5ea716b80").to_vec().into(),
hex!("f90171a02ffa31221e3db9f56751599b181b16cd0489d9870f58a481ee1398d6991a6ed3a0af7f7b8a8aa219ebd9e9562bd3f917c47f7205bb0f29bdb064c63e51f8e14ee680a0fc0482eb10e5eccc57013233746a95072c2d80746898c30a23fc43c52eabddf5a0985b92a5617ee65b05be517cbbeb1c5598e5479bdb45107552b80d339c6c23eba0984d7ace51d61b40d4336aec39c867fc5db4566036ef0e6225df604dd93a6538a068ff593d5fc203763242dc6b95e559472f8d43ceed1a4fececb11a0eb6ef1070a0d7a609f017d3641ff18327916212ebf21d6ae93b3ffe09ae6fc5e1160c7f571f80a0d62e9137db3e883d506c3927310d0100e624b5befe180830f029c99859451230a0ce9a78f4d5abb17cfe7c189a83524a479bd8aed870cc7608c3ede431cdd214cd8080a0f58d48fa8ca7b152aa825128a173c6061b6cabe9143473e246f6d8732f59cc3880a030a23d0cd92c346ce27019e17ddbf8651695cd3fd6350f0f7862f915269a57ff80").to_vec().into(),
hex!("f8518080808080808080a0fed0c7841e83453b78135ca2a36e8fb5d8c5fbb5883746f3f93054e42205e7e880808080a0711f6aa6ad472844f3e563a1c3ff5777c4e2c3b3126b42fa4eb4d115546116c5808080").to_vec().into(),
hex!("f8689f30c7b7ae81a58eb98d9c78de4a1fd7fd9535fc953ed2be602daaa41767312ab846f8448080a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").to_vec().into(),
],
// The 32-byte account key (hashed address) being looked up.
keys: vec![hex!("5380c7b7ae81a58eb98d9c78de4a1fd7fd9535fc953ed2be602daaa41767312a").to_vec().into()],
};
let result = runner.call_raw(addr, call.abi_encode());
let decoded = VerifyEthereumCall::abi_decode_returns(&result, true).unwrap();
// The recovered value must equal the RLP-encoded account stored at the key.
assert_eq!(
decoded._0[0].value.to_vec(),
hex!("f8448080a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").to_vec()
);
}
================================================
FILE: tests/rust/src/multi_proof_utils.rs
================================================
use crate::Keccak256;
use primitive_types::H256;
use rs_merkle::MerkleProof;
/// A leaf in the merkle tree with its 0-based index and hash value.
#[derive(Debug, Clone, PartialEq, Default)]
pub struct Leaf {
// Hash value of the leaf.
pub hash: H256,
// 0-based position of the leaf within the tree's bottom layer.
pub index: usize,
}
/// The inputs needed to convert an rs-merkle proof into the flat format
/// expected by the Solidity MerkleMultiProof verifier.
pub struct RsMerkleProof<'a> {
pub proof: &'a MerkleProof,
pub leaf_indices: &'a [usize],
pub leaf_hashes: &'a [[u8; 32]],
}
/// The converted proof ready for Solidity consumption.
pub struct SolidityProof {
pub proof_hashes: Vec,
pub leaves: Vec,
}
impl<'a> From> for SolidityProof {
fn from(input: RsMerkleProof<'a>) -> Self {
let proof_hashes = input.proof.proof_hashes().iter().map(|&h| H256(h)).collect();
let mut leaves: Vec = input
.leaf_indices
.iter()
.zip(input.leaf_hashes)
.map(|(&i, hash)| Leaf { hash: H256(*hash), index: i })
.collect();
leaves.sort_by_key(|l| l.index);
SolidityProof { proof_hashes, leaves }
}
}