[
  {
    "path": ".github/workflows/ci.yml",
    "content": "name: CI\n\non:\n  push:\n    branches: [master, develop]\n  pull_request:\n    branches: [master, develop]\n\nenv:\n  CARGO_TERM_COLOR: always\n\njobs:\n  test-base:\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v2\n      - name: Use Nightly\n        uses: actions-rs/toolchain@v1\n        with:\n          toolchain: nightly-2024-04-25\n          override: true\n      - name: Cache\n        uses: actions/cache@v3\n        with:\n          path: |\n            ~/.cargo/bin/\n            ~/.cargo/registry/index/\n            ~/.cargo/registry/cache/\n            ~/.cargo/git/db/\n            ~/.cargo/registry/src/**/librocksdb-sys-*\n            target/\n          key: ${{ runner.os }}-test-base-${{ hashFiles('Cargo.toml') }}\n      - name: Test\n        uses: actions-rs/cargo@v1\n        with:\n          command: test\n          args: --verbose\n\n  test-all-features:\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v2\n      - name: Use Nightly\n        uses: actions-rs/toolchain@v1\n        with:\n          toolchain: nightly-2024-04-25\n          override: true\n      - name: Cache\n        uses: actions/cache@v3\n        with:\n          path: |\n            ~/.cargo/bin/\n            ~/.cargo/registry/index/\n            ~/.cargo/registry/cache/\n            ~/.cargo/git/db/\n            ~/.cargo/registry/src/**/librocksdb-sys-*\n            target/\n          key: ${{ runner.os }}-test-all-features-${{ hashFiles('Cargo.toml') }}\n      - name: Test\n        uses: actions-rs/cargo@v1\n        with:\n          command: test\n          args: --verbose --all-features\n\n  coverage:\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v2\n      - name: Use Nightly\n        uses: actions-rs/toolchain@v1\n        with:\n          toolchain: nightly-2024-04-25\n          components: llvm-tools-preview\n          
override: true\n      - name: Cache\n        uses: actions/cache@v3\n        with:\n          path: |\n            ~/.cargo/bin/\n            ~/.cargo/registry/index/\n            ~/.cargo/registry/cache/\n            ~/.cargo/git/db/\n            ~/.cargo/registry/src/**/librocksdb-sys-*\n            target/\n          key: ${{ runner.os }}-coverage-${{ hashFiles('Cargo.toml') }}\n      - name: Install Coverage Tooling\n        uses: actions-rs/cargo@v1\n        with:\n          command: install\n          args: cargo-llvm-cov --force\n      - name: Run Coverage\n        uses: actions-rs/cargo@v1\n        with:\n          command: llvm-cov\n          args: --all-features --workspace --lcov --output-path lcov.info\n      - name: Upload to codecov.io\n        uses: codecov/codecov-action@v3\n        with:\n          token: ${{ secrets.CODECOV_TOKEN }}\n          files: lcov.info\n          fail_ci_if_error: true\n\n  format:\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v2\n      - name: Use Nightly\n        uses: actions-rs/toolchain@v1\n        with:\n          toolchain: nightly-2024-04-25\n          components: rustfmt\n          override: true\n      - name: Check\n        uses: actions-rs/cargo@v1\n        with:\n          command: fmt\n          args: --all -- --check\n\n  clippy:\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v2\n      - name: Use Nightly\n        uses: actions-rs/toolchain@v1\n        with:\n          toolchain: nightly-2024-04-25\n          components: clippy\n          override: true\n      - name: Cache\n        uses: actions/cache@v3\n        with:\n          path: |\n            ~/.cargo/bin/\n            ~/.cargo/registry/index/\n            ~/.cargo/registry/cache/\n            ~/.cargo/git/db/\n            ~/.cargo/registry/src/**/librocksdb-sys-*\n            target/\n          key: ${{ runner.os }}-clippy-${{ hashFiles('Cargo.toml') 
}}\n      - name: Check\n        uses: actions-rs/clippy-check@v1\n        with:\n          token: ${{ secrets.GITHUB_TOKEN }}\n          args: --all-features -- -D warnings\n\n  benches:\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v2\n      - name: Use Nightly\n        uses: actions-rs/toolchain@v1\n        with:\n          toolchain: nightly-2024-04-25\n          override: true\n      - name: Cache\n        uses: actions/cache@v3\n        with:\n          path: |\n            ~/.cargo/bin/\n            ~/.cargo/registry/index/\n            ~/.cargo/registry/cache/\n            ~/.cargo/git/db/\n            ~/.cargo/registry/src/**/librocksdb-sys-*\n            target/\n          key: ${{ runner.os }}-benches-${{ hashFiles('Cargo.toml') }}\n      - name: Run Benches\n        uses: actions-rs/cargo@v1\n        with:\n          command: bench\n"
  },
  {
    "path": ".gitignore",
    "content": "target\ntemp.db\n.DS_Store\nCargo.lock\n"
  },
  {
    "path": "CHANGELOG.md",
    "content": "# Changelog\n\n## [Unreleased]\n\n### Bug Fixes\n\n- Fixed bug where column families would be non-atomically flushed when one memtable was filled, resulting in inconsistency after a crash.\n\n[Unreleased]: https://github.com/nomic-io/merk/compare/v1.0.0-alpha.8...HEAD\n"
  },
  {
    "path": "Cargo.toml",
    "content": "[package]\nname = \"merk\"\ndescription = \"High-performance Merkle key/value store\"\nversion = \"2.0.0\"\nauthors = [\"Turbofish <team@turbofish.org>\"]\nedition = \"2018\"\nlicense = \"Apache-2.0\"\n\n[dependencies]\nthiserror= \"1.0.58\"\nsha2 = \"0.10.8\"\nlog = \"0.4.21\"\n\n[dependencies.colored]\nversion = \"2.1.0\"\noptional = true\n\n[dependencies.num_cpus]\nversion = \"1.16.0\"\noptional = true\n\n[dependencies.ed]\nversion = \"0.3.0\"\noptional = true\n\n[dependencies.rand]\nversion = \"0.8.5\"\nfeatures = [\"small_rng\"]\noptional = true\n\n[dependencies.rocksdb]\nversion = \"0.22.0\"\ndefault-features = false\noptional = true\n\n[dependencies.jemallocator]\nversion = \"0.5.4\"\nfeatures = [\"disable_initial_exec_tls\"]\noptional = true\n\n[features]\ndefault = [\"full\", \"verify\"]\nfull = [\n    \"rand\",\n    \"rocksdb\",\n    \"colored\",\n    \"num_cpus\",\n    \"ed\",\n]\nverify = [\"ed\"]\n"
  },
  {
    "path": "LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "README.md",
    "content": "<h1 align=\"left\">\n<picture>\n  <source media=\"(prefers-color-scheme: dark)\" srcset=\"./merk-dark.svg\">\n  <source media=\"(prefers-color-scheme: light)\" srcset=\"./merk.svg\">\n  <img alt=\"merk\" src=\"./merk.svg\">\n</picture>\n</h1>\n\n*High-performance Merkle key/value store*\n\n![CI](https://github.com/turbofish-org/merk/actions/workflows/ci.yml/badge.svg)\n[![codecov](https://codecov.io/gh/turbofish-org/merk/branch/develop/graph/badge.svg?token=TTUTSt2iLz)](https://codecov.io/gh/turbofish-org/merk)\n[![Crate](https://img.shields.io/crates/v/merk.svg)](https://crates.io/crates/merk)\n[![API](https://docs.rs/merk/badge.svg)](https://docs.rs/merk)\n\nMerk is a crypto key/value store - more specifically, it's a Merkle AVL tree built on top of RocksDB (Facebook's fork of LevelDB).\n\nIts priorities are performance and reliability. While Merk was designed to be the state database for blockchains, it can also be used anywhere an auditable key/value store is needed.\n\n### Features\n- **Fast reads/writes** - Reads have no overhead compared to a normal RocksDB store, and writes are optimized for batch operations (e.g. blocks in a blockchain).\n- **Fast proof generation** - Since Merk implements an AVL tree rather than a trie, it is very efficient to create and verify proofs for ranges of keys.\n- **Concurrency** - Unlike most other Merkle stores, all operations utilize all available cores - giving huge performance gains and allowing nodes to scale along with Moore's Law.\n- **Replication** - The tree is optimized to efficiently build proofs of large chunks, allowing for nodes to download the entire state (e.g. 
\"state syncing\").\n- **Checkpointing** - Merk can create checkpoints on disk (an immutable view of the entire store at a certain point in time) without blocking, so there are no delays in availability or liveness.\n- **Web-friendly** - Being written in Rust means it is easy to run the proof-verification code in browsers with WebAssembly, allowing for light-clients that can verify data for themselves.\n- **Fits any Profile** - Performant on RAM-constrained Raspberry Pi's and beefy validator rigs alike.\n\nThe algorithms are based on AVL, but optimized for batches of operations and random fetches from the backing store.\n\n## Usage\n\n**Install:**\n```\ncargo add merk\n```\n\n**Example:**\n```rust\nextern crate merk;\nuse merk::*;\n\n// load or create a Merk store at the given path\nlet mut merk = Merk::open(\"./merk.db\").unwrap();\n\n// apply some operations\nlet batch = [\n    (b\"key\", Op::Put(b\"value\")),\n    (b\"key2\", Op::Put(b\"value2\")),\n    (b\"key3\", Op::Put(b\"value3\")),\n    (b\"key4\", Op::Delete)\n];\nmerk.apply(&batch).unwrap();\n```\nMerk is currently used by [Nomic](https://github.com/nomic-io/nomic), a blockchain powering decentralized custody of Bitcoin, built on [Orga](https://github.com/turbofish-org/orga).\n\n## Benchmarks\n\nBenchmarks are measured on a 1M node tree, each node having a key length of 16 bytes and value length of 40 bytes. 
All tests are single-threaded (not counting RocksDB background threads).\n\nYou can test these yourself by running `cargo bench`.\n\n### 2017 Macbook Pro\n\n*(Using 1 Merk thread and 4 RocksDB compaction threads)*\n\n**Pruned (no state kept in memory)**\n\n*RAM usage:* ~20MB average, ~26MB max\n\n| Test | Ops per second |\n| -------- | ------ |\n| Random inserts | 23,000 |\n| Random updates | 32,000 |\n| Random deletes | 26,000 |\n| Random reads | 210,000 |\n| Random proof generation | 133,000 |\n\n**Cached (all state kept in memory)**\n\n*RAM usage:* ~400MB average, ~1.1GB max\n\n| Test | Ops per second |\n| -------- | ------ |\n| Random inserts | 58,000 |\n| Random updates | 81,000 |\n| Random deletes | 72,000 |\n| Random reads | 1,565,000 |\n| Random proof generation | 311,000 |\n\n### i9-9900K Desktop\n\n*(Using 1 Merk thread and 16 RocksDB compaction threads)*\n\n**Pruned (no state kept in memory)**\n\n*RAM usage:* ~20MB average, ~26MB max\n\n| Test | Ops per second |\n| -------- | ------ |\n| Random inserts | 40,000 |\n| Random updates | 55,000 |\n| Random deletes | 45,000 |\n| Random reads | 383,000 |\n| Random proof generation | 249,000 |\n\n**Cached (all state kept in memory)**\n\n*RAM usage:* ~400MB average, ~1.1GB max\n\n| Test | Ops per second |\n| -------- | ------ |\n| Random inserts | 93,000 |\n| Random updates | 123,000 |\n| Random deletes | 111,000 |\n| Random reads | 2,370,000 |\n| Random proof generation | 497,000 |\n\n## Contributing\n\nMerk is an open-source project spearheaded by Turbofish. 
Anyone is able to contribute to Merk via GitHub.\n\n[Contribute to Merk](https://github.com/turbofish-org/merk/contribute)\n\n## Security\n\n### Security Audits\n\n| Date | Auditor | Scope | Report |\n| ---: | :---: | :--- | :---: |\n| October 2024 | Trail of Bits | `orga` `merk` `ed` `abci2` | [📄](https://github.com/trailofbits/publications/blob/master/reviews/2024-11-orgaandmerk-securityreview.pdf) |\n\nVulnerabilities should not be reported through public channels, including GitHub Issues. You can report a vulnerability via GitHub's Private Vulnerability Reporting or to Turbofish at `security@turbofish.org`.\n\n[Report a Vulnerability](https://github.com/turbofish-org/merk/security/advisories/new)\n\n## License\n\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may not use the files in this repository except in compliance with the License. You may obtain a copy of the License at\n\n    https://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\n\n---\n\nCopyright © 2024 Turbofish, Inc.\n"
  },
  {
    "path": "benches/merk.rs",
    "content": "#![feature(test)]\n\nextern crate test;\n\nuse merk::proofs::encode_into as encode_proof_into;\nuse merk::restore::Restorer;\nuse merk::test_utils::*;\nuse merk::{Merk, Result};\nuse rand::prelude::*;\nuse std::thread;\nuse test::Bencher;\n\n#[bench]\nfn get_1m_rocksdb(b: &mut Bencher) {\n    let initial_size = 1_000_000;\n    let batch_size = 2_000;\n    let num_batches = initial_size / batch_size;\n\n    let path = thread::current().name().unwrap().to_owned();\n    let mut merk = TempMerk::open(path).expect(\"failed to open merk\");\n\n    let mut batches = vec![];\n    for i in 0..num_batches {\n        let batch = make_batch_rand(batch_size, i);\n        unsafe { merk.apply_unchecked(&batch, &[]).expect(\"apply failed\") };\n        batches.push(batch);\n    }\n\n    let mut i = 0;\n    b.iter(|| {\n        let batch_index = (i % num_batches) as usize;\n        let key_index = (i / num_batches) as usize;\n\n        let key = &batches[batch_index][key_index].0;\n        merk.get(key).expect(\"get failed\");\n\n        i = (i + 1) % initial_size;\n    });\n}\n\n#[bench]\nfn insert_1m_2k_seq_rocksdb_noprune(b: &mut Bencher) {\n    let initial_size = 1_000_000;\n    let batch_size = 2_000;\n\n    let path = thread::current().name().unwrap().to_owned();\n    let mut merk = TempMerk::open(path).expect(\"failed to open merk\");\n\n    for i in 0..(initial_size / batch_size) {\n        let batch = make_batch_seq((i * batch_size)..((i + 1) * batch_size));\n        unsafe { merk.apply_unchecked(&batch, &[]).expect(\"apply failed\") };\n    }\n\n    let mut i = initial_size / batch_size;\n    b.iter(|| {\n        let batch = make_batch_seq((i * batch_size)..((i + 1) * batch_size));\n        unsafe { merk.apply_unchecked(&batch, &[]).expect(\"apply failed\") };\n        i += 1;\n    });\n}\n\n#[bench]\nfn insert_1m_2k_rand_rocksdb_noprune(b: &mut Bencher) {\n    let initial_size = 1_000_000;\n    let batch_size = 2_000;\n\n    let path = 
thread::current().name().unwrap().to_owned();\n    let mut merk = TempMerk::open(path).expect(\"failed to open merk\");\n\n    for i in 0..(initial_size / batch_size) {\n        let batch = make_batch_rand(batch_size, i);\n        unsafe { merk.apply_unchecked(&batch, &[]).expect(\"apply failed\") };\n    }\n\n    let mut i = initial_size / batch_size;\n    b.iter(|| {\n        let batch = make_batch_rand(batch_size, i);\n        unsafe { merk.apply_unchecked(&batch, &[]).expect(\"apply failed\") };\n        i += 1;\n    });\n}\n\n#[bench]\nfn update_1m_2k_seq_rocksdb_noprune(b: &mut Bencher) {\n    let initial_size = 1_000_000;\n    let batch_size = 2_000;\n\n    let path = thread::current().name().unwrap().to_owned();\n    let mut merk = TempMerk::open(path).expect(\"failed to open merk\");\n\n    for i in 0..(initial_size / batch_size) {\n        let batch = make_batch_seq((i * batch_size)..((i + 1) * batch_size));\n        unsafe { merk.apply_unchecked(&batch, &[]).expect(\"apply failed\") };\n    }\n\n    let mut i = 0;\n    b.iter(|| {\n        let batch = make_batch_seq((i * batch_size)..((i + 1) * batch_size));\n        unsafe { merk.apply_unchecked(&batch, &[]).expect(\"apply failed\") };\n        i = (i + 1) % (initial_size / batch_size);\n    });\n}\n\n#[bench]\nfn update_1m_2k_rand_rocksdb_noprune(b: &mut Bencher) {\n    let initial_size = 1_000_000;\n    let batch_size = 2_000;\n\n    let path = thread::current().name().unwrap().to_owned();\n    let mut merk = TempMerk::open(path).expect(\"failed to open merk\");\n\n    for i in 0..(initial_size / batch_size) {\n        let batch = make_batch_rand(batch_size, i);\n        unsafe { merk.apply_unchecked(&batch, &[]).expect(\"apply failed\") };\n    }\n\n    let mut i = 0;\n    b.iter(|| {\n        let batch = make_batch_rand(batch_size, i);\n        unsafe { merk.apply_unchecked(&batch, &[]).expect(\"apply failed\") };\n        i = (i + 1) % (initial_size / batch_size);\n    });\n}\n\n#[bench]\nfn 
delete_1m_2k_rand_rocksdb_noprune(b: &mut Bencher) {\n    let initial_size = 1_000_000;\n    let batch_size = 2_000;\n\n    let path = thread::current().name().unwrap().to_owned();\n    let mut merk = TempMerk::open(path).expect(\"failed to open merk\");\n\n    for i in 0..(initial_size / batch_size) {\n        let batch = make_batch_rand(batch_size, i);\n        unsafe { merk.apply_unchecked(&batch, &[]).expect(\"apply failed\") };\n    }\n\n    let mut i = 0;\n    b.iter(|| {\n        if i >= (initial_size / batch_size) {\n            println!(\"WARNING: too many bench iterations, whole tree deleted\");\n            return;\n        }\n        let batch = make_del_batch_rand(batch_size, i);\n        unsafe { merk.apply_unchecked(&batch, &[]).expect(\"apply failed\") };\n        i = (i + 1) % (initial_size / batch_size);\n    });\n}\n\n#[bench]\nfn prove_1m_1_rand_rocksdb_noprune(b: &mut Bencher) {\n    let initial_size = 1_000_000;\n    let batch_size = 1_000;\n    let proof_size = 1;\n\n    let path = thread::current().name().unwrap().to_owned();\n    let mut merk = TempMerk::open(path).expect(\"failed to open merk\");\n\n    for i in 0..(initial_size / batch_size) {\n        let batch = make_batch_rand(batch_size, i);\n        unsafe { merk.apply_unchecked(&batch, &[]).expect(\"apply failed\") };\n    }\n\n    let mut i = 0;\n    b.iter(|| {\n        let batch = make_batch_rand(proof_size, i);\n        let mut keys = Vec::with_capacity(batch.len());\n        for (key, _) in batch {\n            keys.push(merk::proofs::query::QueryItem::Key(key));\n        }\n        merk.prove(keys).expect(\"prove failed\");\n        i = (i + 1) % (initial_size / batch_size);\n\n        merk.commit(std::collections::LinkedList::new(), &[])\n            .unwrap();\n    });\n}\n\n#[bench]\nfn build_trunk_chunk_1m_1_rand_rocksdb_noprune(b: &mut Bencher) {\n    let initial_size = 1_000_000;\n    let batch_size = 1_000;\n\n    let path = 
thread::current().name().unwrap().to_owned();\n    let mut merk = TempMerk::open(path).expect(\"failed to open merk\");\n\n    for i in 0..(initial_size / batch_size) {\n        let batch = make_batch_rand(batch_size, i);\n        unsafe { merk.apply_unchecked(&batch, &[]).expect(\"apply failed\") };\n    }\n\n    let mut bytes = vec![];\n\n    b.iter(|| {\n        bytes.clear();\n\n        let (ops, _) = merk.walk(|walker| walker.unwrap().create_trunk_proof().unwrap());\n        encode_proof_into(ops.iter(), &mut bytes);\n\n        merk.commit(std::collections::LinkedList::new(), &[])\n            .unwrap();\n    });\n\n    b.bytes = bytes.len() as u64;\n}\n\n#[bench]\nfn chunkproducer_rand_1m_1_rand_rocksdb_noprune(b: &mut Bencher) {\n    let mut rng = rand::thread_rng();\n\n    let initial_size = 1_000_000;\n    let batch_size = 1_000;\n\n    let path = thread::current().name().unwrap().to_owned();\n    let mut merk = TempMerk::open(path).expect(\"failed to open merk\");\n\n    for i in 0..(initial_size / batch_size) {\n        let batch = make_batch_rand(batch_size, i);\n        unsafe { merk.apply_unchecked(&batch, &[]).expect(\"apply failed\") };\n    }\n\n    let mut chunks = merk.chunks().unwrap();\n    let mut total_bytes = 0;\n    let mut i = 0;\n\n    let mut next = || {\n        let index = rng.gen::<usize>() % chunks.len();\n        chunks.chunk(index).unwrap()\n    };\n\n    b.iter(|| {\n        let chunk = next();\n        total_bytes += chunk.len();\n        i += 1;\n    });\n\n    b.bytes = (total_bytes / i) as u64;\n}\n\n#[bench]\nfn chunk_iter_1m_1_rand_rocksdb_noprune(b: &mut Bencher) {\n    let initial_size = 1_000_000;\n    let batch_size = 1_000;\n\n    let path = thread::current().name().unwrap().to_owned();\n    let mut merk = TempMerk::open(path).expect(\"failed to open merk\");\n\n    for i in 0..(initial_size / batch_size) {\n        let batch = make_batch_rand(batch_size, i);\n        unsafe { merk.apply_unchecked(&batch, 
&[]).expect(\"apply failed\") };\n    }\n\n    let mut chunks = merk.chunks().unwrap().into_iter();\n    let mut total_bytes = 0;\n    let mut i = 0;\n\n    let mut next = || match chunks.next() {\n        Some(chunk) => chunk,\n        None => {\n            chunks = merk.chunks().unwrap().into_iter();\n            chunks.next().unwrap()\n        }\n    };\n\n    b.iter(|| {\n        let chunk = next();\n        total_bytes += chunk.unwrap().len();\n        i += 1;\n    });\n\n    b.bytes = (total_bytes / i) as u64;\n}\n\n#[bench]\nfn restore_1m_1_rand_rocksdb_noprune(b: &mut Bencher) {\n    let initial_size = 1_000_000;\n    let batch_size = 1_000;\n\n    let path = thread::current().name().unwrap().to_owned();\n    let mut merk = TempMerk::open(path).expect(\"failed to open merk\");\n\n    for i in 0..(initial_size / batch_size) {\n        let batch = make_batch_rand(batch_size, i);\n        unsafe { merk.apply_unchecked(&batch, &[]).expect(\"apply failed\") };\n    }\n\n    let chunks = merk\n        .chunks()\n        .unwrap()\n        .into_iter()\n        .collect::<Result<Vec<_>>>()\n        .unwrap();\n\n    let path = thread::current().name().unwrap().to_owned() + \"_restore\";\n    let mut restorer: Option<Restorer> = None;\n\n    let mut total_bytes = 0;\n    let mut i = 0;\n\n    b.iter(|| {\n        if i % chunks.len() == 0 {\n            if i != 0 {\n                let restorer_merk = restorer.take().unwrap().finalize();\n                drop(restorer_merk);\n                std::fs::remove_dir_all(&path).unwrap();\n            }\n\n            restorer = Some(Merk::restore(&path, merk.root_hash(), chunks.len()).unwrap());\n        }\n\n        let restorer = restorer.as_mut().unwrap();\n        let chunk = chunks[i % chunks.len()].as_slice();\n        restorer.process_chunk(chunk).unwrap();\n\n        total_bytes += chunk.len();\n        i += 1;\n    });\n\n    std::fs::remove_dir_all(&path).unwrap();\n\n    b.bytes = (total_bytes / i) as 
u64;\n}\n\n#[bench]\nfn checkpoint_create_destroy_1m_1_rand_rocksdb_noprune(b: &mut Bencher) {\n    let initial_size = 1_000_000;\n    let batch_size = 1_000;\n\n    let path = thread::current().name().unwrap().to_owned();\n    let mut merk = TempMerk::open(&path).expect(\"failed to open merk\");\n\n    for i in 0..(initial_size / batch_size) {\n        let batch = make_batch_rand(batch_size, i);\n        unsafe { merk.apply_unchecked(&batch, &[]).expect(\"apply failed\") };\n    }\n\n    let path = path + \".checkpoint\";\n    b.iter(|| {\n        let checkpoint = merk.checkpoint(&path).unwrap();\n        checkpoint.destroy().unwrap();\n    });\n}\n"
  },
  {
    "path": "benches/ops.rs",
    "content": "#![feature(test)]\n\nextern crate test;\n\nuse merk::owner::Owner;\nuse merk::test_utils::*;\nuse test::Bencher;\n\n#[bench]\nfn insert_1m_10k_seq_memonly(b: &mut Bencher) {\n    let initial_size = 1_000_000;\n    let batch_size = 10_000;\n\n    let mut tree = Owner::new(make_tree_seq(initial_size));\n\n    let mut i = initial_size / batch_size;\n    b.iter(|| {\n        let batch = make_batch_seq((i * batch_size)..((i + 1) * batch_size));\n        tree.own(|tree| apply_memonly_unchecked(tree, &batch));\n        i += 1;\n    });\n}\n\n#[bench]\nfn insert_1m_10k_rand_memonly(b: &mut Bencher) {\n    let initial_size = 1_000_000;\n    let batch_size = 10_000;\n\n    let mut tree = Owner::new(make_tree_rand(initial_size, batch_size, 0));\n\n    let mut i = initial_size / batch_size;\n    b.iter(|| {\n        let batch = make_batch_rand(batch_size, i);\n        tree.own(|tree| apply_memonly_unchecked(tree, &batch));\n        i += 1;\n    });\n}\n\n#[bench]\nfn update_1m_10k_seq_memonly(b: &mut Bencher) {\n    let initial_size = 1_000_000;\n    let batch_size = 10_000;\n\n    let mut tree = Owner::new(make_tree_seq(initial_size));\n\n    let mut i = 0;\n    b.iter(|| {\n        let batch = make_batch_seq((i * batch_size)..((i + 1) * batch_size));\n        tree.own(|tree| apply_memonly_unchecked(tree, &batch));\n        i = (i + 1) % (initial_size / batch_size);\n    });\n}\n\n#[bench]\nfn update_1m_10k_rand_memonly(b: &mut Bencher) {\n    let initial_size = 1_010_000;\n    let batch_size = 10_000;\n\n    let mut tree = Owner::new(make_tree_rand(initial_size, batch_size, 0));\n\n    let mut i = 0;\n    b.iter(|| {\n        let batch = make_batch_rand(batch_size, i);\n        tree.own(|tree| apply_memonly_unchecked(tree, &batch));\n        i = (i + 1) % (initial_size / batch_size);\n    });\n}\n"
  },
  {
    "path": "docs/algorithms.md",
    "content": "# Merk - A High-Performance Merkle AVL Tree\n\n**Matt Bell ([@mappum](https://twitter.com/mappum))** • [Nomic Hodlings, Inc.](https://nomic.io)\n\nv0.0.4 - _August 5, 2020_\n\n## Introduction\n\nMerk is a Merkle AVL tree designed for performance, running on top of a backing key/value store such as RocksDB. Notable features include concurrent operations for higher throughput, an optimized key/value layout for performant usage of the backing store, and efficient proof generation to enable bulk tree replication.\n\n_Note that this document is meant to be a way to grok how Merk works, rather than an authoritative specification._\n\n## Algorithm Overview\n\nThe Merk tree was inspired by [`tendermint/iavl`](https://github.com/tendermint/iavl) from the [Tendermint](https://tendermint.com) team but makes various fundamental design changes in the name of performance.\n\n### Tree Structure\n\n#### Nodes and Hashing\n\nIn many Merkle tree designs, only leaf nodes contain key/value pairs (inner nodes only contain child hashes). To contrast, every node in a Merk tree contains a key and a value, including inner nodes.\n\nEach node contains a \"kv hash\", which is the hash of its key/value pair, in addition to its child hashes. 
The hash of the node is just the hash of the concatenation of these three hashes:\n\n```\nkv_hash = H(key, value)\nnode_hash = H(kv_hash, left_child_hash, right_child_hash)\n```\n\nNote that the `left_child_hash` and/or `right_child_hash` values may be null since it is possible for the node to have no children or only one child.\n\nIn our implementation, the hash function used is SHA512/256 (SHA512 with output truncated to 256 bits) but this choice is trivially swappable.\n\n#### Database Representation\n\nIn the backing key/value store, nodes are stored using their key/value pair key as the database key, and a binary encoding that contains the fields in the above `Node` structure - minus the `key` field since that is already implied by the database entry.\n\nStoring nodes by key rather than by hash is an important optimization, and is the reason why inner nodes each have a key/value pair. The implication is that reading a key does not require traversing through the tree structure but only requires a single read in the backing key/value store, meaning there is practically no overhead versus using the backing store without a tree structure. Additionally, we can efficiently iterate through nodes in the tree in their in-order traversal just by iterating by key in the backing store (which RocksDB and LevelDB are optimized for).\n\nThis means we lose the \"I\" compared to the IAVL library - immutability. Since now we operate on the tree nodes in-place in the backing store, we don't by default have views of past states of the tree. 
However, in our implementation we replicate this functionality with RocksDB's snapshot and checkpoint features which provide a consistent view of the store at a certain point in history - either ephemerally in memory or persistently on disk.\n\n### Operations\n\nOperating on a Merk tree is optimized for batches - in the real world we will only be updating the tree once per block, applying a batch of many changes from many transactions at the same time.\n\n#### Concurrent Batch Operator\n\nTo mutate the tree, we apply batches of operations, each of which can either be `Put(key, value)` or `Delete(key)`.\n\nBatches of operations are expected to be sorted by key, with every key appearing only once. Our implementation provides an `apply` method which sorts the batch and checks for duplicate keys, and an `apply_unchecked` method which skips the sorting/checking step for performance reasons when the caller has already ensured the batch is sorted.\n\nThe algorithm to apply these operations to the tree is called recursively on each relevant node.\n\n_Simplified pseudocode for the operation algorithm:_\n\n- Given a node and a batch of operations:\n  - Binary search for the current node's key in the batch:\n    - If this node's key is found in the batch at index `i`:\n      - Apply the operation to this node:\n        - If operation is `Put`, update its `value` and `kv_hash`\n        - If the operation is `Delete`, perform a traditional BST node removal\n      - Split the batch into left and right sub-batches (excluding the operation we just applied):\n        - Left batch from batch start to index `i`\n        - Right batch from index `i + 1` to the end of the batch\n    - If this node's key is not found in the batch, but could be inserted at index `i` maintaining sorted order:\n      - Split the batch into left and right sub-batches:\n        - Left batch from batch start to index `i`\n        - Right batch from index `i` to the end of the batch\n  - Recurse:\n    - 
Apply the left sub-batch to this node's left child\n    - Apply the right sub-batch to this node's right child\n  - Balance:\n    - If after recursing the left and right subtrees are unbalanced (their heights differ by more than 1), perform an AVL tree rotation (possibly more than one)\n  - Recompute node's hash based on hash of its updated children and `kv_hash`, then return\n\nThis batch application of operations can happen concurrently - recursing into the left and right subtrees of a node are two fully independent operations (operations on one subtree will never involve reading or writing to/from any of the nodes on the other subtree). This means we have an _implicit lock_ - we don't need to coordinate with mutexes but only need to wait for both the left side and right side to finish their operations.\n\n### Proofs\n\nMerk was designed with efficient proofs in mind, both for application queries (e.g. a user checking their account balance) and bulk tree replication (a.k.a. \"state syncing\") between untrusted nodes.\n\n#### Structure\n\nMerk proofs are a list of stack-based operators and node data, with 3 possible operators: `Push(node)`, `Parent`, and `Child`. A stream of these operators can be processed by a verifier in order to reconstruct a sparse representation of part of the tree, in a way where the data can be verified against a known root hash.\n\nThe value of `node` in a `Push` operation can be one of three types:\n\n- `Hash(hash)` - The hash of a node\n- `KVHash(hash)` - The key/value hash of a node\n- `KV(key, value)` - The key and value of a node\n\nThis proof format can be encoded in a binary format and has negligible space overhead for efficient transport over the network.\n\n#### Verification\n\nA verifier can process a proof by maintaining a stack of connected tree nodes, and executing the operators in order:\n\n- `Push(node)` - Push some node data onto the stack.\n- `Child` - Pop a value from the stack, `child`. 
Pop another value from the stack, `parent`. Set `child` as the right child of `parent`, and push the combined result back on the stack.\n- `Parent` - Pop a value from the stack, `parent`. Pop another value from the stack, `child`. Set `child` as the left child of `parent`, and push the combined result back on the stack.\n\nProof verification will fail if e.g. `Child` or `Parent` try to pop a value from the stack but the stack is empty, `Child` or `Parent` try to overwrite an existing child, or the proof does not result in exactly one stack item.\n\nThis proof language can be used to specify any possible set or subset of the tree's data in a way that can be reconstructed efficiently by the verifier. Proofs can contain either an arbitrary set of selected key/value pairs (e.g. in an application query), or contiguous tree chunks (when replicating the tree). After processing an entire proof, the verifier should have derived a root hash which can be compared to the root hash they expect (e.g. the one validators committed to in consensus), and have a set of proven key/value pairs.\n\nNote that this can be computed in a streaming fashion, e.g. while downloading the proof, which makes the required memory for verification very low even for large proofs. However, the verifier cannot tell if the proof is valid until finishing the entire proof, so very large proofs should be broken up into multiple proofs of smaller size.\n\n#### Generation\n\nEfficient proof generation is important since nodes will likely receive a high volume of queries and constantly be serving proofs, essentially providing an API service to end-user application clients, as well as servicing demand for replication when new nodes come onto the network.\n\nNodes can generate proofs for a set of keys by traversing through the tree from the root and building up the required proof branches. 
Much like the batch operator algorithm, this algorithm takes a batch of sorted, unique keys as input.\n\n_Simplified pseudocode for proof generation (based on an in-order traversal):_\n\n- Given a node and a batch of keys to include in the proof:\n  - If the batch is empty, append `Push(Hash(node_hash))` to the proof and return\n  - Binary search for the current node's key in the batch:\n    - If this node's key is found in the batch at index `i`:\n      - Partition the batch into left and right sub-batches at index `i` (excluding index `i`)\n    - If this node's key is not found in the batch, but could be inserted at index `i` maintaining sorted order:\n      - Partition the batch into left and right sub-batches at index `i`\n  - **Recurse left:** If there is a left child:\n    - If the left sub-batch is not empty, query the left child (appending operators to the proof)\n    - If the left sub-batch is empty, append `Push(Hash(left_child_hash))` to the proof\n  - Append proof operator:\n    - If this node's key is in the batch, or if the left sub-batch was not empty and no left child exists, or if the right sub-batch is not empty and no right child exists, or if the left child's right edge queried a non-existent key, or if the right child's left edge queried a non-existent key, append `Push(KV(key, value))` to the proof\n    - Otherwise, append `Push(KVHash(kv_hash))` to the proof\n  - If the left child exists, append `Parent` to the proof\n  - **Recurse right:** If there is a right child:\n    - If the right sub-batch is not empty, query the right child (appending operators to the proof)\n    - If the right sub-batch is empty, append `Push(Hash(right_child_hash))` to the proof\n    - Append `Child` to the proof\n\nSince RocksDB allows concurrent reading from a consistent snapshot/checkpoint, nodes can concurrently generate proofs on all cores to service a higher volume of queries, even if our algorithm isn't designed for concurrency.\n\n#### Binary Format\n\nWe 
can efficiently encode these proofs by encoding each operator as follows:\n\n```\nPush(Hash(hash)) => 0x01 <32-byte hash>\nPush(KVHash(hash)) => 0x02 <32-byte hash>\nPush(KV(key, value)) => 0x03 <1-byte key length> <n-byte key> <2-byte value length> <n-byte value>\nParent => 0x10\nChild => 0x11\n```\n\nThis results in a compact binary representation, with a very small space overhead (roughly 2 bytes per node in the proof (1 byte for the `Push` operator type flag, and 1 byte for a `Parent` or `Child` operator), plus 3 bytes per key/value pair (1 byte for the key length, and 2 bytes for the value length)).\n\n#### Efficient Chunk Proofs for Replication\n\nAn alternate, optimized proof generation can be used when generating proofs for large contiguous subtrees, e.g. chunks for tree replication. This works by iterating sequentially through keys in the backing store (which is much faster than random lookups).\n\nBased on some early benchmarks, I estimate that typical server hardware should be able to generate this kind of range proof at a rate of hundreds of MB/s, which means the bottleneck for bulk replication will likely be bandwidth rather than CPU. To improve performance further, these proofs can be cached and trivially served by a CDN or a P2P swarm (each node of which can easily verify the chunks they pass around).\n\nDue to the tree structure we already use, streaming the entries in key-order gives us all the nodes to construct complete contiguous subtrees. For instance, in the diagram below, streaming from keys `1` to `7` will give us a complete subtree. 
This subtree can be verified to be a part of the full tree as long as we know the hash of `4`.\n\n```\n             8\n           /   \\\n        /      ...\n      4\n    /   \\\n  2       6\n / \\     / \\\n1   3   5   7\n```\n\nOur algorithm builds verifiable chunks by first constructing a chunk of the upper levels of the tree, called the _trunk chunk_, plus each subtree below that (each of which is called a _leaf chunk_).\n\nThe number of levels to include in the trunk can be chosen to control the size of the leaf nodes. For example, a tree of height 10 should have approximately 1,023 nodes. If the trunk contains the top 5 levels, the trunk and the 32 resulting leaf nodes will each contain ~31 nodes. We can even prove to the verifier the trunk size was chosen correctly by also including an approximate tree height proof, by including the branch all the way to the leftmost node of the tree (node `1` in the figure) and using this height as our basis to select the number of trunk levels.\n\nAfter the prover builds the trunk by traversing from the root node and making random lookups down to the chosen level, it can generate the leaf nodes extremely efficiently by reading the database keys sequentially as described a few paragraphs above. We can trivially detect when a chunk should end whenever a node at or above the trunk level is encountered (e.g. encountering node `8` signals we have read a complete subtree).\n\nThe generated proofs can be efficiently encoded into the same proof format described above. Verifiers only have the added constraint that none of the data should be abridged (all nodes contain a key and value, rather than just a hash or kvhash). 
After first downloading and verifying the trunk, verifiers can also download leaf chunks in parallel and verify that each connects to the trunk by comparing each subtree's root hash.\n\nNote that this algorithm produces proofs with very little memory requirements, plus little overhead added to the sequential read from disk. In a proof-of-concept benchmark, proof generation was measured to be ~750 MB/s on a modern solid-state drive and processor, meaning a 4GB state tree (the size of the Cosmos Hub state at the time of writing) could be fully proven in ~5 seconds (without considering parallelization). In conjunction with the RocksDB checkpoint feature, this process can happen in the background without blocking the node from executing later blocks.\n\n_Pseudocode for the range proof generation algorithm:_\n\n- Given a tree and a range of keys to prove:\n  - Create a stack of keys (initially empty)\n  - **Range iteration:** for every key/value entry within the query range in the backing store:\n    - Append `Push(KV(key, value))` to the proof\n    - If the current node has a left child, append `Parent` to the proof\n    - If the current node has a right child, push the right child's key onto the key stack\n    - If the current node does not have a right child:\n      - While the current node's key is greater than or equal to the key at the top of the key stack, append `Child` to the proof and pop from the key stack\n\nNote that this algorithm produces the proof in a streaming fashion and has very little memory requirements (the only overhead is the key stack, which will be small even for extremely large trees since its length is a maximum of `log N`).\n\n#### Example Proofs\n\nLet's walk through a concrete proof example. Consider the following tree:\n\n```\n       5\n      / \\\n    /     \\\n  2        9\n / \\      /  \\\n1   4    7    11\n   /    / \\   /\n  3    6   8 10\n```\n\n_Small proof:_\n\nFirst, let's create a proof for a small part of the tree. 
Let's say the user makes a query for keys `1, 2, 3, 4`.\n\nIf we follow our proof generation algorithm, we should get a proof that looks like this:\n\n```\nPush(KV(1, <value of 1>)),\nPush(KV(2, <value of 2>)),\nParent,\nPush(KV(3, <value of 3>)),\nPush(KV(4, <value of 4>)),\nParent,\nChild,\nPush(KVHash(<kv_hash of 5>)),\nParent,\nPush(Hash(<hash of 9>)),\nChild\n```\n\nLet's step through verification to show that this proof works. We'll create a verification stack, which starts out empty, and walk through each operator in the proof, in order:\n\n```\nStack: (empty)\n```\n\nWe will push a key/value pair on the stack, creating a node. However, note that for verification purposes this node will only need to contain the kv_hash which we will compute at this step.\n\n```\nOperator: Push(KV(1, <value of 1>))\n\nStack:\n1\n```\n\n```\nOperator: Push(KV(2, <value of 2>))\n\nStack:\n1\n2\n```\n\nNow we connect nodes 1 and 2, with 2 as the parent.\n\n```\nOperator: Parent\n\nStack:\n  2\n /\n1\n```\n\n```\nOperator: Push(KV(3, <value of 3>))\n\nStack:\n  2\n /\n1\n3\n```\n\n```\nOperator: Push(KV(4, <value of 4>))\n\nStack:\n  2\n /\n1\n3\n4\n```\n\n```\nOperator: Parent\n\nStack:\n  2\n /\n1\n  4\n /\n3\n```\n\nNow connect these two graphs with 4 as the child of 2.\n\n```\nOperator: Child\n\nStack:\n  2\n / \\\n1   4\n   /\n  3\n```\n\nSince the user isn't querying the data from node 5, we only need its kv_hash.\n\n```\nOperator: Push(KVHash(<kv_hash of 5>))\n\nStack:\n  2\n / \\\n1   4\n   /\n  3\n5\n```\n\n```\nOperator: Parent\n\nStack:\n    5\n   /\n  2\n / \\\n1   4\n   /\n  3\n```\n\nWe only need the hash of node 9.\n\n```\nOperator: Push(Hash(<hash of 9>))\n\nStack:\n    5\n   /\n  2\n / \\\n1   4\n   /\n  3\n9\n```\n\n```\nOperator: Child\n\nStack:\n    5\n   / \\\n  2   9\n / \\\n1   4\n   /\n  3\n```\n\nNow after going through all these steps, we have sufficient knowledge of the tree's structure and data to compute node hashes in order to verify. 
At the end, we will have computed a hash for node 5 (the root), and we verify by comparing this hash to the one we expected.\n"
  },
  {
    "path": "rustfmt.toml",
    "content": "comment_width = 80\nwrap_comments = true\n\n"
  },
  {
    "path": "scripts/pgo.sh",
    "content": "#!/bin/bash\n\ndefault_host_triple=\"\"\ndefault_toolchain=\"\"\nIFS=\" = \"\nwhile read -r name value\ndo\n  value=\"${value//\\\"/}\"\n  if [ \"${name}\" == \"default_host_triple\" ]; then\n    default_host_triple=\"${value}\"\n  elif [ \"${name}\" == \"default_toolchain\" ]; then\n    default_toolchain=\"${value}\"\n  fi\ndone < ~/.rustup/settings.toml\n\necho \"default_host_triple=${default_host_triple}\"\necho \"default_toolchain=${default_toolchain}\"\n\nrustup component add llvm-tools-preview\n\nrm -rf /tmp/merk-pgo\nRUSTFLAGS=\"-Cprofile-generate=/tmp/merk-pgo\" cargo bench rand_rocks\n~/.rustup/toolchains/${default_toolchain}/lib/rustlib/${default_host_triple}/bin/llvm-profdata merge -o /tmp/merk-pgo/merged.profdata /tmp/merk-pgo\nRUSTFLAGS=\"-Cprofile-use=/tmp/merk-pgo/merged.profdata\" cargo bench\n"
  },
  {
    "path": "src/error.rs",
    "content": "pub use thiserror::Error;\n\n#[derive(Error, Debug)]\npub enum Error {\n    #[error(\"Attach Error: {0}\")]\n    Attach(String),\n    #[error(\"Batch Key Error: {0}\")]\n    BatchKey(String),\n    #[error(\"Bound Error: {0}\")]\n    Bound(String),\n    #[error(\"Chunk Processing Error: {0}\")]\n    ChunkProcessing(String),\n    #[error(transparent)]\n    Ed(#[from] ed::Error),\n    #[error(\"Fetch Error: {0}\")]\n    Fetch(String),\n    #[error(\"Proof did not match expected hash\\n\\tExpected: {0:?}\\n\\tActual: {1:?}\")]\n    HashMismatch([u8; 32], [u8; 32]),\n    #[error(\"Index OoB Error: {0}\")]\n    IndexOutOfBounds(String),\n    #[error(\"Integer conversion error: {0}\")]\n    IntegerConversionError(#[from] std::num::TryFromIntError),\n    #[error(transparent)]\n    IO(#[from] std::io::Error),\n    #[error(\"Tried to delete non-existent key {0:?}\")]\n    KeyDelete(Vec<u8>),\n    #[error(\"Key Error: {0}\")]\n    Key(String),\n    #[error(\"Key not found: {0}\")]\n    KeyNotFound(String),\n    #[error(\"Proof is missing data for query\")]\n    MissingData,\n    #[error(\"Path Error: {0}\")]\n    Path(String),\n    #[error(\"Proof Error: {0}\")]\n    Proof(String),\n    #[cfg(feature = \"full\")]\n    #[error(transparent)]\n    RocksDB(#[from] rocksdb::Error),\n    #[error(\"Stack Underflow\")]\n    StackUnderflow,\n    #[error(\"Tree Error: {0}\")]\n    Tree(String),\n    #[error(\"Unexpected Node Error: {0}\")]\n    UnexpectedNode(String),\n    #[error(\"Unknown Error\")]\n    Unknown,\n    #[error(\"Version Error: {0}\")]\n    Version(String),\n}\n\npub type Result<T> = std::result::Result<T, Error>;\n"
  },
  {
    "path": "src/lib.rs",
    "content": "//! A high-performance Merkle key/value store.\n//!\n//! Merk is a crypto key/value store - more specifically, it's a Merkle AVL tree\n//! built on top of RocksDB (Facebook's fork of LevelDB).\n//!\n//! Its priorities are performance and reliability. While Merk was designed to\n//! be the state database for blockchains, it can also be used anywhere an\n//! auditable key/value store is needed.\n\n#![feature(trivial_bounds)]\n\n#[global_allocator]\n#[cfg(feature = \"jemallocator\")]\nstatic ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;\n\n#[cfg(feature = \"full\")]\npub use rocksdb;\n\n/// Error and Result types.\nmod error;\n/// The top-level store API.\n#[cfg(feature = \"full\")]\nmod merk;\n/// Provides a container type that allows temporarily taking ownership of a\n/// value.\n// TODO: move this into its own crate\npub mod owner;\n/// Algorithms for generating and verifying Merkle proofs.\npub mod proofs;\n\n/// Various helpers useful for tests or benchmarks.\n#[cfg(feature = \"full\")]\npub mod test_utils;\n/// The core tree data structure.\npub mod tree;\n\n#[cfg(feature = \"full\")]\npub use crate::merk::{chunks, restore, snapshot, Merk, MerkSource, Snapshot};\n\npub use error::{Error, Result};\npub use tree::{Batch, BatchEntry, Hash, Op, PanicSource, HASH_LENGTH};\n\n#[allow(deprecated)]\npub use proofs::query::verify_query;\n\npub use proofs::query::verify;\n"
  },
  {
    "path": "src/merk/chunks.rs",
    "content": "//! Provides `ChunkProducer`, which creates chunk proofs for full replication of\n//! a Merk.\n\nuse super::Merk;\nuse crate::proofs::{chunk::get_next_chunk, Node, Op};\n\nuse crate::{Error, Result};\nuse ed::Encode;\nuse rocksdb::DBRawIterator;\n\n/// A `ChunkProducer` allows the creation of chunk proofs, used for trustlessly\n/// replicating entire Merk trees.\n///\n/// Chunks can be generated on the fly in a random order, or iterated in order\n/// for slightly better performance.\npub struct ChunkProducer<'a> {\n    trunk: Vec<Op>,\n    chunk_boundaries: Vec<Vec<u8>>,\n    raw_iter: DBRawIterator<'a>,\n    index: usize,\n}\n\nimpl<'a> ChunkProducer<'a> {\n    /// Creates a new `ChunkProducer` for the given `Merk` instance. In the\n    /// constructor, the first chunk (the \"trunk\") will be created.\n    pub fn new(merk: &'a Merk) -> Result<Self> {\n        let (trunk, has_more) = merk.walk(|maybe_walker| match maybe_walker {\n            Some(mut walker) => walker.create_trunk_proof(),\n            None => Ok((vec![], false)),\n        })?;\n\n        let chunk_boundaries = if has_more {\n            trunk\n                .iter()\n                .filter_map(|op| match op {\n                    Op::Push(Node::KV(key, _)) => Some(key.clone()),\n                    _ => None,\n                })\n                .collect()\n        } else {\n            vec![]\n        };\n\n        let mut raw_iter = merk.raw_iter();\n        raw_iter.seek_to_first();\n\n        Ok(ChunkProducer {\n            trunk,\n            chunk_boundaries,\n            raw_iter,\n            index: 0,\n        })\n    }\n\n    /// Gets the chunk with the given index. 
Errors if the index is out of\n    /// bounds or the tree is empty - the number of chunks can be checked by\n    /// calling `producer.len()`.\n    pub fn chunk(&mut self, index: usize) -> Result<Vec<u8>> {\n        if index >= self.len() {\n            return Err(Error::IndexOutOfBounds(\"Chunk index out-of-bounds\".into()));\n        }\n\n        self.index = index;\n\n        if index == 0 || index == 1 {\n            self.raw_iter.seek_to_first();\n        } else {\n            let preceding_key = self.chunk_boundaries.get(index - 2).unwrap();\n            self.raw_iter.seek(preceding_key);\n            self.raw_iter.next();\n        }\n\n        self.next_chunk()\n    }\n\n    /// Returns the total number of chunks for the underlying Merk tree.\n    #[allow(clippy::len_without_is_empty)]\n    pub fn len(&self) -> usize {\n        let boundaries_len = self.chunk_boundaries.len();\n        if boundaries_len == 0 {\n            1\n        } else {\n            boundaries_len + 2\n        }\n    }\n\n    /// Gets the next chunk based on the `ChunkProducer`'s internal index state.\n    /// This is mostly useful for letting `ChunkIter` yield the chunks in order,\n    /// optimizing throughput compared to random access.\n    fn next_chunk(&mut self) -> Result<Vec<u8>> {\n        if self.index == 0 {\n            if self.trunk.is_empty() {\n                return Err(Error::Fetch(\n                    \"Attempted to fetch chunk on empty tree\".into(),\n                ));\n            }\n            self.index += 1;\n            return Ok(self.trunk.encode()?);\n        }\n\n        assert!(self.index < self.len(), \"Called next_chunk after end\");\n\n        let end_key = self.chunk_boundaries.get(self.index - 1);\n        let end_key_slice = end_key.as_ref().map(|k| k.as_slice());\n\n        self.index += 1;\n\n        let chunk = get_next_chunk(&mut self.raw_iter, end_key_slice)?;\n        Ok(chunk.encode()?)\n    }\n}\n\nimpl<'a> IntoIterator for ChunkProducer<'a> 
{\n    type IntoIter = ChunkIter<'a>;\n    type Item = <ChunkIter<'a> as Iterator>::Item;\n\n    fn into_iter(self) -> Self::IntoIter {\n        ChunkIter(self)\n    }\n}\n\n/// A `ChunkIter` iterates through all the chunks for the underlying `Merk`\n/// instance in order (the first chunk is the \"trunk\" chunk). Yields `None`\n/// after all chunks have been yielded.\npub struct ChunkIter<'a>(ChunkProducer<'a>);\n\nimpl<'a> Iterator for ChunkIter<'a> {\n    type Item = Result<Vec<u8>>;\n\n    fn size_hint(&self) -> (usize, Option<usize>) {\n        (self.0.len(), Some(self.0.len()))\n    }\n\n    fn next(&mut self) -> Option<Self::Item> {\n        if self.0.index >= self.0.len() {\n            None\n        } else {\n            Some(self.0.next_chunk())\n        }\n    }\n}\n\nimpl Merk {\n    /// Creates a `ChunkProducer` which can return chunk proofs for replicating\n    /// the entire Merk tree.\n    pub fn chunks(&self) -> Result<ChunkProducer> {\n        ChunkProducer::new(self)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::{\n        proofs::{\n            chunk::{verify_leaf, verify_trunk},\n            Decoder,\n        },\n        test_utils::*,\n    };\n\n    #[test]\n    fn len_small() {\n        let mut merk = TempMerk::new().unwrap();\n        let batch = make_batch_seq(1..256);\n        merk.apply(batch.as_slice(), &[]).unwrap();\n\n        let chunks = merk.chunks().unwrap();\n        assert_eq!(chunks.len(), 1);\n        assert_eq!(chunks.into_iter().size_hint().0, 1);\n    }\n\n    #[test]\n    fn len_big() {\n        let mut merk = TempMerk::new().unwrap();\n        let batch = make_batch_seq(1..10_000);\n        merk.apply(batch.as_slice(), &[]).unwrap();\n\n        let chunks = merk.chunks().unwrap();\n        assert_eq!(chunks.len(), 129);\n        assert_eq!(chunks.into_iter().size_hint().0, 129);\n    }\n\n    #[test]\n    fn generate_and_verify_chunks() -> Result<()> {\n        let mut merk = 
TempMerk::new().unwrap();\n        let batch = make_batch_seq(1..10_000);\n        merk.apply(batch.as_slice(), &[]).unwrap();\n\n        let mut chunks = merk.chunks().unwrap().into_iter().map(Result::unwrap);\n\n        let chunk = chunks.next().unwrap();\n        let ops = Decoder::new(chunk.as_slice());\n        let (trunk, height) = verify_trunk(ops).unwrap();\n        assert_eq!(height, 14);\n        assert_eq!(trunk.hash()?, merk.root_hash());\n\n        assert_eq!(trunk.layer(7).count(), 128);\n\n        for (chunk, node) in chunks.zip(trunk.layer(height / 2)) {\n            let ops = Decoder::new(chunk.as_slice());\n            verify_leaf(ops, node.hash()?).unwrap();\n        }\n        Ok(())\n    }\n\n    #[test]\n    fn chunks_from_reopen() {\n        let time = std::time::SystemTime::now()\n            .duration_since(std::time::SystemTime::UNIX_EPOCH)\n            .unwrap()\n            .as_nanos();\n        let path = format!(\"chunks_from_reopen_{time}.db\");\n\n        let original_chunks = {\n            let mut merk = Merk::open(&path).unwrap();\n            let batch = make_batch_seq(1..10);\n            merk.apply(batch.as_slice(), &[]).unwrap();\n\n            merk.chunks()\n                .unwrap()\n                .into_iter()\n                .map(Result::unwrap)\n                .collect::<Vec<_>>()\n                .into_iter()\n        };\n\n        let merk = TempMerk::open(path).unwrap();\n        let reopen_chunks = merk.chunks().unwrap().into_iter().map(Result::unwrap);\n\n        for (original, checkpoint) in original_chunks.zip(reopen_chunks) {\n            assert_eq!(original.len(), checkpoint.len());\n        }\n    }\n\n    #[test]\n    fn chunks_from_checkpoint() {\n        let mut merk = TempMerk::new().unwrap();\n        let batch = make_batch_seq(1..10);\n        merk.apply(batch.as_slice(), &[]).unwrap();\n\n        let path: std::path::PathBuf = \"generate_and_verify_chunks_from_checkpoint.db\".into();\n        if 
path.exists() {\n            std::fs::remove_dir_all(&path).unwrap();\n        }\n        let checkpoint = merk.checkpoint(&path).unwrap();\n\n        let original_chunks = merk.chunks().unwrap().into_iter().map(Result::unwrap);\n        let checkpoint_chunks = checkpoint.chunks().unwrap().into_iter().map(Result::unwrap);\n\n        for (original, checkpoint) in original_chunks.zip(checkpoint_chunks) {\n            assert_eq!(original.len(), checkpoint.len());\n        }\n\n        std::fs::remove_dir_all(&path).unwrap();\n    }\n\n    #[test]\n    fn random_access_chunks() {\n        let mut merk = TempMerk::new().unwrap();\n        let batch = make_batch_seq(1..111);\n        merk.apply(batch.as_slice(), &[]).unwrap();\n\n        let chunks = merk\n            .chunks()\n            .unwrap()\n            .into_iter()\n            .map(Result::unwrap)\n            .collect::<Vec<_>>();\n\n        let mut producer = merk.chunks().unwrap();\n        for i in 0..chunks.len() * 2 {\n            let index = i % chunks.len();\n            assert_eq!(producer.chunk(index).unwrap(), chunks[index]);\n        }\n    }\n\n    #[test]\n    #[should_panic(expected = \"Attempted to fetch chunk on empty tree\")]\n    fn test_chunk_empty() {\n        let merk = TempMerk::new().unwrap();\n\n        let _chunks = merk\n            .chunks()\n            .unwrap()\n            .into_iter()\n            .map(Result::unwrap)\n            .collect::<Vec<_>>();\n    }\n\n    #[test]\n    #[should_panic(expected = \"Chunk index out-of-bounds\")]\n    fn test_chunk_index_oob() {\n        let mut merk = TempMerk::new().unwrap();\n        let batch = make_batch_seq(1..42);\n        merk.apply(batch.as_slice(), &[]).unwrap();\n\n        let mut producer = merk.chunks().unwrap();\n        let _chunk = producer.chunk(50000).unwrap();\n    }\n\n    #[test]\n    fn test_chunk_index_gt_1_access() {\n        let mut merk = TempMerk::new().unwrap();\n        let batch = make_batch_seq(1..513);\n   
     merk.apply(batch.as_slice(), &[]).unwrap();\n\n        let mut producer = merk.chunks().unwrap();\n        println!(\"length: {}\", producer.len());\n        let chunk = producer.chunk(2).unwrap();\n        assert_eq!(\n            chunk,\n            vec![\n                3, 0, 8, 0, 0, 0, 0, 0, 0, 0, 18, 0, 60, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 3, 0, 8, 0, 0, 0, 0, 0, 0, 0, 19, 0, 60, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 16, 3, 0, 8, 0, 0, 0, 0, 0, 0, 0, 20, 0,\n                60, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 17, 3, 0, 8, 0, 0, 0,\n                0, 0, 0, 0, 21, 0, 60, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 16,\n                3, 0, 8, 0, 0, 0, 0, 0, 0, 0, 22, 0, 60, 123, 123, 
123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 3, 0, 8, 0, 0, 0, 0, 0, 0, 0, 23, 0, 60, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 16, 3, 0, 8, 0, 0, 0, 0, 0, 0, 0, 24, 0,\n                60, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 17, 17, 3, 0, 8, 0, 0,\n                0, 0, 0, 0, 0, 25, 0, 60, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 16, 3, 0, 8, 0, 0, 0, 0, 0, 0, 0, 26, 0, 60, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                
123, 123, 123, 123, 123, 123, 3, 0, 8, 0, 0, 0, 0, 0, 0, 0, 27, 0, 60, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 16, 3, 0, 8, 0, 0, 0, 0, 0, 0, 0,\n                28, 0, 60, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 17, 3, 0, 8,\n                0, 0, 0, 0, 0, 0, 0, 29, 0, 60, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 16, 3, 0, 8, 0, 0, 0, 0, 0, 0, 0, 30, 0, 60, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 3, 0, 8, 0, 0, 0, 0, 0, 0, 0, 31, 0, 60, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 
123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 16, 3, 0, 8, 0, 0, 0, 0, 0,\n                0, 0, 32, 0, 60, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 17, 17,\n                17\n            ]\n        );\n    }\n\n    #[test]\n    #[should_panic(expected = \"Called next_chunk after end\")]\n    fn test_next_chunk_index_oob() {\n        let mut merk = TempMerk::new().unwrap();\n        let batch = make_batch_seq(1..42);\n        merk.apply(batch.as_slice(), &[]).unwrap();\n\n        let mut producer = merk.chunks().unwrap();\n        let _chunk1 = producer.next_chunk();\n        let _chunk2 = producer.next_chunk();\n    }\n}\n"
  },
  {
    "path": "src/merk/mod.rs",
    "content": "pub mod chunks;\npub mod restore;\npub mod snapshot;\n\nuse std::cmp::Ordering;\nuse std::collections::LinkedList;\nuse std::path::{Path, PathBuf};\nuse std::sync::RwLock;\n\nuse rocksdb::DB;\nuse rocksdb::{checkpoint::Checkpoint, ColumnFamilyDescriptor, WriteBatch};\n\nuse crate::error::{Error, Result};\nuse crate::proofs::{encode_into, query::QueryItem};\nuse crate::tree::{Batch, Commit, Fetch, GetResult, Hash, Op, RefWalker, Tree, Walker, NULL_HASH};\n\npub use self::snapshot::Snapshot;\n\nconst ROOT_KEY_KEY: &[u8] = b\"root\";\nconst FORMAT_VERSION_KEY: &[u8] = b\"format\";\nconst AUX_CF_NAME: &str = \"aux\";\nconst INTERNAL_CF_NAME: &str = \"internal\";\n\nconst FORMAT_VERSION: u64 = 1;\n\nfn column_families() -> Vec<ColumnFamilyDescriptor> {\n    vec![\n        // TODO: clone opts or take args\n        ColumnFamilyDescriptor::new(AUX_CF_NAME, Merk::default_db_opts()),\n        ColumnFamilyDescriptor::new(INTERNAL_CF_NAME, Merk::default_db_opts()),\n    ]\n}\n\n/// A handle to a Merkle key/value store backed by RocksDB.\npub struct Merk {\n    pub(crate) tree: RwLock<Option<Tree>>,\n    pub(crate) db: rocksdb::DB,\n    pub(crate) path: PathBuf,\n}\n\npub type UseTreeMutResult = Result<Vec<(Vec<u8>, Option<Vec<u8>>)>>;\n\nimpl Merk {\n    /// Opens a store with the specified file path. 
If no store exists at that\n    /// path, one will be created.\n    pub fn open<P: AsRef<Path>>(path: P) -> Result<Merk> {\n        let db_opts = Merk::default_db_opts();\n        Merk::open_opt(path, db_opts)\n    }\n\n    pub fn open_readonly<P: AsRef<Path>>(path: P) -> Result<Merk> {\n        let db_opts = Merk::default_db_opts();\n\n        let mut path_buf = PathBuf::new();\n        path_buf.push(path);\n        let db = rocksdb::DB::open_cf_descriptors_read_only(\n            &db_opts,\n            &path_buf,\n            column_families(),\n            false,\n        )?;\n\n        let format_version = load_format_version(&db)?;\n        if format_version != FORMAT_VERSION {\n            return Err(Error::Version(format!(\n                \"Format version mismatch: expected {}, found {}\",\n                FORMAT_VERSION, format_version,\n            )));\n        }\n\n        Ok(Merk {\n            tree: RwLock::new(load_root(&db)?),\n            db,\n            path: path_buf,\n        })\n    }\n\n    /// Opens a store with the specified file path and the given options. If no\n    /// store exists at that path, one will be created.\n    pub fn open_opt<P>(path: P, db_opts: rocksdb::Options) -> Result<Merk>\n    where\n        P: AsRef<Path>,\n    {\n        let mut path_buf = PathBuf::new();\n        path_buf.push(path);\n\n        let mut db = rocksdb::DB::open_cf_descriptors(&db_opts, &path_buf, column_families())?;\n        let format_version = load_format_version(&db)?;\n\n        if has_root(&db)? 
{\n            if format_version == 0 {\n                log::info!(\"Migrating store from version 0 to {}...\", FORMAT_VERSION);\n\n                drop(db);\n                Merk::migrate_from_v0(&path_buf)?;\n                db = rocksdb::DB::open_cf_descriptors(&db_opts, &path_buf, column_families())?;\n            } else if format_version != FORMAT_VERSION {\n                return Err(Error::Version(format!(\n                    \"Unknown format version: expected <= {}, found {}\",\n                    FORMAT_VERSION, format_version,\n                )));\n            }\n        }\n\n        Ok(Merk {\n            tree: RwLock::new(load_root(&db)?),\n            db,\n            path: path_buf,\n        })\n    }\n\n    pub fn open_and_get_aux<P>(path: P, key: &[u8]) -> Result<Option<Vec<u8>>>\n    where\n        P: AsRef<Path>,\n    {\n        let db_opts = Merk::default_db_opts();\n        let db =\n            rocksdb::DB::open_cf_descriptors_read_only(&db_opts, path, column_families(), false)?;\n        let aux_cf = db.cf_handle(AUX_CF_NAME).unwrap();\n        Ok(db.get_cf(aux_cf, key)?)\n    }\n\n    pub fn default_db_opts() -> rocksdb::Options {\n        let mut opts = rocksdb::Options::default();\n        opts.create_if_missing(true);\n        opts.create_missing_column_families(true);\n        opts.set_atomic_flush(true);\n\n        // TODO: tune\n        opts.increase_parallelism(num_cpus::get() as i32);\n        // opts.set_advise_random_on_open(false);\n        opts.set_allow_mmap_writes(true);\n        opts.set_allow_mmap_reads(true);\n\n        opts.set_max_log_file_size(1_000_000);\n        opts.set_recycle_log_file_num(5);\n        opts.set_keep_log_file_num(5);\n        opts.set_log_level(rocksdb::LogLevel::Warn);\n\n        opts\n    }\n\n    /// Gets an auxiliary value.\n    pub fn get_aux(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {\n        let aux_cf = self.db.cf_handle(AUX_CF_NAME);\n        Ok(self.db.get_cf(aux_cf.unwrap(), key)?)\n 
   }\n\n    /// Gets a value for the given key. If the key is not found, `None` is\n    /// returned.\n    ///\n    /// Note that this is essentially the same as a normal RocksDB `get`, so\n    /// should be a fast operation and has almost no tree overhead.\n    pub fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {\n        self.use_tree(|maybe_tree| {\n            maybe_tree\n                .and_then(|tree| get(tree, self.source(), key).transpose())\n                .transpose()\n        })\n    }\n\n    /// Returns the root hash of the tree (a digest for the entire store which\n    /// proofs can be checked against). If the tree is empty, returns the null\n    /// hash (zero-filled).\n    pub fn root_hash(&self) -> Hash {\n        self.use_tree(root_hash)\n    }\n\n    /// Applies a batch of operations (puts and deletes) to the tree.\n    ///\n    /// This will fail if the keys in `batch` are not sorted and unique. This\n    /// check creates some overhead, so if you are sure your batch is sorted and\n    /// unique you can use the unsafe `apply_unchecked` for a small performance\n    /// gain.\n    ///\n    /// # Example\n    /// ```\n    /// # let mut store = merk::test_utils::TempMerk::new().unwrap();\n    /// # store.apply(&[(vec![4,5,6], Op::Put(vec![0]))], &[]).unwrap();\n    ///\n    /// use merk::Op;\n    ///\n    /// let batch = &[\n    ///     (vec![1, 2, 3], Op::Put(vec![4, 5, 6])), // puts value [4,5,6] to key [1,2,3]\n    ///     (vec![4, 5, 6], Op::Delete) // deletes key [4,5,6]\n    /// ];\n    /// store.apply(batch, &[]).unwrap();\n    /// ```\n    pub fn apply(&mut self, batch: &Batch, aux: &Batch) -> Result<()> {\n        // ensure keys in batch are sorted and unique\n        let mut maybe_prev_key: Option<Vec<u8>> = None;\n        for (key, _) in batch.iter() {\n            if let Some(prev_key) = maybe_prev_key {\n                match prev_key.cmp(key) {\n                    Ordering::Greater => {\n                        return 
Err(Error::BatchKey(\"Keys in batch must be sorted\".into()));\n                    }\n                    Ordering::Equal => {\n                        return Err(Error::BatchKey(\"Keys in batch must be unique\".into()));\n                    }\n                    _ => (),\n                }\n            }\n            maybe_prev_key = Some(key.to_vec());\n        }\n\n        unsafe { self.apply_unchecked(batch, aux) }\n    }\n\n    /// Applies a batch of operations (puts and deletes) to the tree.\n    ///\n    /// # Safety\n    /// This is unsafe because the keys in `batch` must be sorted and unique -\n    /// if they are not, there will be undefined behavior. For a safe version of\n    /// this method which checks to ensure the batch is sorted and unique, see\n    /// `apply`.\n    ///\n    /// # Example\n    /// ```\n    /// # let mut store = merk::test_utils::TempMerk::new().unwrap();\n    /// # store.apply(&[(vec![4,5,6], Op::Put(vec![0]))], &[]).unwrap();\n    ///\n    /// use merk::Op;\n    ///\n    /// let batch = &[\n    ///     (vec![1, 2, 3], Op::Put(vec![4, 5, 6])), // puts value [4,5,6] to key [1,2,3]\n    ///     (vec![4, 5, 6], Op::Delete) // deletes key [4,5,6]\n    /// ];\n    /// unsafe { store.apply_unchecked(batch, &[]).unwrap() };\n    /// ```\n    pub unsafe fn apply_unchecked(&mut self, batch: &Batch, aux: &Batch) -> Result<()> {\n        let mut tree = self.tree.write().unwrap();\n        let maybe_walker = tree.take().map(|tree| Walker::new(tree, self.source()));\n\n        let (maybe_tree, deleted_keys) = Walker::apply_to(maybe_walker, batch, self.source())?;\n        *tree = maybe_tree;\n        drop(tree);\n\n        // commit changes to db\n        self.commit(deleted_keys, aux)\n    }\n\n    /// Closes the store and deletes all data from disk.\n    pub fn destroy(self) -> Result<()> {\n        let opts = Merk::default_db_opts();\n        let path = self.path.clone();\n        drop(self);\n        rocksdb::DB::destroy(&opts, 
path)?;\n        Ok(())\n    }\n\n    /// Completely rebuilds the tree, keeping all the same stored keys and\n    /// values.\n    pub fn repair(self) -> Result<Self> {\n        use rocksdb::IteratorMode;\n\n        let path = self.path.clone();\n\n        let create_path = |suffix| {\n            let mut tmp_path = path.clone();\n            let tmp_file_name =\n                format!(\"{}-{}\", path.file_name().unwrap().to_str().unwrap(), suffix);\n            tmp_path.set_file_name(tmp_file_name);\n            tmp_path\n        };\n\n        let tmp_path = create_path(\"repair1\");\n        let tmp = Merk::open(&tmp_path)?;\n        tmp.destroy()?;\n\n        // TODO: split up batch\n        let mut node = Tree::new(vec![], vec![])?;\n        let batch: Vec<_> = self\n            .db\n            .iterator(IteratorMode::Start)\n            .map(|entry| {\n                let (key, node_bytes) = entry.unwrap(); // TODO\n                node.decode_into(vec![], &node_bytes);\n                (key.to_vec(), Op::Put(node.value().to_vec()))\n            })\n            .collect();\n\n        let aux_cf = self.db.cf_handle(AUX_CF_NAME).unwrap();\n        let aux: Vec<_> = self\n            .db\n            .iterator_cf(aux_cf, IteratorMode::Start)\n            .map(|entry| {\n                let (key, value) = entry.unwrap(); // TODO\n                (key.to_vec(), Op::Put(value.to_vec()))\n            })\n            .collect();\n\n        drop(self);\n\n        let mut tmp = Self::open(&tmp_path)?;\n        tmp.apply(&batch, &aux)?;\n        drop(tmp);\n\n        let tmp_path2 = create_path(\"repair2\");\n        std::fs::rename(&path, &tmp_path2)?;\n        std::fs::rename(&tmp_path, &path)?;\n        std::fs::remove_dir_all(&tmp_path2)?;\n\n        Self::open(path)\n    }\n\n    pub fn migrate_from_v0<P: AsRef<Path>>(path: P) -> Result<()> {\n        let path = path.as_ref().to_path_buf();\n        let db =\n            
rocksdb::DB::open_cf_descriptors(&Merk::default_db_opts(), path, column_families())?;\n\n        let mut iter = db.raw_iterator();\n        iter.seek_to_first();\n\n        while iter.valid() {\n            let key = iter.key().unwrap();\n            let mut value = iter.value().unwrap();\n\n            let node = Tree::decode_v0(&mut value)?;\n            let new_value = node.encode();\n            db.put(key, new_value.as_slice())?;\n\n            iter.next();\n        }\n\n        db.put_cf(\n            db.cf_handle(INTERNAL_CF_NAME).unwrap(),\n            FORMAT_VERSION_KEY,\n            FORMAT_VERSION.to_be_bytes(),\n        )?;\n\n        Ok(())\n    }\n\n    /// Creates a Merkle proof for the list of queried keys. For each key in the\n    /// query, if the key is found in the store then the value will be proven to\n    /// be in the tree. For each key in the query that does not exist in the\n    /// tree, its absence will be proven by including boundary keys.\n    ///\n    /// The proof returned is in an encoded format which can be verified with\n    /// `merk::verify`.\n    pub fn prove<Q, I>(&self, query: I) -> Result<Vec<u8>>\n    where\n        Q: Into<QueryItem>,\n        I: IntoIterator<Item = Q>,\n    {\n        self.use_tree_mut(move |maybe_tree| prove(maybe_tree, self.source(), query))\n    }\n\n    pub fn flush(&self) -> Result<()> {\n        Ok(self.db.flush()?)\n    }\n\n    pub fn commit(&mut self, deleted_keys: LinkedList<Vec<u8>>, aux: &Batch) -> Result<()> {\n        let internal_cf = self.db.cf_handle(INTERNAL_CF_NAME).unwrap();\n        let aux_cf = self.db.cf_handle(AUX_CF_NAME).unwrap();\n\n        let mut batch = rocksdb::WriteBatch::default();\n        let mut to_batch = self.use_tree_mut(|maybe_tree| -> UseTreeMutResult {\n            // TODO: concurrent commit\n            if let Some(tree) = maybe_tree {\n                // TODO: configurable committer\n                let mut committer = MerkCommitter::new(tree.height(), 21);\n     
           tree.commit(&mut committer)?;\n\n                // update pointer to root node\n                batch.put_cf(internal_cf, ROOT_KEY_KEY, tree.key());\n\n                Ok(committer.batch)\n            } else {\n                // empty tree, delete pointer to root\n                batch.delete_cf(internal_cf, ROOT_KEY_KEY);\n\n                Ok(vec![])\n            }\n        })?;\n\n        // TODO: move this to MerkCommitter impl?\n        for key in deleted_keys {\n            to_batch.push((key, None));\n        }\n        to_batch.sort_by(|a, b| a.0.cmp(&b.0));\n        for (key, maybe_value) in to_batch {\n            if let Some(value) = maybe_value {\n                batch.put(key, value);\n            } else {\n                batch.delete(key);\n            }\n        }\n\n        for (key, value) in aux {\n            match value {\n                Op::Put(value) => batch.put_cf(aux_cf, key, value),\n                Op::Delete => batch.delete_cf(aux_cf, key),\n            };\n        }\n\n        // update format version\n        // TODO: shouldn't need a write per commit\n        batch.put_cf(\n            internal_cf,\n            FORMAT_VERSION_KEY,\n            FORMAT_VERSION.to_be_bytes(),\n        );\n\n        // write to db\n        self.write(batch)?;\n\n        Ok(())\n    }\n\n    pub fn walk<T>(&self, f: impl FnOnce(Option<RefWalker<MerkSource>>) -> T) -> T {\n        let mut tree = self.tree.write().unwrap();\n        let maybe_walker = tree\n            .as_mut()\n            .map(|tree| RefWalker::new(tree, self.source()));\n        f(maybe_walker)\n    }\n\n    pub fn raw_iter(&self) -> rocksdb::DBRawIterator {\n        self.db.raw_iterator()\n    }\n\n    pub fn checkpoint<P: AsRef<Path>>(&self, path: P) -> Result<Merk> {\n        Checkpoint::new(&self.db)?.create_checkpoint(&path)?;\n        Merk::open(path)\n    }\n\n    pub fn snapshot(&self) -> Result<Snapshot> {\n        Ok(Snapshot::new(self.db.snapshot(), 
load_root(&self.db)?))\n    }\n\n    pub fn db(&self) -> &DB {\n        &self.db\n    }\n\n    fn source(&self) -> MerkSource {\n        MerkSource { db: &self.db }\n    }\n\n    fn use_tree<T>(&self, f: impl FnOnce(Option<&Tree>) -> T) -> T {\n        let tree = self.tree.read().unwrap();\n        f(tree.as_ref())\n    }\n\n    fn use_tree_mut<T>(&self, f: impl FnOnce(Option<&mut Tree>) -> T) -> T {\n        let mut tree_slot = self.tree.write().unwrap();\n        let mut tree = tree_slot.take();\n        let res = f(tree.as_mut());\n        *tree_slot = tree;\n        res\n    }\n\n    pub(crate) fn write(&mut self, batch: WriteBatch) -> Result<()> {\n        let mut opts = rocksdb::WriteOptions::default();\n        opts.set_sync(false);\n        // TODO: disable WAL once we can ensure consistency with transactions\n        self.db.write_opt(batch, &opts)?;\n        Ok(())\n    }\n\n    pub(crate) fn set_root_key(&mut self, key: Vec<u8>) -> Result<()> {\n        let internal_cf = self.db.cf_handle(INTERNAL_CF_NAME).unwrap();\n        let mut batch = WriteBatch::default();\n        batch.put_cf(internal_cf, ROOT_KEY_KEY, key);\n        self.write(batch)\n    }\n\n    pub(crate) fn fetch_node(&self, key: &[u8]) -> Result<Option<Tree>> {\n        self.source().fetch_by_key(key)\n    }\n\n    pub(crate) fn load_root(&mut self) -> Result<()> {\n        let root = load_root(&self.db)?;\n        *self.tree.write().unwrap() = root;\n        Ok(())\n    }\n}\n\n#[derive(Clone)]\npub struct MerkSource<'a> {\n    db: &'a rocksdb::DB,\n}\n\nimpl<'a> Fetch for MerkSource<'a> {\n    fn fetch_by_key(&self, key: &[u8]) -> Result<Option<Tree>> {\n        Ok(self\n            .db\n            .get_pinned(key)?\n            .map(|bytes| Tree::decode(key.to_vec(), &bytes)))\n    }\n}\n\nstruct MerkCommitter {\n    batch: Vec<(Vec<u8>, Option<Vec<u8>>)>,\n    height: u8,\n    levels: u8,\n}\n\nimpl MerkCommitter {\n    fn new(height: u8, levels: u8) -> Self {\n        MerkCommitter 
{\n            batch: Vec::with_capacity(10000),\n            height,\n            levels,\n        }\n    }\n}\n\nimpl Commit for MerkCommitter {\n    fn write(&mut self, tree: &Tree) -> Result<()> {\n        let mut buf = Vec::with_capacity(tree.encoding_length());\n        tree.encode_into(&mut buf);\n        self.batch.push((tree.key().to_vec(), Some(buf)));\n        Ok(())\n    }\n\n    fn prune(&self, tree: &Tree) -> (bool, bool) {\n        // keep N top levels of tree\n        let prune = (self.height - tree.height()) >= self.levels;\n        (prune, prune)\n    }\n}\n\npub fn get<F: Fetch>(tree: &Tree, source: F, key: &[u8]) -> Result<Option<Vec<u8>>> {\n    Ok(match tree.get_value(key)? {\n        GetResult::Found(value) => Some(value),\n        GetResult::NotFound => None,\n        GetResult::Pruned => source.fetch_by_key(key)?.map(|node| node.value().to_vec()),\n    })\n}\n\nfn root_hash(maybe_tree: Option<&Tree>) -> Hash {\n    maybe_tree.map_or(NULL_HASH, |tree| tree.hash())\n}\n\nfn prove<Q, I, F>(maybe_tree: Option<&mut Tree>, source: F, query: I) -> Result<Vec<u8>>\nwhere\n    Q: Into<QueryItem>,\n    I: IntoIterator<Item = Q>,\n    F: Fetch + Send + Clone,\n{\n    let query_vec: Vec<QueryItem> = query.into_iter().map(Into::into).collect();\n\n    let tree =\n        maybe_tree.ok_or_else(|| Error::Proof(\"Cannot create proof for empty tree\".into()))?;\n\n    let mut ref_walker = RefWalker::new(tree, source);\n    let (proof, _) = ref_walker.create_proof(query_vec.as_slice())?;\n\n    let mut bytes = Vec::with_capacity(128);\n    encode_into(proof.iter(), &mut bytes);\n    Ok(bytes)\n}\n\nfn has_root(db: &DB) -> Result<bool> {\n    let internal_cf = db.cf_handle(INTERNAL_CF_NAME).unwrap();\n    Ok(db.get_pinned_cf(internal_cf, ROOT_KEY_KEY)?.is_some())\n}\n\nfn load_root(db: &DB) -> Result<Option<Tree>> {\n    let internal_cf = db.cf_handle(INTERNAL_CF_NAME).unwrap();\n    db.get_pinned_cf(internal_cf, ROOT_KEY_KEY)?\n        .map(|key| MerkSource 
{ db }.fetch_by_key_expect(key.to_vec().as_slice()))\n        .transpose()\n}\n\nfn load_format_version(db: &DB) -> Result<u64> {\n    let internal_cf = db.cf_handle(INTERNAL_CF_NAME).unwrap();\n    let maybe_version = db.get_pinned_cf(internal_cf, FORMAT_VERSION_KEY)?;\n    let Some(version) = maybe_version else {\n        return Ok(0);\n    };\n\n    let mut buf = [0; 8];\n    buf.copy_from_slice(&version);\n    Ok(u64::from_be_bytes(buf))\n}\n\n#[cfg(test)]\nmod test {\n    use super::{Merk, MerkSource, RefWalker};\n    use crate::test_utils::*;\n    use crate::Op;\n    use std::thread;\n\n    // TODO: Close and then reopen test\n\n    fn assert_invariants(merk: &TempMerk) {\n        merk.use_tree(|maybe_tree| {\n            let tree = maybe_tree.expect(\"expected tree\");\n            assert_tree_invariants(tree);\n        })\n    }\n\n    #[test]\n    fn simple_insert_apply() {\n        let batch_size = 20;\n\n        let path = thread::current().name().unwrap().to_owned();\n        let mut merk = TempMerk::open(path).expect(\"failed to open merk\");\n\n        let batch = make_batch_seq(0..batch_size);\n        merk.apply(&batch, &[]).expect(\"apply failed\");\n\n        assert_invariants(&merk);\n        assert_eq!(\n            merk.root_hash(),\n            [\n                29, 99, 91, 248, 54, 96, 47, 252, 39, 203, 208, 163, 199, 30, 34, 251, 247, 34,\n                241, 203, 17, 252, 127, 44, 155, 83, 22, 54, 117, 85, 252, 200\n            ]\n        );\n    }\n\n    #[test]\n    fn insert_uncached() {\n        let batch_size = 20;\n\n        let path = thread::current().name().unwrap().to_owned();\n        let mut merk = TempMerk::open(path).expect(\"failed to open merk\");\n\n        let batch = make_batch_seq(0..batch_size);\n        merk.apply(&batch, &[]).expect(\"apply failed\");\n        assert_invariants(&merk);\n\n        let batch = make_batch_seq(batch_size..(batch_size * 2));\n        merk.apply(&batch, &[]).expect(\"apply failed\");\n    
    assert_invariants(&merk);\n    }\n\n    #[test]\n    fn insert_rand() {\n        let tree_size = 40;\n        let batch_size = 4;\n\n        let path = thread::current().name().unwrap().to_owned();\n        let mut merk = TempMerk::open(path).expect(\"failed to open merk\");\n\n        for i in 0..(tree_size / batch_size) {\n            println!(\"i:{i}\");\n            let batch = make_batch_rand(batch_size, i);\n            merk.apply(&batch, &[]).expect(\"apply failed\");\n        }\n    }\n\n    #[test]\n    fn actual_deletes() {\n        let path = thread::current().name().unwrap().to_owned();\n        let mut merk = TempMerk::open(path).expect(\"failed to open merk\");\n\n        let batch = make_batch_rand(10, 1);\n        merk.apply(&batch, &[]).expect(\"apply failed\");\n\n        let key = batch.first().unwrap().0.clone();\n        merk.apply(&[(key.clone(), Op::Delete)], &[]).unwrap();\n\n        let value = merk.db.get(key.as_slice()).unwrap();\n        assert!(value.is_none());\n    }\n\n    #[test]\n    fn aux_data() {\n        let path = thread::current().name().unwrap().to_owned();\n        let mut merk = TempMerk::open(path).expect(\"failed to open merk\");\n        merk.apply(&[], &[(vec![1, 2, 3], Op::Put(vec![4, 5, 6]))])\n            .expect(\"apply failed\");\n        let val = merk.get_aux(&[1, 2, 3]).unwrap();\n        assert_eq!(val, Some(vec![4, 5, 6]));\n    }\n\n    #[test]\n    fn simulated_crash() {\n        let path = thread::current().name().unwrap().to_owned();\n        let mut merk = CrashMerk::open(path).expect(\"failed to open merk\");\n\n        merk.apply(\n            &[(vec![0], Op::Put(vec![1]))],\n            &[(vec![2], Op::Put(vec![3]))],\n        )\n        .expect(\"apply failed\");\n\n        // make enough changes so that main column family gets auto-flushed\n        for i in 0..250 {\n            merk.apply(&make_batch_seq(i * 2_000..(i + 1) * 2_000), &[])\n                .expect(\"apply failed\");\n        
}\n\n        unsafe {\n            merk.crash().unwrap();\n        }\n\n        assert_eq!(merk.get_aux(&[2]).unwrap(), Some(vec![3]));\n        merk.destroy().unwrap();\n    }\n\n    #[test]\n    fn get_not_found() {\n        let path = thread::current().name().unwrap().to_owned();\n        let mut merk = TempMerk::open(path).expect(\"failed to open merk\");\n\n        // no root\n        assert!(merk.get(&[1, 2, 3]).unwrap().is_none());\n\n        // cached\n        merk.apply(&[(vec![5, 5, 5], Op::Put(vec![]))], &[])\n            .unwrap();\n        assert!(merk.get(&[1, 2, 3]).unwrap().is_none());\n\n        // uncached\n        merk.apply(\n            &[\n                (vec![0, 0, 0], Op::Put(vec![])),\n                (vec![1, 1, 1], Op::Put(vec![])),\n                (vec![2, 2, 2], Op::Put(vec![])),\n            ],\n            &[],\n        )\n        .unwrap();\n        assert!(merk.get(&[3, 3, 3]).unwrap().is_none());\n    }\n\n    #[test]\n    fn reopen() {\n        fn collect(mut node: RefWalker<MerkSource>, nodes: &mut Vec<Vec<u8>>) {\n            nodes.push(node.tree().encode());\n            node.walk(true)\n                .unwrap()\n                .into_iter()\n                .for_each(|c| collect(c, nodes));\n            node.walk(false)\n                .unwrap()\n                .into_iter()\n                .for_each(|c| collect(c, nodes));\n        }\n\n        let time = std::time::SystemTime::now()\n            .duration_since(std::time::SystemTime::UNIX_EPOCH)\n            .unwrap()\n            .as_nanos();\n        let path = format!(\"merk_reopen_{time}.db\");\n\n        let original_nodes = {\n            let mut merk = Merk::open(&path).unwrap();\n            let batch = make_batch_seq(1..10_000);\n            merk.apply(batch.as_slice(), &[]).unwrap();\n            let mut tree = merk.tree.write().unwrap().take().unwrap();\n            let walker = RefWalker::new(&mut tree, merk.source());\n\n            let mut nodes = 
vec![];\n            collect(walker, &mut nodes);\n            nodes\n        };\n\n        let merk = TempMerk::open(&path).unwrap();\n        let mut tree = merk.tree.write().unwrap().take().unwrap();\n        let walker = RefWalker::new(&mut tree, merk.source());\n\n        let mut reopen_nodes = vec![];\n        collect(walker, &mut reopen_nodes);\n\n        assert_eq!(reopen_nodes, original_nodes);\n    }\n\n    #[test]\n    fn reopen_iter() {\n        fn collect(iter: &mut rocksdb::DBRawIterator, nodes: &mut Vec<(Vec<u8>, Vec<u8>)>) {\n            while iter.valid() {\n                nodes.push((iter.key().unwrap().to_vec(), iter.value().unwrap().to_vec()));\n                iter.next();\n            }\n        }\n\n        let time = std::time::SystemTime::now()\n            .duration_since(std::time::SystemTime::UNIX_EPOCH)\n            .unwrap()\n            .as_nanos();\n        let path = format!(\"merk_reopen_{time}.db\");\n\n        let original_nodes = {\n            let mut merk = Merk::open(&path).unwrap();\n            let batch = make_batch_seq(1..10_000);\n            merk.apply(batch.as_slice(), &[]).unwrap();\n\n            let mut nodes = vec![];\n            collect(&mut merk.raw_iter(), &mut nodes);\n            nodes\n        };\n\n        let merk = TempMerk::open(&path).unwrap();\n\n        let mut reopen_nodes = vec![];\n        collect(&mut merk.raw_iter(), &mut reopen_nodes);\n\n        assert_eq!(reopen_nodes, original_nodes);\n    }\n\n    #[test]\n    fn checkpoint() {\n        let path = thread::current().name().unwrap().to_owned();\n        let mut merk = TempMerk::open(&path).expect(\"failed to open merk\");\n\n        merk.apply(&[(vec![1], Op::Put(vec![0]))], &[])\n            .expect(\"apply failed\");\n\n        let mut checkpoint = merk.checkpoint(path + \".checkpoint\").unwrap();\n\n        assert_eq!(merk.get(&[1]).unwrap(), Some(vec![0]));\n        assert_eq!(checkpoint.get(&[1]).unwrap(), Some(vec![0]));\n\n        
merk.apply(\n            &[(vec![1], Op::Put(vec![1])), (vec![2], Op::Put(vec![0]))],\n            &[],\n        )\n        .expect(\"apply failed\");\n\n        assert_eq!(merk.get(&[1]).unwrap(), Some(vec![1]));\n        assert_eq!(merk.get(&[2]).unwrap(), Some(vec![0]));\n        assert_eq!(checkpoint.get(&[1]).unwrap(), Some(vec![0]));\n        assert_eq!(checkpoint.get(&[2]).unwrap(), None);\n\n        checkpoint\n            .apply(&[(vec![2], Op::Put(vec![123]))], &[])\n            .expect(\"apply failed\");\n\n        assert_eq!(merk.get(&[1]).unwrap(), Some(vec![1]));\n        assert_eq!(merk.get(&[2]).unwrap(), Some(vec![0]));\n        assert_eq!(checkpoint.get(&[1]).unwrap(), Some(vec![0]));\n        assert_eq!(checkpoint.get(&[2]).unwrap(), Some(vec![123]));\n\n        checkpoint.destroy().unwrap();\n\n        assert_eq!(merk.get(&[1]).unwrap(), Some(vec![1]));\n        assert_eq!(merk.get(&[2]).unwrap(), Some(vec![0]));\n    }\n\n    #[test]\n    fn checkpoint_iterator() {\n        let path = thread::current().name().unwrap().to_owned();\n        let mut merk = TempMerk::open(&path).expect(\"failed to open merk\");\n\n        merk.apply(&make_batch_seq(1..100), &[])\n            .expect(\"apply failed\");\n\n        let path: std::path::PathBuf = (path + \".checkpoint\").into();\n        if path.exists() {\n            std::fs::remove_dir_all(&path).unwrap();\n        }\n        let checkpoint = merk.checkpoint(&path).unwrap();\n\n        let mut merk_iter = merk.raw_iter();\n        let mut checkpoint_iter = checkpoint.raw_iter();\n\n        loop {\n            assert_eq!(merk_iter.valid(), checkpoint_iter.valid());\n            if !merk_iter.valid() {\n                break;\n            }\n\n            assert_eq!(merk_iter.key(), checkpoint_iter.key());\n            assert_eq!(merk_iter.value(), checkpoint_iter.value());\n\n            merk_iter.next();\n            checkpoint_iter.next();\n        }\n\n        
std::fs::remove_dir_all(&path).unwrap();\n    }\n\n    #[test]\n    fn repair() {\n        let path = thread::current().name().unwrap().to_owned();\n        let mut merk = Merk::open(&path).expect(\"failed to open merk\");\n\n        merk.apply(&make_batch_seq(0..100), &[])\n            .expect(\"apply failed\");\n\n        let merk = merk.repair().unwrap();\n        merk.walk(|mut maybe_walker| {\n            fn recurse(maybe_walker: &mut Option<RefWalker<MerkSource>>) {\n                if let Some(walker) = maybe_walker {\n                    recurse(&mut walker.walk(true).unwrap());\n                    recurse(&mut walker.walk(false).unwrap());\n                }\n            }\n            recurse(&mut maybe_walker);\n\n            let walker = maybe_walker.unwrap();\n            let exp_value = put_entry_value();\n            for (i, (key, value)) in walker.tree().iter().enumerate() {\n                let exp_key = seq_key(i as u64);\n                assert_eq!(key, exp_key);\n                assert_eq!(value, exp_value);\n            }\n        });\n\n        std::fs::remove_dir_all(&path).unwrap();\n    }\n}\n"
  },
  {
    "path": "src/merk/restore.rs",
    "content": "//! Provides `Restorer`, which can create a replica of a Merk instance by\n//! receiving chunk proofs.\n\nuse super::Merk;\nuse crate::{\n    merk::MerkSource,\n    proofs::{\n        chunk::{verify_leaf, verify_trunk, MIN_TRUNK_HEIGHT},\n        tree::{Child, Tree as ProofTree},\n        Decoder, Node,\n    },\n    tree::{Link, RefWalker, Tree},\n    Error, Hash, Result,\n};\nuse rocksdb::WriteBatch;\nuse std::iter::Peekable;\nuse std::path::Path;\n\n/// A `Restorer` handles decoding, verifying, and storing chunk proofs to\n/// replicate an entire Merk tree. It expects the chunks to be processed in\n/// order, retrying the last chunk if verification fails.\npub struct Restorer {\n    leaf_hashes: Option<Peekable<std::vec::IntoIter<Hash>>>,\n    parent_keys: Option<Peekable<std::vec::IntoIter<Vec<u8>>>>,\n    trunk_height: Option<usize>,\n    merk: Merk,\n    expected_root_hash: Hash,\n    stated_length: usize,\n}\n\nimpl Restorer {\n    /// Creates a new `Restorer`, which will initialize a new Merk at the given\n    /// file path. 
The first chunk (the \"trunk\") will be compared against\n    /// `expected_root_hash`, then each subsequent chunk will be compared\n    /// against the hashes stored in the trunk, so that the restore process will\n    /// never allow malicious peers to send more than a single invalid chunk.\n    ///\n    /// The `stated_length` should be the number of chunks stated by the peer,\n    /// which will be verified after processing a valid first chunk to make it\n    /// easier to download chunks from peers without needing to trust this\n    /// length.\n    pub fn new<P: AsRef<Path>>(\n        db_path: P,\n        expected_root_hash: Hash,\n        stated_length: usize,\n    ) -> Result<Self> {\n        if db_path.as_ref().exists() {\n            return Err(Error::Path(\"The given path already exists\".into()));\n        }\n\n        Ok(Self {\n            expected_root_hash,\n            stated_length,\n            trunk_height: None,\n            merk: Merk::open(db_path)?,\n            leaf_hashes: None,\n            parent_keys: None,\n        })\n    }\n\n    /// Verifies a chunk and writes it to the working RocksDB instance. Expects\n    /// to be called for each chunk in order. Returns the number of remaining\n    /// chunks.\n    ///\n    /// Once there are no remaining chunks to be processed, `finalize` should\n    /// be called.\n    pub fn process_chunk(&mut self, chunk_bytes: &[u8]) -> Result<usize> {\n        let ops = Decoder::new(chunk_bytes);\n\n        match self.leaf_hashes {\n            None => self.process_trunk(ops),\n            Some(_) => self.process_leaf(ops),\n        }\n    }\n\n    /// Consumes the `Restorer` and returns the newly-created, fully-populated\n    /// Merk instance. This method will return an error if called before\n    /// processing all chunks (e.g. 
`restorer.remaining_chunks()` is not equal\n    /// to 0).\n    pub fn finalize(mut self) -> Result<Merk> {\n        if self.remaining_chunks().is_none() || self.remaining_chunks().unwrap() != 0 {\n            return Err(Error::ChunkProcessing(\n                \"Called finalize before all chunks were processed\".into(),\n            ));\n        }\n\n        if self.trunk_height.unwrap() >= MIN_TRUNK_HEIGHT {\n            self.rewrite_trunk_child_heights()?;\n        }\n\n        self.merk.flush()?;\n        self.merk.load_root()?;\n\n        Ok(self.merk)\n    }\n\n    /// Returns the number of remaining chunks to be processed. If called before\n    /// the first chunk is processed, this method will return `None` since we do\n    /// not yet have enough information to know about the number of chunks.\n    pub fn remaining_chunks(&self) -> Option<usize> {\n        self.leaf_hashes.as_ref().map(|lh| lh.len())\n    }\n\n    /// Writes the data contained in `tree` (extracted from a verified chunk\n    /// proof) to the RocksDB.\n    fn write_chunk(&mut self, tree: ProofTree) -> Result<()> {\n        let mut batch = WriteBatch::default();\n\n        tree.visit_refs(&mut |proof_node| {\n            let (key, mut node) = match &proof_node.node {\n                // TODO: encode tree node without cloning key/value\n                Node::KV(key, value) => match Tree::new(key.clone(), value.clone()) {\n                    Ok(node) => (key, node),\n                    Err(_) => return,\n                },\n                _ => return,\n            };\n\n            *node.slot_mut(true) = proof_node.left.as_ref().map(Child::as_link);\n            *node.slot_mut(false) = proof_node.right.as_ref().map(Child::as_link);\n\n            let bytes = node.encode();\n            batch.put(key, bytes);\n        });\n\n        self.merk.write(batch)\n    }\n\n    /// Verifies the trunk then writes its data to the RocksDB.\n    ///\n    /// The trunk contains a height proof which lets 
us verify the total number\n    /// of expected chunks is the same as `stated_length` as passed into\n    /// `Restorer::new()`. We also verify the expected root hash at this step.\n    fn process_trunk(&mut self, ops: Decoder) -> Result<usize> {\n        let (trunk, height) = verify_trunk(ops)?;\n\n        if trunk.hash()? != self.expected_root_hash {\n            return Err(Error::HashMismatch(self.expected_root_hash, trunk.hash()?));\n        }\n\n        let root_key = trunk.key().to_vec();\n\n        let trunk_height = height / 2;\n        self.trunk_height = Some(trunk_height);\n\n        let chunks_remaining = if trunk_height >= MIN_TRUNK_HEIGHT {\n            let leaf_hashes = trunk\n                .layer(trunk_height)\n                .map(|node| node.hash())\n                .collect::<Result<Vec<_>>>()?\n                .into_iter()\n                .peekable();\n            self.leaf_hashes = Some(leaf_hashes);\n\n            let parent_keys = trunk\n                .layer(trunk_height - 1)\n                .map(|node| node.key().to_vec())\n                .collect::<Vec<Vec<u8>>>()\n                .into_iter()\n                .peekable();\n            self.parent_keys = Some(parent_keys);\n            assert_eq!(\n                self.parent_keys.as_ref().unwrap().len(),\n                self.leaf_hashes.as_ref().unwrap().len() / 2\n            );\n\n            let chunks_remaining = (2_usize).pow(trunk_height as u32);\n            assert_eq!(self.remaining_chunks_unchecked(), chunks_remaining);\n            chunks_remaining\n        } else {\n            self.leaf_hashes = Some(vec![].into_iter().peekable());\n            self.parent_keys = Some(vec![].into_iter().peekable());\n            0\n        };\n\n        if self.stated_length != chunks_remaining + 1 {\n            return Err(Error::ChunkProcessing(\n                \"Stated length does not match calculated number of chunks\".into(),\n            ));\n        }\n\n        // note that 
these writes don't happen atomically, which is fine here\n        // because if anything fails during the restore process we will just\n        // scrap the whole restore and start over\n        self.write_chunk(trunk)?;\n        self.merk.set_root_key(root_key)?;\n\n        Ok(chunks_remaining)\n    }\n\n    /// Verifies a leaf chunk then writes it to the RocksDB. This needs to be\n    /// called in order, retrying the last chunk for any failed verifications.\n    fn process_leaf(&mut self, ops: Decoder) -> Result<usize> {\n        let leaf_hashes = self.leaf_hashes.as_mut().unwrap();\n        let leaf_hash = leaf_hashes\n            .peek()\n            .expect(\"Received more chunks than expected\");\n\n        let leaf = verify_leaf(ops, *leaf_hash)?;\n        self.rewrite_parent_link(&leaf)?;\n        self.write_chunk(leaf)?;\n\n        let leaf_hashes = self.leaf_hashes.as_mut().unwrap();\n        leaf_hashes.next();\n\n        Ok(self.remaining_chunks_unchecked())\n    }\n\n    /// The parent of the root node of the leaf does not know the key of its\n    /// children when it is first written. Now that we have verified this leaf,\n    /// we can write the key into the parent node's entry. Note that this does\n    /// not need to recalcuate hashes since it already had the child hash.\n    fn rewrite_parent_link(&mut self, leaf: &ProofTree) -> Result<()> {\n        let parent_keys = self.parent_keys.as_mut().unwrap();\n        let parent_key = parent_keys.peek().unwrap().clone();\n        let mut parent = self\n            .merk\n            .fetch_node(parent_key.as_slice())?\n            .expect(\"Could not find parent of leaf chunk\");\n\n        let is_left_child = self.remaining_chunks_unchecked() % 2 == 0;\n        if let Some(Link::Reference { ref mut key, .. 
}) = parent.link_mut(is_left_child) {\n            *key = leaf.key().to_vec();\n        } else {\n            panic!(\"Expected parent links to be type Link::Reference\");\n        };\n\n        let parent_bytes = parent.encode();\n        self.merk.db.put(parent_key, parent_bytes)?;\n\n        if !is_left_child {\n            let parent_keys = self.parent_keys.as_mut().unwrap();\n            parent_keys.next();\n        }\n\n        Ok(())\n    }\n\n    fn rewrite_trunk_child_heights(&mut self) -> Result<()> {\n        fn recurse(\n            mut node: RefWalker<MerkSource>,\n            remaining_depth: usize,\n            batch: &mut WriteBatch,\n        ) -> Result<(u8, u8)> {\n            if remaining_depth == 0 {\n                return Ok(node.tree().child_heights());\n            }\n\n            let mut cloned_node =\n                Tree::decode(node.tree().key().to_vec(), node.tree().encode().as_slice());\n\n            let left_child = node.walk(true)?.unwrap();\n            let left_child_heights = recurse(left_child, remaining_depth - 1, batch)?;\n            let left_height = left_child_heights.0.max(left_child_heights.1) + 1;\n            *cloned_node.link_mut(true).unwrap().child_heights_mut() = left_child_heights;\n\n            let right_child = node.walk(false)?.unwrap();\n            let right_child_heights = recurse(right_child, remaining_depth - 1, batch)?;\n            let right_height = right_child_heights.0.max(right_child_heights.1) + 1;\n            *cloned_node.link_mut(false).unwrap().child_heights_mut() = right_child_heights;\n\n            let bytes = cloned_node.encode();\n            batch.put(node.tree().key(), bytes);\n\n            Ok((left_height, right_height))\n        }\n\n        self.merk.flush()?;\n        self.merk.load_root()?;\n\n        let mut batch = WriteBatch::default();\n\n        let depth = self.trunk_height.unwrap();\n        self.merk.use_tree_mut(|maybe_tree| {\n            let tree = maybe_tree.unwrap();\n 
           let walker = RefWalker::new(tree, self.merk.source());\n            recurse(walker, depth, &mut batch)\n        })?;\n\n        self.merk.write(batch)?;\n\n        Ok(())\n    }\n\n    /// Returns the number of remaining chunks to be processed. This method will\n    /// panic if called before processing the first chunk (since that chunk\n    /// gives us the information to know how many chunks to expect).\n    pub fn remaining_chunks_unchecked(&self) -> usize {\n        self.leaf_hashes.as_ref().unwrap().len()\n    }\n}\n\nimpl Merk {\n    /// Creates a new `Restorer`, which can be used to verify chunk proofs to\n    /// replicate an entire Merk tree. A new Merk instance will be initialized\n    /// by creating a RocksDB at `path`.\n    ///\n    /// The restoration process will verify integrity by checking that the\n    /// incoming chunk proofs match `expected_root_hash`. The `stated_length`\n    /// should be the number of chunks as stated by peers, which will also be\n    /// verified during the restoration process.\n    pub fn restore<P: AsRef<Path>>(\n        path: P,\n        expected_root_hash: Hash,\n        stated_length: usize,\n    ) -> Result<Restorer> {\n        Restorer::new(path, expected_root_hash, stated_length)\n    }\n}\n\nimpl ProofTree {\n    fn child_heights(&self) -> (u8, u8) {\n        (\n            self.left.as_ref().map_or(0, |c| c.tree.height as u8),\n            self.right.as_ref().map_or(0, |c| c.tree.height as u8),\n        )\n    }\n}\n\nimpl Child {\n    fn as_link(&self) -> Link {\n        let key = match &self.tree.node {\n            Node::KV(key, _) => key.as_slice(),\n            // for the connection between the trunk and leaf chunks, we don't\n            // have the child key so we must first write in an empty one. 
once\n            // the leaf gets verified, we can write in this key to its parent\n            _ => &[],\n        };\n\n        Link::Reference {\n            hash: self.hash,\n            child_heights: self.tree.child_heights(),\n            key: key.to_vec(),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::test_utils::*;\n    use crate::tree::{Batch, Op};\n    use std::path::PathBuf;\n\n    fn restore_test(batches: &[&Batch], expected_nodes: usize) {\n        let mut original = TempMerk::new().unwrap();\n        for batch in batches {\n            original.apply(batch, &[]).unwrap();\n        }\n        original.flush().unwrap();\n\n        let chunks = original.chunks().unwrap();\n\n        let path: PathBuf = std::thread::current().name().unwrap().into();\n        if path.exists() {\n            std::fs::remove_dir_all(&path).unwrap();\n        }\n\n        let mut restorer = Merk::restore(&path, original.root_hash(), chunks.len()).unwrap();\n\n        assert_eq!(restorer.remaining_chunks(), None);\n\n        let mut expected_remaining = chunks.len();\n        for chunk in chunks {\n            let chunk = chunk.unwrap();\n            let remaining = restorer.process_chunk(chunk.as_slice()).unwrap();\n\n            expected_remaining -= 1;\n            assert_eq!(remaining, expected_remaining);\n            assert_eq!(restorer.remaining_chunks().unwrap(), expected_remaining);\n        }\n        assert_eq!(expected_remaining, 0);\n\n        let restored = restorer.finalize().unwrap();\n        assert_eq!(restored.root_hash(), original.root_hash());\n        assert_raw_db_entries_eq(&restored, &original, expected_nodes);\n\n        std::fs::remove_dir_all(&path).unwrap();\n    }\n\n    #[test]\n    fn restore_10000() {\n        restore_test(&[&make_batch_seq(0..10_000)], 10_000);\n    }\n\n    #[test]\n    fn restore_3() {\n        restore_test(&[&make_batch_seq(0..3)], 3);\n    }\n\n    #[test]\n    fn 
restore_2_left_heavy() {\n        restore_test(\n            &[&[(vec![0], Op::Put(vec![]))], &[(vec![1], Op::Put(vec![]))]],\n            2,\n        );\n    }\n\n    #[test]\n    fn restore_2_right_heavy() {\n        restore_test(\n            &[&[(vec![1], Op::Put(vec![]))], &[(vec![0], Op::Put(vec![]))]],\n            2,\n        );\n    }\n\n    #[test]\n    fn restore_1() {\n        restore_test(&[&make_batch_seq(0..1)], 1);\n    }\n\n    fn assert_raw_db_entries_eq(restored: &Merk, original: &Merk, length: usize) {\n        let mut original_entries = original.raw_iter();\n        let mut restored_entries = restored.raw_iter();\n        original_entries.seek_to_first();\n        restored_entries.seek_to_first();\n\n        let mut i = 0;\n        loop {\n            assert_eq!(restored_entries.valid(), original_entries.valid());\n            if !restored_entries.valid() {\n                break;\n            }\n\n            assert_eq!(restored_entries.key(), original_entries.key());\n            assert_eq!(restored_entries.value(), original_entries.value());\n\n            restored_entries.next();\n            original_entries.next();\n\n            i += 1;\n        }\n\n        assert_eq!(i, length);\n    }\n}\n"
  },
  {
    "path": "src/merk/snapshot.rs",
    "content": "//! In-memory snapshots of database state.\n//!\n//! Snapshots are read-only views of the database state at a particular point in\n//! time. This can be useful for retaining recent versions of history which can\n//! be queried against. Merk snapshots are backed by the similar RocksDB\n//! snapshot, but with the added ability to create proofs.\n\nuse std::cell::Cell;\n\nuse crate::{\n    proofs::query::QueryItem,\n    tree::{Fetch, RefWalker, Tree, NULL_HASH},\n    Hash, Result,\n};\n\n/// A read-only view of the database state at a particular point in time.\n///\n/// `Snapshot`s are cheap to create since they are just a handle and don't copy\n/// any data - they instead just prevent the underlying replaced data from being\n/// compacted in RocksDB until they are dropped. They are only held in memory,\n/// and will not be persisted after the process exits.\npub struct Snapshot<'a> {\n    /// The underlying RocksDB snapshot.\n    ss: Option<rocksdb::Snapshot<'a>>,\n    /// The Merk tree at the time the snapshot was created.\n    tree: Cell<Option<Tree>>,\n    /// Whether the underlying RocksDB snapshot should be dropped when the\n    /// `Snapshot` is dropped.\n    should_drop_ss: bool,\n}\n\nimpl<'a> Snapshot<'a> {\n    /// Creates a new `Snapshot` from a RocksDB snapshot and a Merk tree.\n    ///\n    /// The RocksDB snapshot will be dropped when the [Snapshot] is dropped.\n    pub fn new(db: rocksdb::Snapshot<'a>, tree: Option<Tree>) -> Self {\n        Snapshot {\n            ss: Some(db),\n            tree: Cell::new(tree),\n            should_drop_ss: true,\n        }\n    }\n\n    /// Converts the [Snapshot] into a [StaticSnapshot], an alternative which\n    /// has easier (but more dangerous) lifetime requirements.\n    pub fn staticize(mut self) -> StaticSnapshot {\n        let ss: RocksDBSnapshot = unsafe { std::mem::transmute(self.ss.take().unwrap()) };\n        StaticSnapshot {\n            tree: Cell::new(self.tree.take()),\n            
inner: ss.inner,\n            should_drop: false,\n        }\n    }\n\n    /// Gets the value associated with the given key, from the time the snapshot\n    /// was created.\n    pub fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {\n        self.use_tree(|maybe_tree| {\n            maybe_tree\n                .and_then(|tree| super::get(tree, self.source(), key).transpose())\n                .transpose()\n        })\n    }\n\n    /// Gets the root hash of the tree at the time the snapshot was created.\n    pub fn root_hash(&self) -> Hash {\n        self.use_tree(|tree| tree.map_or(NULL_HASH, |tree| tree.hash()))\n    }\n\n    /// Proves the given query against the tree at the time the snapshot was\n    /// created.\n    pub fn prove<Q, I>(&self, query: I) -> Result<Vec<u8>>\n    where\n        Q: Into<QueryItem>,\n        I: IntoIterator<Item = Q>,\n    {\n        self.use_tree_mut(move |maybe_tree| super::prove(maybe_tree, self.source(), query))\n    }\n\n    /// Walks the tree at the time the snapshot was created, fetching the child\n    /// node from the backing store if necessary.\n    pub fn walk<T>(&self, f: impl FnOnce(Option<RefWalker<SnapshotSource>>) -> T) -> T {\n        let mut tree = self.tree.take();\n        let maybe_walker = tree\n            .as_mut()\n            .map(|tree| RefWalker::new(tree, self.source()));\n        let res = f(maybe_walker);\n        self.tree.set(tree);\n        res\n    }\n\n    /// Returns an iterator over the keys and values in the backing store from\n    /// the time the snapshot was created.\n    pub fn raw_iter(&self) -> rocksdb::DBRawIterator {\n        self.ss.as_ref().unwrap().raw_iterator()\n    }\n\n    /// A data source which can be used to fetch values from the backing store,\n    /// from the time the snapshot was created.\n    fn source(&self) -> SnapshotSource {\n        SnapshotSource(self.ss.as_ref().unwrap())\n    }\n\n    /// Uses the tree, and then puts it back.\n    fn use_tree<T>(&self, f: impl 
FnOnce(Option<&Tree>) -> T) -> T {\n        let tree = self.tree.take();\n        let res = f(tree.as_ref());\n        self.tree.set(tree);\n        res\n    }\n\n    /// Uses the tree mutably, and then puts it back.\n    fn use_tree_mut<T>(&self, f: impl FnOnce(Option<&mut Tree>) -> T) -> T {\n        let mut tree = self.tree.take();\n        let res = f(tree.as_mut());\n        self.tree.set(tree);\n        res\n    }\n}\n\nimpl<'a> Drop for Snapshot<'a> {\n    fn drop(&mut self) {\n        if !self.should_drop_ss {\n            std::mem::forget(self.ss.take());\n        }\n    }\n}\n\n/// A data source which can be used to fetch values from the backing store, from\n/// the time the snapshot was created.\n///\n/// This implements [Fetch] and should be used with a type such as [RefWalker].\n#[derive(Clone)]\npub struct SnapshotSource<'a>(&'a rocksdb::Snapshot<'a>);\n\nimpl<'a> Fetch for SnapshotSource<'a> {\n    fn fetch_by_key(&self, key: &[u8]) -> Result<Option<Tree>> {\n        Ok(self\n            .0\n            .get(key)?\n            .map(|bytes| Tree::decode(key.to_vec(), &bytes)))\n    }\n}\n\n/// A read-only view of the database state at a particular point in time, but\n/// with an internal raw pointer to allow for manual lifetime management.\n///\n/// This is useful when you would otherwise want a [Snapshot], but you want to\n/// use the database while the snapshot is still alive. This is unsafe because\n/// it is the caller's responsibility to ensure that the underlying RocksDB\n/// snapshot outlives the [StaticSnapshot].\n///\n/// By default, the RocksDB snapshot will not be dropped when the\n/// [StaticSnapshot] is dropped, resulting in a memory leak. 
For correct usage,\n/// you must call [StaticSnapshot::drop] to ensure the RocksDB snapshot gets\n/// dropped when the [StaticSnapshot] is dropped.\npub struct StaticSnapshot {\n    /// A Merk tree based on the database state at the time the snapshot was\n    /// created.\n    tree: Cell<Option<Tree>>,\n    /// A raw pointer to the RocksDB snapshot.\n    inner: *const (),\n    /// Used to detect whether the `StaticSnapshot` was set to manually drop\n    /// before its [Drop::drop] implementation was called.\n    pub should_drop: bool,\n}\n\n/// An equivalent struct to the [rocksdb::Snapshot] struct within the `rocksdb`\n/// crate. This is used to access the private fields of the foreign crate's\n/// struct by first transmuting.\n///\n/// To guarantee that breaking changes in the `rocksdb` crate do not affect the\n/// transmutation into this struct, see the\n/// [tests::rocksdb_snapshot_struct_format] test.\nstruct RocksDBSnapshot<'a> {\n    /// A reference to the associated RocksDB database.\n    _db: &'a rocksdb::DB,\n    /// A raw pointer to the snapshot handle.\n    inner: *const (),\n}\n\n// We need this because we have a raw pointer to a RocksDB snapshot, but we\n// know that our usage of it is thread-safe:\n// https://github.com/facebook/rocksdb/blob/main/include/rocksdb/snapshot.h#L15-L16\nunsafe impl Send for StaticSnapshot {}\nunsafe impl Sync for StaticSnapshot {}\n\nimpl StaticSnapshot {\n    /// Converts the [StaticSnapshot] to a [Snapshot] by re-associating with the\n    /// database it was originally created from.\n    ///\n    /// # Safety\n    /// This will cause undefined behavior if a database other than the one\n    /// originally used to create the snapshot is passed as an argument.\n    ///\n    /// This will also cause a memory leak if the underlying RocksDB snapshot is\n    /// not dropped by calling [StaticSnapshot::drop]. 
Unlike most uses of\n    /// [Snapshot], the RocksDB snapshot will not be dropped when the\n    /// [Snapshot] returned by this method is dropped.\n    pub unsafe fn with_db<'a>(&self, db: &'a rocksdb::DB) -> Snapshot<'a> {\n        let db_ss = RocksDBSnapshot {\n            _db: db,\n            inner: self.inner,\n        };\n        let db_ss: rocksdb::Snapshot<'a> = std::mem::transmute(db_ss);\n\n        Snapshot {\n            ss: Some(db_ss),\n            tree: self.clone_tree(),\n            should_drop_ss: false,\n        }\n    }\n\n    /// Drops the [StaticSnapshot] and the underlying RocksDB snapshot.\n    ///\n    /// # Safety\n    /// This function is unsafe because it results in the RocksDB snapshot being\n    /// dropped, which could lead to use-after-free bugs if there are still\n    /// references to the snapshot in other [Snapshot] or [StaticSnapshot]\n    /// instances. The caller must be sure this is the last remaining reference\n    /// before calling this method.\n    pub unsafe fn drop(mut self, db: &rocksdb::DB) {\n        let mut ss = self.with_db(db);\n        ss.should_drop_ss = true;\n        self.should_drop = true;\n        // the snapshot drop implementation is now called, which includes\n        // dropping the RocksDB snapshot\n    }\n\n    /// Clones the root node of the Merk tree into a new [Tree].\n    fn clone_tree(&self) -> Cell<Option<Tree>> {\n        let tree = self.tree.take().unwrap();\n        let tree_clone = Cell::new(Some(Tree::decode(\n            tree.key().to_vec(),\n            tree.encode().as_slice(),\n        )));\n        self.tree.set(Some(tree));\n        tree_clone\n    }\n}\n\nimpl Drop for StaticSnapshot {\n    fn drop(&mut self) {\n        if !self.should_drop {\n            log::debug!(\"StaticSnapshot must be manually dropped\");\n        }\n    }\n}\n\nimpl Clone for StaticSnapshot {\n    fn clone(&self) -> Self {\n        Self {\n            tree: self.clone_tree(),\n            inner: self.inner,\n   
         should_drop: self.should_drop,\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::mem::transmute;\n\n    use super::RocksDBSnapshot;\n    use crate::test_utils::TempMerk;\n\n    #[test]\n    fn rocksdb_snapshot_struct_format() {\n        assert_eq!(std::mem::size_of::<rocksdb::Snapshot>(), 16);\n\n        let merk = TempMerk::new().unwrap();\n        let exptected_db_ptr = merk.db() as *const _;\n\n        let ss = merk.db().snapshot();\n        let ss: RocksDBSnapshot = unsafe { transmute(ss) };\n        let db_ptr = ss._db as *const _;\n\n        assert_eq!(exptected_db_ptr, db_ptr);\n    }\n}\n"
  },
  {
    "path": "src/owner.rs",
    "content": "use std::ops::{Deref, DerefMut};\n\n/// A container type which holds a value that may be temporarily owned by a\n/// consumer.\npub struct Owner<T> {\n    inner: Option<T>,\n}\n\nimpl<T> Owner<T> {\n    /// Creates a new `Owner` which holds the given value.\n    pub fn new(value: T) -> Owner<T> {\n        Owner { inner: Some(value) }\n    }\n\n    /// Takes temporary ownership of the contained value by passing it to `f`.\n    /// The function must return a value of the same type (the same value, or a\n    /// new value to take its place).\n    ///\n    /// # Example\n    /// ```\n    /// # use merk::owner::Owner;\n    /// # struct SomeType();\n    /// # impl SomeType {\n    /// #     fn method_which_requires_ownership(self) -> SomeType { self }\n    /// # }\n    /// #\n    /// let mut owner = Owner::new(SomeType());\n    /// owner.own(|value| {\n    ///     value.method_which_requires_ownership();\n    ///     SomeType() // now give back a value of the same type\n    /// });\n    /// ```\n    pub fn own<F: FnOnce(T) -> T>(&mut self, f: F) {\n        let old_value = unwrap(self.inner.take());\n        let new_value = f(old_value);\n        self.inner = Some(new_value);\n    }\n\n    /// Takes temporary ownership of the contained value by passing it to `f`.\n    /// The function must return a value of the same type (the same value, or a\n    /// new value to take its place).\n    ///\n    /// Like `own`, but uses a tuple return type which allows specifying a value\n    /// to return from the call to `own_return` for convenience.\n    ///\n    /// # Example\n    /// ```\n    /// # use merk::owner::Owner;\n    /// let mut owner = Owner::new(123);\n    /// let doubled = owner.own_return(|n| (n, n * 2));\n    /// ```\n    pub fn own_return<R, F>(&mut self, f: F) -> R\n    where\n        R: Sized,\n        F: FnOnce(T) -> (T, R),\n    {\n        let old_value = unwrap(self.inner.take());\n        let (new_value, return_value) = f(old_value);\n        
self.inner = Some(new_value);\n        return_value\n    }\n\n    /// Takes temporary ownership of the contained value by passing it to `f`.\n    /// The function must return a value of the same type (the same value, or a\n    /// new value to take its place).\n    ///\n    /// Like `own`, but with a fallible operation.\n    ///\n    /// # Example\n    /// ```\n    /// # use merk::owner::Owner;\n    /// # use std::convert::TryFrom;\n    /// let mut owner = Owner::new(123);\n    /// let converted = owner.own_fallible(|n| u32::try_from(n));\n    /// ```\n    pub fn own_fallible<E, F: FnOnce(T) -> Result<T, E>>(&mut self, f: F) -> Result<(), E> {\n        let old_value = unwrap(self.inner.take());\n        let new_value = f(old_value)?;\n        self.inner = Some(new_value);\n        Ok(())\n    }\n\n    /// Sheds the `Owner` container and returns the value it contained.\n    pub fn into_inner(mut self) -> T {\n        unwrap(self.inner.take())\n    }\n}\n\nimpl<T> Deref for Owner<T> {\n    type Target = T;\n\n    fn deref(&self) -> &T {\n        unwrap(self.inner.as_ref())\n    }\n}\n\nimpl<T> DerefMut for Owner<T> {\n    fn deref_mut(&mut self) -> &mut T {\n        unwrap(self.inner.as_mut())\n    }\n}\n\nfn unwrap<T>(option: Option<T>) -> T {\n    match option {\n        Some(value) => value,\n        None => unreachable!(\"value should be Some\"),\n    }\n}\n\n// TODO: unit tests\n"
  },
  {
    "path": "src/proofs/chunk.rs",
    "content": "#[cfg(feature = \"full\")]\nuse {\n    super::tree::{execute, Tree as ProofTree},\n    crate::tree::Hash,\n    crate::tree::Tree,\n    rocksdb::DBRawIterator,\n};\n\nuse super::{Node, Op};\nuse crate::error::{Error, Result};\nuse crate::tree::{Fetch, RefWalker};\n\n/// The minimum number of layers the trunk will be guaranteed to have before\n/// splitting into multiple chunks.\n///\n/// If the tree's height is less than double this value, the trunk should be\n/// verified as a leaf chunk.\npub const MIN_TRUNK_HEIGHT: usize = 5;\n\nimpl<'a, S> RefWalker<'a, S>\nwhere\n    S: Fetch + Sized + Send + Clone,\n{\n    /// Generates a trunk proof by traversing the tree.\n    ///\n    /// Returns a tuple containing the produced proof, and a boolean indicating\n    /// whether or not there will be more chunks to follow. If the chunk\n    /// contains the entire tree, the boolean will be `false`, if the chunk\n    /// is abridged and will be connected to leaf chunks, it will be `true`.\n    pub fn create_trunk_proof(&mut self) -> Result<(Vec<Op>, bool)> {\n        let approx_size = 2usize.pow((self.tree().height() / 2) as u32) * 3;\n        let mut proof = Vec::with_capacity(approx_size);\n\n        let trunk_height = self.traverse_for_height_proof(&mut proof, 1)?;\n\n        if trunk_height < MIN_TRUNK_HEIGHT {\n            proof.clear();\n            self.traverse_for_trunk(&mut proof, usize::MAX, true)?;\n            Ok((proof, false))\n        } else {\n            self.traverse_for_trunk(&mut proof, trunk_height, true)?;\n            Ok((proof, true))\n        }\n    }\n\n    /// Traverses down the left edge of the tree and pushes ops to the proof, to\n    /// act as a proof of the height of the tree. 
This is the first step in\n    /// generating a trunk proof.\n    fn traverse_for_height_proof(&mut self, proof: &mut Vec<Op>, depth: usize) -> Result<usize> {\n        let maybe_left = self.walk(true)?;\n        let has_left_child = maybe_left.is_some();\n\n        let trunk_height = if let Some(mut left) = maybe_left {\n            left.traverse_for_height_proof(proof, depth + 1)?\n        } else {\n            depth / 2\n        };\n\n        if depth > trunk_height {\n            proof.push(Op::Push(self.to_kvhash_node()));\n\n            if has_left_child {\n                proof.push(Op::Parent);\n            }\n\n            if let Some(right) = self.tree().link(false) {\n                proof.push(Op::Push(Node::Hash(*right.hash())));\n                proof.push(Op::Child);\n            }\n        }\n\n        Ok(trunk_height)\n    }\n\n    /// Traverses down the tree and adds KV push ops for all nodes up to a\n    /// certain depth. This expects the proof to contain a height proof as\n    /// generated by `traverse_for_height_proof`.\n    fn traverse_for_trunk(\n        &mut self,\n        proof: &mut Vec<Op>,\n        remaining_depth: usize,\n        is_leftmost: bool,\n    ) -> Result<()> {\n        if remaining_depth == 0 {\n            // return early if we have reached bottom of trunk\n\n            // for leftmost node, we already have height proof\n            if is_leftmost {\n                return Ok(());\n            }\n\n            // add this node's hash\n            proof.push(Op::Push(self.to_hash_node()));\n\n            return Ok(());\n        }\n\n        // traverse left\n        let has_left_child = self.tree().link(true).is_some();\n        if has_left_child {\n            let mut left = self.walk(true)?.unwrap();\n            left.traverse_for_trunk(proof, remaining_depth - 1, is_leftmost)?;\n        }\n\n        // add this node's data\n        proof.push(Op::Push(self.to_kv_node()));\n\n        if has_left_child {\n            
proof.push(Op::Parent);\n        }\n\n        // traverse right\n        if let Some(mut right) = self.walk(false)? {\n            right.traverse_for_trunk(proof, remaining_depth - 1, false)?;\n            proof.push(Op::Child);\n        }\n\n        Ok(())\n    }\n}\n\n/// Builds a chunk proof by iterating over values in a RocksDB, ending the chunk\n/// when a node with key `end_key` is encountered.\n///\n/// Advances the iterator for all nodes in the chunk and the `end_key` (if any).\n#[cfg(feature = \"full\")]\npub(crate) fn get_next_chunk(iter: &mut DBRawIterator, end_key: Option<&[u8]>) -> Result<Vec<Op>> {\n    let mut chunk = Vec::with_capacity(512);\n    let mut stack = Vec::with_capacity(32);\n    let mut node = Tree::new(vec![], vec![])?;\n\n    while iter.valid() {\n        let key = iter.key().unwrap();\n\n        if let Some(end_key) = end_key {\n            if key == end_key {\n                break;\n            }\n        }\n\n        let encoded_node = iter.value().unwrap();\n        Tree::decode_into(&mut node, vec![], encoded_node);\n\n        let kv = Node::KV(key.to_vec(), node.value().to_vec());\n        chunk.push(Op::Push(kv));\n\n        if node.link(true).is_some() {\n            chunk.push(Op::Parent);\n        }\n\n        if let Some(child) = node.link(false) {\n            stack.push(child.key().to_vec());\n        } else {\n            while let Some(top_key) = stack.last() {\n                if key < top_key.as_slice() {\n                    break;\n                }\n                stack.pop();\n                chunk.push(Op::Child);\n            }\n        }\n\n        iter.next();\n    }\n\n    if iter.valid() {\n        iter.next();\n    }\n\n    Ok(chunk)\n}\n\n/// Verifies a leaf chunk proof by executing its operators. 
Checks that there\n/// were no abridged nodes (Hash or KVHash) and the proof hashes to\n/// `expected_hash`.\n#[cfg(feature = \"full\")]\npub(crate) fn verify_leaf<I: Iterator<Item = Result<Op>>>(\n    ops: I,\n    expected_hash: Hash,\n) -> Result<ProofTree> {\n    let tree = execute(ops, false, |node| match node {\n        Node::KV(_, _) => Ok(()),\n        _ => Err(Error::Tree(\"Leaf chunks must contain full subtree\".into())),\n    })?;\n\n    if tree.hash()? != expected_hash {\n        return Err(Error::HashMismatch(expected_hash, tree.hash()?));\n    }\n\n    Ok(tree)\n}\n\n/// Verifies a trunk chunk proof by executing its operators. Ensures the\n/// resulting tree contains a valid height proof, the trunk is the correct\n/// height, and all of its inner nodes are not abridged. Returns the tree and\n/// the height given by the height proof.\n#[cfg(feature = \"full\")]\npub(crate) fn verify_trunk<I: Iterator<Item = Result<Op>>>(ops: I) -> Result<(ProofTree, usize)> {\n    fn verify_height_proof(tree: &ProofTree) -> Result<usize> {\n        let mut height = 1;\n        let mut cursor = tree;\n        while let Some(child) = cursor.child(true) {\n            if let Node::Hash(_) = child.tree.node {\n                return Err(Error::UnexpectedNode(\n                    \"Expected height proof to only contain KV and KVHash\n        nodes\"\n                        .into(),\n                ));\n            }\n            height += 1;\n            cursor = &child.tree;\n        }\n        Ok(height)\n    }\n\n    fn verify_completeness(tree: &ProofTree, remaining_depth: usize, leftmost: bool) -> Result<()> {\n        let recurse = |left, leftmost| {\n            if let Some(child) = tree.child(left) {\n                verify_completeness(&child.tree, remaining_depth - 1, left && leftmost)?;\n            }\n            Ok(())\n        };\n\n        if remaining_depth > 0 {\n            match tree.node {\n                Node::KV(_, _) => {}\n                _ => {\n 
                   return Err(Error::UnexpectedNode(\n                        \"Expected trunk inner nodes to contain keys and values\".into(),\n                    ));\n                }\n            }\n            recurse(true, leftmost)?;\n            recurse(false, false)\n        } else if !leftmost {\n            match tree.node {\n                Node::Hash(_) => Ok(()),\n                _ => Err(Error::UnexpectedNode(\n                    \"Expected trunk leaves to contain Hash nodes\".into(),\n                )),\n            }\n        } else {\n            match &tree.node {\n                Node::KVHash(_) => Ok(()),\n                _ => Err(Error::UnexpectedNode(\n                    \"Expected leftmost trunk leaf to contain KVHash node\".into(),\n                )),\n            }\n        }\n    }\n\n    let mut kv_only = true;\n    let tree = execute(ops, false, |node| {\n        kv_only &= matches!(node, Node::KV(_, _));\n        Ok(())\n    })?;\n\n    let height = verify_height_proof(&tree)?;\n    if height > 64 {\n        // This is a sanity check to prevent stack overflows in `verify_completeness`,\n        // but any tree above 64 is probably an error (~3.7e19 nodes).\n        return Err(Error::Tree(\"Tree is too large\".into()));\n    }\n    let trunk_height = height / 2;\n\n    if trunk_height < MIN_TRUNK_HEIGHT {\n        if !kv_only {\n            return Err(Error::Tree(\"Leaf chunks must contain full subtree\".into()));\n        }\n    } else {\n        verify_completeness(&tree, trunk_height, true)?;\n    }\n\n    Ok((tree, height))\n}\n\n#[cfg(test)]\nmod tests {\n    use super::super::tree::Tree;\n    use super::*;\n    use crate::test_utils::*;\n    use crate::tree::{NoopCommit, PanicSource, Tree as BaseTree};\n    use ed::Encode;\n\n    #[derive(Default)]\n    struct NodeCounts {\n        hash: usize,\n        kvhash: usize,\n        kv: usize,\n    }\n\n    fn count_node_types(tree: Tree) -> NodeCounts {\n        let mut counts = 
NodeCounts::default();\n\n        tree.visit_nodes(&mut |node| {\n            match node {\n                Node::Hash(_) => counts.hash += 1,\n                Node::KVHash(_) => counts.kvhash += 1,\n                Node::KV(_, _) => counts.kv += 1,\n            };\n        });\n\n        counts\n    }\n\n    #[test]\n    fn small_trunk_roundtrip() {\n        let mut tree = make_tree_seq(31);\n        let mut walker = RefWalker::new(&mut tree, PanicSource {});\n\n        let (proof, has_more) = walker.create_trunk_proof().unwrap();\n        assert!(!has_more);\n\n        println!(\"{:?}\", &proof);\n        let (trunk, _) = verify_trunk(proof.into_iter().map(Ok)).unwrap();\n\n        let counts = count_node_types(trunk);\n        assert_eq!(counts.hash, 0);\n        assert_eq!(counts.kv, 32);\n        assert_eq!(counts.kvhash, 0);\n    }\n\n    #[test]\n    fn big_trunk_roundtrip() {\n        let mut tree = make_tree_seq(2u64.pow(MIN_TRUNK_HEIGHT as u32 * 2 + 1) - 1);\n        let mut walker = RefWalker::new(&mut tree, PanicSource {});\n\n        let (proof, has_more) = walker.create_trunk_proof().unwrap();\n        assert!(has_more);\n        let (trunk, _) = verify_trunk(proof.into_iter().map(Ok)).unwrap();\n\n        let counts = count_node_types(trunk);\n        // are these formulas correct for all values of `MIN_TRUNK_HEIGHT`? 
🤔\n        assert_eq!(\n            counts.hash,\n            2usize.pow(MIN_TRUNK_HEIGHT as u32) + MIN_TRUNK_HEIGHT - 1\n        );\n        assert_eq!(counts.kv, 2usize.pow(MIN_TRUNK_HEIGHT as u32) - 1);\n        assert_eq!(counts.kvhash, MIN_TRUNK_HEIGHT + 1);\n    }\n\n    #[test]\n    fn one_node_tree_trunk_roundtrip() -> Result<()> {\n        let mut tree = BaseTree::new(vec![0], vec![])?;\n        tree.commit(&mut NoopCommit {}).unwrap();\n\n        let mut walker = RefWalker::new(&mut tree, PanicSource {});\n        let (proof, has_more) = walker.create_trunk_proof().unwrap();\n        assert!(!has_more);\n\n        let (trunk, _) = verify_trunk(proof.into_iter().map(Ok)).unwrap();\n        let counts = count_node_types(trunk);\n        assert_eq!(counts.hash, 0);\n        assert_eq!(counts.kv, 1);\n        assert_eq!(counts.kvhash, 0);\n        Ok(())\n    }\n\n    #[test]\n    fn two_node_right_heavy_tree_trunk_roundtrip() -> Result<()> {\n        // 0\n        //  \\\n        //   1\n        let mut tree =\n            BaseTree::new(vec![0], vec![])?.attach(false, Some(BaseTree::new(vec![1], vec![])?));\n        tree.commit(&mut NoopCommit {}).unwrap();\n        let mut walker = RefWalker::new(&mut tree, PanicSource {});\n        let (proof, has_more) = walker.create_trunk_proof().unwrap();\n        assert!(!has_more);\n\n        let (trunk, _) = verify_trunk(proof.into_iter().map(Ok)).unwrap();\n        let counts = count_node_types(trunk);\n        assert_eq!(counts.hash, 0);\n        assert_eq!(counts.kv, 2);\n        assert_eq!(counts.kvhash, 0);\n        Ok(())\n    }\n\n    #[test]\n    fn two_node_left_heavy_tree_trunk_roundtrip() -> Result<()> {\n        //   1\n        //  /\n        // 0\n        let mut tree =\n            BaseTree::new(vec![1], vec![])?.attach(true, Some(BaseTree::new(vec![0], vec![])?));\n        tree.commit(&mut NoopCommit {}).unwrap();\n        let mut walker = RefWalker::new(&mut tree, PanicSource {});\n        let 
(proof, has_more) = walker.create_trunk_proof().unwrap();\n        assert!(!has_more);\n\n        let (trunk, _) = verify_trunk(proof.into_iter().map(Ok)).unwrap();\n        let counts = count_node_types(trunk);\n        assert_eq!(counts.hash, 0);\n        assert_eq!(counts.kv, 2);\n        assert_eq!(counts.kvhash, 0);\n        Ok(())\n    }\n\n    #[test]\n    fn three_node_tree_trunk_roundtrip() -> Result<()> {\n        //   1\n        //  / \\\n        // 0   2\n        let mut tree = BaseTree::new(vec![1], vec![])?\n            .attach(true, Some(BaseTree::new(vec![0], vec![])?))\n            .attach(false, Some(BaseTree::new(vec![2], vec![])?));\n        tree.commit(&mut NoopCommit {}).unwrap();\n\n        let mut walker = RefWalker::new(&mut tree, PanicSource {});\n        let (proof, has_more) = walker.create_trunk_proof().unwrap();\n        assert!(!has_more);\n\n        let (trunk, _) = verify_trunk(proof.into_iter().map(Ok)).unwrap();\n        let counts = count_node_types(trunk);\n        assert_eq!(counts.hash, 0);\n        assert_eq!(counts.kv, 3);\n        assert_eq!(counts.kvhash, 0);\n        Ok(())\n    }\n\n    #[test]\n    fn leaf_chunk_roundtrip() {\n        let mut merk = TempMerk::new().unwrap();\n        let batch = make_batch_seq(0..31);\n        merk.apply(batch.as_slice(), &[]).unwrap();\n\n        let root_node = merk.tree.read().unwrap();\n        let root_key = root_node.as_ref().unwrap().key().to_vec();\n\n        // whole tree as 1 leaf\n        let mut iter = merk.db.raw_iterator();\n        iter.seek_to_first();\n        let chunk = get_next_chunk(&mut iter, None).unwrap();\n        let ops = chunk.into_iter().map(Ok);\n        let chunk = verify_leaf(ops, merk.root_hash()).unwrap();\n        let counts = count_node_types(chunk);\n        assert_eq!(counts.kv, 31);\n        assert_eq!(counts.hash, 0);\n        assert_eq!(counts.kvhash, 0);\n        drop(iter);\n\n        let mut iter = merk.db.raw_iterator();\n        
iter.seek_to_first();\n\n        // left leaf\n        let chunk = get_next_chunk(&mut iter, Some(root_key.as_slice())).unwrap();\n        let ops = chunk.into_iter().map(Ok);\n        let chunk = verify_leaf(\n            ops,\n            [\n                222, 93, 128, 149, 117, 136, 34, 175, 204, 82, 228, 113, 242, 144, 152, 190, 210,\n                27, 195, 34, 24, 196, 210, 99, 250, 119, 219, 114, 52, 167, 191, 249,\n            ],\n        )\n        .unwrap();\n        let counts = count_node_types(chunk);\n        assert_eq!(counts.kv, 15);\n        assert_eq!(counts.hash, 0);\n        assert_eq!(counts.kvhash, 0);\n\n        // right leaf\n        let chunk = get_next_chunk(&mut iter, None).unwrap();\n        let ops = chunk.into_iter().map(Ok);\n        let chunk = verify_leaf(\n            ops,\n            [\n                128, 158, 92, 80, 118, 253, 48, 241, 74, 154, 213, 187, 92, 243, 154, 28, 164, 235,\n                156, 122, 174, 226, 84, 170, 233, 166, 27, 79, 100, 10, 88, 184,\n            ],\n        )\n        .unwrap();\n        let counts = count_node_types(chunk);\n        assert_eq!(counts.kv, 15);\n        assert_eq!(counts.hash, 0);\n        assert_eq!(counts.kvhash, 0);\n    }\n\n    #[test]\n    #[should_panic(expected = \"Tree is too large\")]\n    fn test_verify_height_stack_overflow() {\n        let height = 5_000u32;\n        let push_op = |i: u32| Op::Push(Node::KV(i.to_be_bytes().to_vec(), vec![]));\n        let mut ops = Vec::with_capacity((height * 2) as usize);\n        ops.push(push_op(0));\n        for i in 1..height {\n            ops.push(push_op(i));\n            ops.push(Op::Parent)\n        }\n        assert!(ops.encoding_length().unwrap() < 50_000);\n        println!(\"Len: {}\", ops.encoding_length().unwrap());\n        let (_, result_height) = verify_trunk(ops.into_iter().map(Ok)).unwrap();\n        assert_eq!(height, result_height as u32);\n    }\n}\n"
  },
  {
    "path": "src/proofs/encoding.rs",
    "content": "use std::io::{Read, Write};\n\nuse ed::{Decode, Encode, Terminated};\n\nuse super::{Node, Op};\nuse crate::error::Result;\nuse crate::tree::HASH_LENGTH;\n\nimpl Encode for Op {\n    fn encode_into<W: Write>(&self, dest: &mut W) -> ed::Result<()> {\n        match self {\n            Op::Push(Node::Hash(hash)) => {\n                dest.write_all(&[0x01])?;\n                dest.write_all(hash)?;\n            }\n            Op::Push(Node::KVHash(kv_hash)) => {\n                dest.write_all(&[0x02])?;\n                dest.write_all(kv_hash)?;\n            }\n            Op::Push(Node::KV(key, value)) => {\n                debug_assert!(key.len() < 65536);\n                debug_assert!(value.len() < 65536);\n                dest.write_all(&[0x03])?;\n                (key.len() as u16).encode_into(dest)?;\n                dest.write_all(key)?;\n                (value.len() as u16).encode_into(dest)?;\n                dest.write_all(value)?;\n            }\n            Op::Parent => dest.write_all(&[0x10])?,\n            Op::Child => dest.write_all(&[0x11])?,\n        };\n        Ok(())\n    }\n\n    fn encoding_length(&self) -> ed::Result<usize> {\n        Ok(match self {\n            Op::Push(Node::Hash(_)) => 1 + HASH_LENGTH,\n            Op::Push(Node::KVHash(_)) => 1 + HASH_LENGTH,\n            Op::Push(Node::KV(key, value)) => 5 + key.len() + value.len(),\n            Op::Parent => 1,\n            Op::Child => 1,\n        })\n    }\n}\n\nimpl Decode for Op {\n    fn decode<R: Read>(mut input: R) -> ed::Result<Self> {\n        let variant: u8 = Decode::decode(&mut input)?;\n\n        Ok(match variant {\n            0x01 => {\n                let mut hash = [0; HASH_LENGTH];\n                input.read_exact(&mut hash)?;\n                Op::Push(Node::Hash(hash))\n            }\n            0x02 => {\n                let mut hash = [0; HASH_LENGTH];\n                input.read_exact(&mut hash)?;\n                Op::Push(Node::KVHash(hash))\n     
       }\n            0x03 => {\n                let key_len: u16 = Decode::decode(&mut input)?;\n                let mut key = vec![0; key_len as usize];\n                input.read_exact(key.as_mut_slice())?;\n\n                let value_len: u16 = Decode::decode(&mut input)?;\n                let mut value = vec![0; value_len as usize];\n                input.read_exact(value.as_mut_slice())?;\n\n                Op::Push(Node::KV(key, value))\n            }\n            0x10 => Op::Parent,\n            0x11 => Op::Child,\n            byte => {\n                return Err(ed::Error::UnexpectedByte(byte));\n            }\n        })\n    }\n}\n\nimpl Terminated for Op {}\n\nimpl Op {\n    fn encode_into<W: Write>(&self, dest: &mut W) -> Result<()> {\n        Ok(Encode::encode_into(self, dest)?)\n    }\n\n    fn encoding_length(&self) -> usize {\n        Encode::encoding_length(self).unwrap()\n    }\n\n    pub fn decode(bytes: &[u8]) -> Result<Self> {\n        Ok(Decode::decode(bytes)?)\n    }\n}\n\npub fn encode_into<'a, T: Iterator<Item = &'a Op>>(ops: T, output: &mut Vec<u8>) {\n    for op in ops {\n        op.encode_into(output).unwrap();\n    }\n}\n\npub struct Decoder<'a> {\n    offset: usize,\n    bytes: &'a [u8],\n}\n\nimpl<'a> Decoder<'a> {\n    pub fn new(proof_bytes: &'a [u8]) -> Self {\n        Decoder {\n            offset: 0,\n            bytes: proof_bytes,\n        }\n    }\n}\n\nimpl<'a> Iterator for Decoder<'a> {\n    type Item = Result<Op>;\n\n    fn next(&mut self) -> Option<Self::Item> {\n        if self.offset >= self.bytes.len() {\n            return None;\n        }\n\n        Some((|| {\n            let bytes = &self.bytes[self.offset..];\n            let op = Op::decode(bytes)?;\n            self.offset += op.encoding_length();\n            Ok(op)\n        })())\n    }\n}\n\n#[cfg(test)]\nmod test {\n    use super::super::{Node, Op};\n    use crate::tree::HASH_LENGTH;\n\n    #[test]\n    fn encode_push_hash() {\n        let op = 
Op::Push(Node::Hash([123; HASH_LENGTH]));\n        assert_eq!(op.encoding_length(), 1 + HASH_LENGTH);\n\n        let mut bytes = vec![];\n        op.encode_into(&mut bytes).unwrap();\n        assert_eq!(\n            bytes,\n            vec![\n                0x01, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123\n            ]\n        );\n    }\n\n    #[test]\n    fn encode_push_kvhash() {\n        let op = Op::Push(Node::KVHash([123; HASH_LENGTH]));\n        assert_eq!(op.encoding_length(), 1 + HASH_LENGTH);\n\n        let mut bytes = vec![];\n        op.encode_into(&mut bytes).unwrap();\n        assert_eq!(\n            bytes,\n            vec![\n                0x02, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n                123\n            ]\n        );\n    }\n\n    #[test]\n    fn encode_push_kv() {\n        let op = Op::Push(Node::KV(vec![1, 2, 3], vec![4, 5, 6]));\n        assert_eq!(op.encoding_length(), 11);\n\n        let mut bytes = vec![];\n        op.encode_into(&mut bytes).unwrap();\n        assert_eq!(bytes, vec![0x03, 0, 3, 1, 2, 3, 0, 3, 4, 5, 6]);\n    }\n\n    #[test]\n    fn encode_parent() {\n        let op = Op::Parent;\n        assert_eq!(op.encoding_length(), 1);\n\n        let mut bytes = vec![];\n        op.encode_into(&mut bytes).unwrap();\n        assert_eq!(bytes, vec![0x10]);\n    }\n\n    #[test]\n    fn encode_child() {\n        let op = Op::Child;\n        assert_eq!(op.encoding_length(), 1);\n\n        let mut bytes = vec![];\n        op.encode_into(&mut bytes).unwrap();\n        assert_eq!(bytes, vec![0x11]);\n    }\n\n    #[test]\n    #[should_panic]\n    fn encode_push_kv_long_key() {\n        let op = Op::Push(Node::KV(vec![123; 70_000], vec![4, 5, 
6]));\n        let mut bytes = vec![];\n        op.encode_into(&mut bytes).unwrap();\n    }\n\n    #[test]\n    fn decode_push_hash() {\n        let bytes = [\n            0x01, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n            123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n        ];\n        let op = Op::decode(&bytes[..]).expect(\"decode failed\");\n        assert_eq!(op, Op::Push(Node::Hash([123; HASH_LENGTH])));\n    }\n\n    #[test]\n    fn decode_push_kvhash() {\n        let bytes = [\n            0x02, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n            123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123,\n        ];\n        let op = Op::decode(&bytes[..]).expect(\"decode failed\");\n        assert_eq!(op, Op::Push(Node::KVHash([123; HASH_LENGTH])));\n    }\n\n    #[test]\n    fn decode_push_kv() {\n        let bytes = [0x03, 0, 3, 1, 2, 3, 0, 3, 4, 5, 6];\n        let op = Op::decode(&bytes[..]).expect(\"decode failed\");\n        assert_eq!(op, Op::Push(Node::KV(vec![1, 2, 3], vec![4, 5, 6])));\n    }\n\n    #[test]\n    fn decode_parent() {\n        let bytes = [0x10];\n        let op = Op::decode(&bytes[..]).expect(\"decode failed\");\n        assert_eq!(op, Op::Parent);\n    }\n\n    #[test]\n    fn decode_child() {\n        let bytes = [0x11];\n        let op = Op::decode(&bytes[..]).expect(\"decode failed\");\n        assert_eq!(op, Op::Child);\n    }\n\n    #[test]\n    fn decode_unknown() {\n        let bytes = [0x88];\n        assert!(Op::decode(&bytes[..]).is_err());\n    }\n}\n"
  },
  {
    "path": "src/proofs/mod.rs",
    "content": "pub mod chunk;\npub mod encoding;\npub mod query;\npub mod tree;\n\nuse crate::tree::Hash;\n\npub use encoding::{encode_into, Decoder};\npub use query::Query;\npub use tree::Tree;\n\n/// A proof operator, executed to verify the data in a Merkle proof.\n#[derive(Debug, PartialEq)]\npub enum Op {\n    /// Pushes a node on the stack.\n    Push(Node),\n\n    /// Pops the top stack item as `parent`. Pops the next top stack item as\n    /// `child`. Attaches `child` as the left child of `parent`. Pushes the\n    /// updated `parent` back on the stack.\n    Parent,\n\n    /// Pops the top stack item as `child`. Pops the next top stack item as\n    /// `parent`. Attaches `child` as the right child of `parent`. Pushes the\n    /// updated `parent` back on the stack.\n    Child,\n}\n\n/// A selected piece of data about a single tree node, to be contained in a\n/// `Push` operator in a proof.\n#[derive(Clone, Debug, PartialEq)]\npub enum Node {\n    /// Represents the hash of a tree node.\n    Hash(Hash),\n\n    /// Represents the hash of the key/value pair of a tree node.\n    KVHash(Hash),\n\n    /// Represents the key and value of a tree node.\n    KV(Vec<u8>, Vec<u8>),\n}\n"
  },
  {
    "path": "src/proofs/query/map.rs",
    "content": "use super::super::Node;\nuse crate::{Error, Result};\nuse std::collections::btree_map;\nuse std::collections::BTreeMap;\nuse std::iter::Peekable;\nuse std::ops::{Bound, RangeBounds};\n\n/// `MapBuilder` allows a consumer to construct a `Map` by inserting the nodes\n/// contained in a proof, in key-order.\npub(crate) struct MapBuilder(Map);\n\nimpl MapBuilder {\n    /// Creates a new `MapBuilder` with an empty internal `Map`.\n    pub fn new() -> Self {\n        MapBuilder(Map {\n            entries: Default::default(),\n            right_edge: true,\n        })\n    }\n\n    /// Adds the node's data to the underlying `Map` (if node is type `KV`), or\n    /// makes a note of non-contiguous data (if node is type `KVHash` or\n    /// `Hash`).\n    pub fn insert(&mut self, node: &Node) -> Result<()> {\n        match node {\n            Node::KV(key, value) => {\n                if let Some((prev_key, _)) = self.0.entries.last_key_value() {\n                    if key <= prev_key {\n                        return Err(Error::Key(\n                            \"Expected nodes to be in increasing key order\".into(),\n                        ));\n                    }\n                }\n\n                let value = (self.0.right_edge, value.clone());\n                self.0.entries.insert(key.clone(), value);\n                self.0.right_edge = true;\n            }\n            _ => self.0.right_edge = false,\n        }\n\n        Ok(())\n    }\n\n    /// Consumes the `MapBuilder` and returns its internal `Map`.\n    pub fn build(self) -> Map {\n        self.0\n    }\n}\n\n/// `Map` stores data extracted from a proof.\n///\n/// The data (which has already been verified against a known root hash) can be\n/// accessed by a consumer by looking up individual keys using the `get` method,\n/// or iterating over ranges using the `range` method.\n#[derive(Clone, Debug)]\npub struct Map {\n    entries: BTreeMap<Vec<u8>, (bool, Vec<u8>)>,\n    right_edge: 
bool,\n}\n\nimpl Map {\n    /// Gets the value for a single key, or `None` if the key was proven to not\n    /// exist in the tree. If the proof does not include the data and also does\n    /// not prove that the key is absent in the tree (meaning the proof is not\n    /// valid), an error will be returned.\n    pub fn get<'a>(&'a self, key: &'a [u8]) -> Result<Option<&'a [u8]>> {\n        // if key is in proof just get from entries\n        if let Some((_, value)) = self.entries.get(key) {\n            return Ok(Some(value.as_slice()));\n        }\n\n        // otherwise, use range which only includes exact key match to check\n        // absence proof\n        let entry = self\n            .range((Bound::Included(key), Bound::Included(key)))\n            .next()\n            .transpose()?\n            .map(|(_, value)| value);\n        Ok(entry)\n    }\n\n    /// Returns an iterator over all (key, value) entries in the requested range\n    /// of keys. If during iteration we encounter a gap in the data (e.g. 
the\n    /// proof did not include all nodes within the range), the iterator will\n    /// yield an error.\n    pub fn range<'a>(&self, bounds: impl RangeBounds<&'a [u8]>) -> Range {\n        let start_bound = bound_to_inner(bounds.start_bound());\n        let end_bound = bound_to_inner(bounds.end_bound());\n        let outer_bounds = (\n            start_bound.map_or(Bound::Unbounded, |k| {\n                self.entries\n                    .range(..=k.to_vec())\n                    .next_back()\n                    .map_or(Bound::Unbounded, |prev| Bound::Included(prev.0.clone()))\n            }),\n            end_bound.map_or(Bound::Unbounded, |k| {\n                self.entries\n                    .range(k.to_vec()..)\n                    .next()\n                    .map_or(Bound::Unbounded, |next| Bound::Included(next.0.clone()))\n            }),\n        );\n\n        Range {\n            map: self,\n            bounds: bounds_to_vec(bounds),\n            done: false,\n            iter: self.entries.range(outer_bounds).peekable(),\n        }\n    }\n\n    /// Joins two `Map`s together, combining the data in both.\n    ///\n    /// If the maps contain contiguous iteration ranges, the contiguous ranges\n    /// will be joined. 
If the maps have differing values for the same key, this\n    /// will panic (this should never happen if the queries came from the same\n    /// root and the proofs were verified).\n    pub fn join(self, other: Map) -> Map {\n        // TODO: join at the partial tree level, joining with only Map data means\n        // data from different joins which happen to be contiguous (without explicitly\n        // querying based on next/prev) will be marked as non-contiguous\n        let mut entries = self.entries.clone();\n        entries.extend(other.entries);\n        for (key, (contiguous, val)) in entries.iter_mut() {\n            if let Some(shadowed) = self.entries.get(key) {\n                assert_eq!(val, &shadowed.1, \"Maps have different values\",);\n                *contiguous = *contiguous || shadowed.0;\n            }\n        }\n\n        Map {\n            entries,\n            right_edge: self.right_edge || other.right_edge,\n        }\n    }\n\n    /// Returns `true` if the [Map] can verify that there is no unproven data\n    /// between `key` and the node to its right (or the global tree edge).\n    ///\n    /// For example, if the underlying tree contains the key `[a, b, c, d]` and\n    /// the map contains the keys `[a, b, d]`, then `contiguous_right(a)` will\n    /// return `true`, `contiguous_right(b)` and `contiguous_right(c)` will\n    /// return `false`, and `contiguous_right(d)` will return `true`.\n    fn contiguous_right(&self, key: &[u8]) -> bool {\n        self.entries\n            .range((Bound::Excluded(key.to_vec()), Bound::Unbounded))\n            .next()\n            .map_or(self.right_edge, |(_, (contiguous, _))| *contiguous)\n    }\n}\n\n/// Returns `None` for `Bound::Unbounded`, or the inner key value for\n/// `Bound::Included` and `Bound::Excluded`.\nfn bound_to_inner<T>(bound: Bound<T>) -> Option<T> {\n    match bound {\n        Bound::Unbounded => None,\n        Bound::Included(key) | Bound::Excluded(key) => Some(key),\n    
}\n}\n\n/// Converts the inner key value of a `Bound` from a byte slice to a `Vec<u8>`.\nfn bound_to_vec(bound: Bound<&&[u8]>) -> Bound<Vec<u8>> {\n    match bound {\n        Bound::Unbounded => Bound::Unbounded,\n        Bound::Excluded(k) => Bound::Excluded(k.to_vec()),\n        Bound::Included(k) => Bound::Included(k.to_vec()),\n    }\n}\n\n/// Converts the inner key values of a [RangeBounds] from byte slices to\n/// `Vec<u8>`.\nfn bounds_to_vec<'a, R: RangeBounds<&'a [u8]>>(bounds: R) -> (Bound<Vec<u8>>, Bound<Vec<u8>>) {\n    (\n        bound_to_vec(bounds.start_bound()),\n        bound_to_vec(bounds.end_bound()),\n    )\n}\n\n/// An iterator over (key, value) entries as extracted from a verified proof.\n///\n/// If during iteration we encounter a gap in the data (e.g. the proof did not\n/// include all nodes within the range), the iterator will yield an error.\npub struct Range<'a> {\n    map: &'a Map,\n    bounds: (Bound<Vec<u8>>, Bound<Vec<u8>>),\n    done: bool,\n    iter: Peekable<InnerRange<'a>>,\n}\n\ntype InnerRange<'a> = btree_map::Range<'a, Vec<u8>, (bool, Vec<u8>)>;\n\nimpl<'a> Range<'a> {\n    fn yield_entry_if_contiguous(\n        &mut self,\n        entry: (&'a Vec<u8>, &'a (bool, Vec<u8>)),\n        contiguous: bool,\n        forward: bool,\n    ) -> Option<Result<(&'a [u8], &'a [u8])>> {\n        if !contiguous {\n            self.done = true;\n            return Some(Err(Error::MissingData));\n        }\n\n        self.yield_entry(entry, forward)\n    }\n\n    fn yield_entry(\n        &mut self,\n        entry: (&'a Vec<u8>, &'a (bool, Vec<u8>)),\n        forward: bool,\n    ) -> Option<Result<(&'a [u8], &'a [u8])>> {\n        let (key, (_, value)) = entry;\n        if forward {\n            self.bounds.0 = Bound::Excluded(key.clone());\n        } else {\n            self.bounds.1 = Bound::Excluded(key.clone());\n        }\n        Some(Ok((key.as_slice(), value.as_slice())))\n    }\n\n    fn yield_none_if_contiguous(\n        &mut self,\n     
   contiguous: bool,\n    ) -> Option<Result<(&'a [u8], &'a [u8])>> {\n        self.done = true;\n\n        if !contiguous {\n            return Some(Err(Error::MissingData));\n        }\n\n        None\n    }\n\n    fn yield_next_if_contiguous(&mut self) -> Option<Result<(&'a [u8], &'a [u8])>> {\n        if let Some((_, (contiguous, _))) = self.iter.peek() {\n            if !contiguous {\n                self.done = true;\n                return Some(Err(Error::MissingData));\n            }\n        }\n\n        self.next()\n    }\n\n    fn yield_next_back_if_contiguous(\n        &mut self,\n        contiguous: bool,\n    ) -> Option<Result<(&'a [u8], &'a [u8])>> {\n        if !contiguous {\n            self.done = true;\n            return Some(Err(Error::MissingData));\n        }\n\n        self.next_back()\n    }\n}\n\nimpl<'a> Iterator for Range<'a> {\n    type Item = Result<(&'a [u8], &'a [u8])>;\n\n    fn next(&mut self) -> Option<Self::Item> {\n        if self.done {\n            return None;\n        }\n\n        let entry = match self.iter.next() {\n            None => return self.yield_none_if_contiguous(self.map.right_edge),\n            Some(entry) => entry,\n        };\n        let (key, (contiguous, _)) = entry;\n\n        let past_start = match bound_to_inner(self.bounds.0.clone()) {\n            None => true,\n            Some(ref start_bound) => key > start_bound,\n        };\n        let at_start = match self.bounds.0 {\n            Bound::Unbounded => true,\n            Bound::Included(ref start_bound) => key == start_bound,\n            Bound::Excluded(_) => false,\n        };\n        let past_end = match self.bounds.1 {\n            Bound::Unbounded => false,\n            Bound::Included(ref end_bound) => key > end_bound,\n            Bound::Excluded(ref end_bound) => key >= end_bound,\n        };\n\n        if past_end {\n            self.yield_none_if_contiguous(*contiguous)\n        } else if past_start {\n            
self.yield_entry_if_contiguous(entry, *contiguous, true)\n        } else if at_start {\n            self.yield_entry(entry, true)\n        } else {\n            self.yield_next_if_contiguous()\n        }\n    }\n}\n\nimpl<'a> DoubleEndedIterator for Range<'a> {\n    fn next_back(&mut self) -> Option<Self::Item> {\n        if self.done {\n            return None;\n        }\n\n        let entry = match self.iter.next_back() {\n            None => return self.yield_none_if_contiguous(self.map.contiguous_right(&[])),\n            Some(entry) => entry,\n        };\n        let (key, (contiguous_l, _)) = entry;\n        let contiguous_r = self.map.contiguous_right(key);\n\n        let past_end = match bound_to_inner(self.bounds.1.clone()) {\n            None => true,\n            Some(ref end_bound) => key < end_bound,\n        };\n        let at_end = match self.bounds.1 {\n            Bound::Unbounded => true,\n            Bound::Included(ref end_bound) => key == end_bound,\n            Bound::Excluded(_) => false,\n        };\n        let past_start = match self.bounds.0 {\n            Bound::Unbounded => false,\n            Bound::Included(ref start_bound) => key < start_bound,\n            Bound::Excluded(ref start_bound) => key <= start_bound,\n        };\n\n        if past_start {\n            self.yield_none_if_contiguous(contiguous_r)\n        } else if past_end {\n            self.yield_entry_if_contiguous(entry, contiguous_r, false)\n        } else if at_end {\n            self.yield_entry(entry, false)\n        } else {\n            self.yield_next_back_if_contiguous(*contiguous_l)\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::HASH_LENGTH;\n\n    #[test]\n    #[should_panic(expected = \"Expected nodes to be in increasing key order\")]\n    fn mapbuilder_insert_out_of_order() {\n        let mut builder = MapBuilder::new();\n        builder.insert(&Node::KV(vec![1, 2, 3], vec![])).unwrap();\n        
builder.insert(&Node::KV(vec![1, 2, 2], vec![])).unwrap();\n    }\n\n    #[test]\n    #[should_panic(expected = \"Expected nodes to be in increasing key order\")]\n    fn mapbuilder_insert_dupe() {\n        let mut builder = MapBuilder::new();\n        builder.insert(&Node::KV(vec![1, 2, 3], vec![])).unwrap();\n        builder.insert(&Node::KV(vec![1, 2, 3], vec![])).unwrap();\n    }\n\n    #[test]\n    fn mapbuilder_insert_including_edge() {\n        let mut builder = MapBuilder::new();\n        builder.insert(&Node::Hash([0; HASH_LENGTH])).unwrap();\n        builder.insert(&Node::KV(vec![1, 2, 4], vec![])).unwrap();\n\n        assert!(builder.0.right_edge);\n    }\n\n    #[test]\n    fn mapbuilder_insert_abridged_edge() {\n        let mut builder = MapBuilder::new();\n        builder.insert(&Node::KV(vec![1, 2, 3], vec![])).unwrap();\n        builder.insert(&Node::Hash([0; HASH_LENGTH])).unwrap();\n\n        assert!(!builder.0.right_edge);\n    }\n\n    #[test]\n    fn mapbuilder_build() {\n        let mut builder = MapBuilder::new();\n        builder.insert(&Node::KV(vec![1, 2, 3], vec![1])).unwrap();\n        builder.insert(&Node::Hash([0; HASH_LENGTH])).unwrap();\n        builder.insert(&Node::KV(vec![1, 2, 4], vec![2])).unwrap();\n\n        let map = builder.build();\n        let mut entries = map.entries.iter();\n        assert_eq!(entries.next(), Some((&vec![1, 2, 3], &(true, vec![1]))));\n        assert_eq!(entries.next(), Some((&vec![1, 2, 4], &(false, vec![2]))));\n        assert_eq!(entries.next(), None);\n        assert!(map.right_edge);\n    }\n\n    #[test]\n    fn map_get_included() {\n        let mut builder = MapBuilder::new();\n        builder.insert(&Node::KV(vec![1, 2, 3], vec![1])).unwrap();\n        builder.insert(&Node::Hash([0; HASH_LENGTH])).unwrap();\n        builder.insert(&Node::KV(vec![1, 2, 4], vec![2])).unwrap();\n\n        let map = builder.build();\n        assert_eq!(map.get(&[1, 2, 3]).unwrap().unwrap(), vec![1],);\n        
assert_eq!(map.get(&[1, 2, 4]).unwrap().unwrap(), vec![2],);\n    }\n\n    #[test]\n    #[should_panic(expected = \"MissingData\")]\n    fn map_get_missing_absence_proof() {\n        let mut builder = MapBuilder::new();\n        builder.insert(&Node::KV(vec![1, 2, 3], vec![1])).unwrap();\n        builder.insert(&Node::Hash([0; HASH_LENGTH])).unwrap();\n        builder.insert(&Node::KV(vec![1, 2, 4], vec![2])).unwrap();\n\n        let map = builder.build();\n        map.get(&[1, 2, 3, 4]).unwrap();\n    }\n\n    #[test]\n    fn map_get_valid_absence_proof() {\n        let mut builder = MapBuilder::new();\n        builder.insert(&Node::KV(vec![1, 2, 3], vec![1])).unwrap();\n        builder.insert(&Node::KV(vec![1, 2, 4], vec![2])).unwrap();\n\n        let map = builder.build();\n        assert!(map.get(&[1, 2, 3, 4]).unwrap().is_none());\n    }\n\n    #[test]\n    #[should_panic(expected = \"MissingData\")]\n    fn range_abridged() {\n        let mut builder = MapBuilder::new();\n        builder.insert(&Node::KV(vec![1, 2, 3], vec![1])).unwrap();\n        builder.insert(&Node::Hash([0; HASH_LENGTH])).unwrap();\n        builder.insert(&Node::KV(vec![1, 2, 4], vec![2])).unwrap();\n\n        let map = builder.build();\n        let mut range = map.range(&[1u8, 2, 3][..]..&[1u8, 2, 4][..]);\n        assert_eq!(range.next().unwrap().unwrap(), (&[1, 2, 3][..], &[1][..]));\n        range.next().unwrap().unwrap();\n    }\n\n    #[test]\n    fn range_ok() {\n        let mut builder = MapBuilder::new();\n        builder.insert(&Node::KV(vec![1, 2, 3], vec![1])).unwrap();\n        builder.insert(&Node::KV(vec![1, 2, 4], vec![2])).unwrap();\n        builder.insert(&Node::KV(vec![1, 2, 5], vec![3])).unwrap();\n\n        let map = builder.build();\n        let mut range = map.range(&[1u8, 2, 3][..]..&[1u8, 2, 5][..]);\n        assert_eq!(range.next().unwrap().unwrap(), (&[1, 2, 3][..], &[1][..]));\n        assert_eq!(range.next().unwrap().unwrap(), (&[1, 2, 4][..], &[2][..]));\n    
    assert!(range.next().is_none());\n        assert!(range.next_back().is_none());\n        assert!(range.next().is_none());\n    }\n\n    #[test]\n    fn range_empty() {\n        let map = MapBuilder::new().build();\n        let mut range = map.range(..);\n        assert!(range.next().is_none());\n        assert!(range.next_back().is_none());\n    }\n\n    #[test]\n    #[should_panic(expected = \"MissingData\")]\n    fn range_lower_unbounded_map_non_contiguous() {\n        let mut builder = MapBuilder::new();\n        builder.insert(&Node::KV(vec![1, 2, 3], vec![1])).unwrap();\n        builder.insert(&Node::Hash([1; HASH_LENGTH])).unwrap();\n        builder.insert(&Node::KV(vec![1, 2, 4], vec![1])).unwrap();\n\n        let map = builder.build();\n\n        let mut range = map.range(..&[1u8, 2, 5][..]);\n        range.next().unwrap().unwrap();\n        range.next().unwrap().unwrap();\n    }\n\n    #[test]\n    fn range_reach_proof_end() {\n        let mut builder = MapBuilder::new();\n        builder.insert(&Node::KV(vec![1, 2, 3], vec![1])).unwrap();\n        builder.insert(&Node::KV(vec![1, 2, 4], vec![2])).unwrap();\n\n        let map = builder.build();\n        let mut range = map.range(&[1u8, 2, 3][..]..);\n        assert_eq!(range.next().unwrap().unwrap(), (&[1, 2, 3][..], &[1][..]));\n        assert_eq!(range.next().unwrap().unwrap(), (&[1, 2, 4][..], &[2][..]));\n        assert!(range.next().is_none());\n    }\n\n    #[test]\n    fn range_unbounded() {\n        let mut builder = MapBuilder::new();\n        builder.insert(&Node::KV(vec![1, 2, 3], vec![1])).unwrap();\n        builder.insert(&Node::KV(vec![1, 2, 4], vec![2])).unwrap();\n\n        let map = builder.build();\n        let mut range = map.range(..);\n        assert_eq!(range.next().unwrap().unwrap(), (&[1, 2, 3][..], &[1][..]));\n        assert_eq!(range.next().unwrap().unwrap(), (&[1, 2, 4][..], &[2][..]));\n        assert!(range.next().is_none());\n    }\n\n    #[test]\n    
#[should_panic(expected = \"MissingData\")]\n    fn range_abridged_rev() {\n        let mut builder = MapBuilder::new();\n        builder.insert(&Node::KV(vec![1, 2, 3], vec![1])).unwrap();\n        builder.insert(&Node::Hash([0; HASH_LENGTH])).unwrap();\n        builder.insert(&Node::KV(vec![1, 2, 4], vec![2])).unwrap();\n\n        let map = builder.build();\n        let mut range = map.range(&[1u8, 2, 3][..]..=&[1u8, 2, 4][..]).rev();\n        assert_eq!(range.next().unwrap().unwrap(), (&[1, 2, 4][..], &[2][..]));\n        range.next().unwrap().unwrap();\n    }\n\n    #[test]\n    fn range_ok_rev() {\n        let mut builder = MapBuilder::new();\n        builder.insert(&Node::KV(vec![1, 2, 3], vec![1])).unwrap();\n        builder.insert(&Node::KV(vec![1, 2, 4], vec![2])).unwrap();\n        builder.insert(&Node::KV(vec![1, 2, 5], vec![3])).unwrap();\n\n        let map = builder.build();\n        let mut range = map.range(&[1u8, 2, 3][..]..&[1u8, 2, 5][..]).rev();\n        assert_eq!(range.next().unwrap().unwrap(), (&[1, 2, 4][..], &[2][..]));\n        assert_eq!(range.next().unwrap().unwrap(), (&[1, 2, 3][..], &[1][..]));\n        assert!(range.next().is_none());\n        assert!(range.next_back().is_none());\n    }\n\n    #[test]\n    #[should_panic(expected = \"MissingData\")]\n    fn range_upper_unbounded_map_non_contiguous() {\n        let mut builder = MapBuilder::new();\n        builder.insert(&Node::KV(vec![1, 2, 3], vec![1])).unwrap();\n        builder.insert(&Node::Hash([1; HASH_LENGTH])).unwrap();\n        builder.insert(&Node::KV(vec![1, 2, 4], vec![1])).unwrap();\n\n        let map = builder.build();\n\n        let mut range = map.range(&[1u8, 2, 3][..]..).rev();\n        range.next().unwrap().unwrap();\n        range.next().unwrap().unwrap();\n    }\n\n    #[test]\n    fn range_reach_proof_end_rev() {\n        let mut builder = MapBuilder::new();\n        builder.insert(&Node::KV(vec![1, 2, 3], vec![1])).unwrap();\n        
builder.insert(&Node::KV(vec![1, 2, 4], vec![2])).unwrap();\n\n        let map = builder.build();\n        let mut range = map.range(..&[1u8, 2, 5][..]).rev();\n        assert_eq!(range.next().unwrap().unwrap(), (&[1, 2, 4][..], &[2][..]));\n        assert_eq!(range.next().unwrap().unwrap(), (&[1, 2, 3][..], &[1][..]));\n        assert!(range.next().is_none());\n    }\n\n    #[test]\n    fn range_unbounded_rev() {\n        let mut builder = MapBuilder::new();\n        builder.insert(&Node::KV(vec![1, 2, 3], vec![1])).unwrap();\n        builder.insert(&Node::KV(vec![1, 2, 4], vec![2])).unwrap();\n\n        let map = builder.build();\n        let mut range = map.range(..).rev();\n        assert_eq!(range.next().unwrap().unwrap(), (&[1, 2, 4][..], &[2][..]));\n        assert_eq!(range.next().unwrap().unwrap(), (&[1, 2, 3][..], &[1][..]));\n        assert!(range.next().is_none());\n    }\n\n    #[test]\n    fn map_join() {\n        let mut builder = MapBuilder::new();\n        builder.insert(&Node::KV(vec![1], vec![1])).unwrap();\n        builder.insert(&Node::KV(vec![2], vec![1])).unwrap();\n        builder.insert(&Node::KV(vec![3], vec![1])).unwrap();\n        builder.insert(&Node::Hash([0; HASH_LENGTH])).unwrap();\n        builder.insert(&Node::KV(vec![5], vec![1])).unwrap();\n        let a = builder.build();\n\n        let mut builder = MapBuilder::new();\n        builder.insert(&Node::KV(vec![1], vec![1])).unwrap();\n        builder.insert(&Node::Hash([0; HASH_LENGTH])).unwrap();\n        builder.insert(&Node::KV(vec![3], vec![1])).unwrap();\n        builder.insert(&Node::KV(vec![4], vec![1])).unwrap();\n        builder.insert(&Node::Hash([0; HASH_LENGTH])).unwrap();\n        let b = builder.build();\n\n        let joined = a.join(b);\n\n        let mut range = joined.range(..=&[4][..]);\n        assert_eq!(range.next().unwrap().unwrap(), (&[1][..], &[1][..]));\n        assert_eq!(range.next().unwrap().unwrap(), (&[2][..], &[1][..]));\n        
assert_eq!(range.next().unwrap().unwrap(), (&[3][..], &[1][..]));\n        assert_eq!(range.next().unwrap().unwrap(), (&[4][..], &[1][..]));\n        assert!(range.next().is_none());\n\n        let mut range = joined.range(&[5][..]..);\n        assert_eq!(range.next().unwrap().unwrap(), (&[5][..], &[1][..]));\n        assert!(range.next().is_none());\n    }\n}\n"
  },
  {
    "path": "src/proofs/query/mod.rs",
    "content": "mod map;\n\n#[cfg(feature = \"full\")]\nuse {super::Op, std::collections::LinkedList};\n\nuse super::tree::execute;\nuse super::{Decoder, Node};\nuse crate::error::{Error, Result};\nuse crate::tree::{Fetch, Hash, Link, RefWalker};\nuse std::cmp::{max, min, Ordering};\nuse std::collections::BTreeSet;\nuse std::ops::RangeInclusive;\n\npub use map::*;\n\n/// `Query` represents one or more keys or ranges of keys, which can be used to\n/// resolve a proof which will include all of the requested values.\n#[derive(Default)]\npub struct Query {\n    items: BTreeSet<QueryItem>,\n}\n\nimpl Query {\n    /// Creates a new query which contains no items.\n    pub fn new() -> Self {\n        Default::default()\n    }\n\n    pub(crate) fn len(&self) -> usize {\n        self.items.len()\n    }\n\n    pub(crate) fn iter(&self) -> impl Iterator<Item = &QueryItem> {\n        self.items.iter()\n    }\n\n    /// Adds an individual key to the query, so that its value (or its absence)\n    /// in the tree will be included in the resulting proof.\n    ///\n    /// If the key or a range including the key already exists in the query,\n    /// this will have no effect. If the query already includes a range that has\n    /// a non-inclusive bound equal to the key, the bound will be changed to be\n    /// inclusive.\n    pub fn insert_key(&mut self, key: Vec<u8>) {\n        let key = QueryItem::Key(key);\n        self.items.insert(key);\n    }\n\n    /// Adds a range to the query, so that all the entries in the tree with keys\n    /// in the range will be included in the resulting proof.\n    ///\n    /// If a range including the range already exists in the query, this will\n    /// have no effect. 
If the query already includes a range that overlaps with\n    /// the range, the ranges will be joined together.\n    pub fn insert_range(&mut self, range: std::ops::Range<Vec<u8>>) {\n        let range = QueryItem::Range(range);\n        self.insert_item(range);\n    }\n\n    /// Adds an inclusive range to the query, so that all the entries in the\n    /// tree with keys in the range will be included in the resulting proof.\n    ///\n    /// If a range including the range already exists in the query, this will\n    /// have no effect. If the query already includes a range that overlaps with\n    /// the range, the ranges will be merged together.\n    pub fn insert_range_inclusive(&mut self, range: RangeInclusive<Vec<u8>>) {\n        let range = QueryItem::RangeInclusive(range);\n        self.insert_item(range);\n    }\n\n    /// Adds the `QueryItem` to the query, first checking to see if it collides\n    /// with any existing ranges or keys. All colliding items will be removed\n    /// then merged together so that the query includes the minimum number of\n    /// items (with no items covering any duplicate parts of keyspace) while\n    /// still including every key or range that has been added to the query.\n    pub fn insert_item(&mut self, mut item: QueryItem) {\n        // since `QueryItem::eq` considers items equal if they collide at all\n        // (including keys within ranges or ranges which partially overlap),\n        // `items.take` will remove the first item which collides\n        while let Some(existing) = self.items.take(&item) {\n            item = item.merge(existing);\n        }\n\n        self.items.insert(item);\n    }\n}\n\nimpl<Q: Into<QueryItem>> From<Vec<Q>> for Query {\n    fn from(other: Vec<Q>) -> Self {\n        let items = other.into_iter().map(Into::into).collect();\n        Query { items }\n    }\n}\n\nimpl From<Query> for Vec<QueryItem> {\n    fn from(q: Query) -> Vec<QueryItem> {\n        q.into_iter().collect()\n    }\n}\n\nimpl 
IntoIterator for Query {\n    type Item = QueryItem;\n    type IntoIter = <BTreeSet<QueryItem> as IntoIterator>::IntoIter;\n\n    fn into_iter(self) -> Self::IntoIter {\n        self.items.into_iter()\n    }\n}\n\n/// A `QueryItem` represents a key or range of keys to be included in a proof.\n#[derive(Clone, Debug)]\npub enum QueryItem {\n    Key(Vec<u8>),\n    Range(std::ops::Range<Vec<u8>>),\n    RangeInclusive(RangeInclusive<Vec<u8>>),\n}\n\nimpl QueryItem {\n    pub fn lower_bound(&self) -> &[u8] {\n        match self {\n            QueryItem::Key(key) => key.as_slice(),\n            QueryItem::Range(range) => range.start.as_ref(),\n            QueryItem::RangeInclusive(range) => range.start().as_ref(),\n        }\n    }\n\n    pub fn upper_bound(&self) -> (&[u8], bool) {\n        match self {\n            QueryItem::Key(key) => (key.as_slice(), true),\n            QueryItem::Range(range) => (range.end.as_ref(), false),\n            QueryItem::RangeInclusive(range) => (range.end().as_ref(), true),\n        }\n    }\n\n    pub fn contains(&self, key: &[u8]) -> bool {\n        let (bound, inclusive) = self.upper_bound();\n        return key >= self.lower_bound() && (key < bound || (key == bound && inclusive));\n    }\n\n    fn merge(self, other: QueryItem) -> QueryItem {\n        // TODO: don't copy into new vecs\n        let start = min(self.lower_bound(), other.lower_bound()).to_vec();\n        let end = max(self.upper_bound(), other.upper_bound());\n        if end.1 {\n            QueryItem::RangeInclusive(RangeInclusive::new(start, end.0.to_vec()))\n        } else {\n            QueryItem::Range(std::ops::Range {\n                start,\n                end: end.0.to_vec(),\n            })\n        }\n    }\n}\n\nimpl PartialEq for QueryItem {\n    fn eq(&self, other: &QueryItem) -> bool {\n        self.cmp(other) == Ordering::Equal\n    }\n}\n\nimpl PartialEq<&[u8]> for QueryItem {\n    fn eq(&self, other: &&[u8]) -> bool {\n        
matches!(self.partial_cmp(other), Some(Ordering::Equal))\n    }\n}\n\nimpl Eq for QueryItem {}\n\nimpl Ord for QueryItem {\n    fn cmp(&self, other: &QueryItem) -> Ordering {\n        let cmp_lu = self.lower_bound().cmp(other.upper_bound().0);\n        let cmp_ul = self.upper_bound().0.cmp(other.lower_bound());\n        let self_inclusive = self.upper_bound().1;\n        let other_inclusive = other.upper_bound().1;\n\n        match (cmp_lu, cmp_ul) {\n            (Ordering::Less, Ordering::Less) => Ordering::Less,\n            (Ordering::Less, Ordering::Equal) => match self_inclusive {\n                true => Ordering::Equal,\n                false => Ordering::Less,\n            },\n            (Ordering::Less, Ordering::Greater) => Ordering::Equal,\n            (Ordering::Equal, _) => match other_inclusive {\n                true => Ordering::Equal,\n                false => Ordering::Greater,\n            },\n            (Ordering::Greater, _) => Ordering::Greater,\n        }\n    }\n}\n\nimpl PartialOrd for QueryItem {\n    fn partial_cmp(&self, other: &QueryItem) -> Option<Ordering> {\n        Some(self.cmp(other))\n    }\n}\n\nimpl PartialOrd<&[u8]> for QueryItem {\n    fn partial_cmp(&self, other: &&[u8]) -> Option<Ordering> {\n        let other = QueryItem::Key(other.to_vec());\n        Some(self.cmp(&other))\n    }\n}\n\nimpl From<Vec<u8>> for QueryItem {\n    fn from(key: Vec<u8>) -> Self {\n        QueryItem::Key(key)\n    }\n}\n\nimpl Link {\n    /// Creates a `Node::Hash` from this link. Panics if the link is of variant\n    /// `Link::Modified` since its hash has not yet been computed.\n    #[cfg(feature = \"full\")]\n    fn to_hash_node(&self) -> Node {\n        let hash = match self {\n            Link::Reference { hash, .. } => hash,\n            Link::Modified { .. } => {\n                panic!(\"Cannot convert Link::Modified to proof hash node\");\n            }\n            Link::Uncommitted { hash, .. 
} => hash,\n            Link::Loaded { hash, .. } => hash,\n        };\n        Node::Hash(*hash)\n    }\n}\n\nimpl<'a, S> RefWalker<'a, S>\nwhere\n    S: Fetch + Sized + Send + Clone,\n{\n    /// Creates a `Node::KV` from the key/value pair of the root node.\n    pub(crate) fn to_kv_node(&self) -> Node {\n        Node::KV(self.tree().key().to_vec(), self.tree().value().to_vec())\n    }\n\n    /// Creates a `Node::KVHash` from the hash of the key/value pair of the root\n    /// node.\n    pub(crate) fn to_kvhash_node(&self) -> Node {\n        Node::KVHash(*self.tree().kv_hash())\n    }\n\n    /// Creates a `Node::Hash` from the hash of the node.\n    pub(crate) fn to_hash_node(&self) -> Node {\n        Node::Hash(self.tree().hash())\n    }\n\n    /// Generates a proof for the list of queried keys. Returns a tuple\n    /// containing the generated proof operators, and a tuple representing if\n    /// any keys were queried were less than the left edge or greater than the\n    /// right edge, respectively.\n    #[cfg(feature = \"full\")]\n    pub(crate) fn create_proof(\n        &mut self,\n        query: &[QueryItem],\n    ) -> Result<(LinkedList<Op>, (bool, bool))> {\n        // TODO: don't copy into vec, support comparing QI to byte slice\n        let node_key = QueryItem::Key(self.tree().key().to_vec());\n        let search = query.binary_search_by(|key| key.cmp(&node_key));\n\n        let (left_items, right_items) = match search {\n            Ok(index) => {\n                let item = &query[index];\n                let left_bound = item.lower_bound();\n                let right_bound = item.upper_bound().0;\n\n                // if range starts before this node's key, include it in left\n                // child's query\n                let left_query = if left_bound < self.tree().key() {\n                    &query[..=index]\n                } else {\n                    &query[..index]\n                };\n\n                // if range ends after this node's 
key, include it in right\n                // child's query\n                let right_query = if right_bound > self.tree().key() {\n                    &query[index..]\n                } else {\n                    &query[index + 1..]\n                };\n\n                (left_query, right_query)\n            }\n            Err(index) => (&query[..index], &query[index..]),\n        };\n\n        let (mut proof, left_absence) = self.create_child_proof(true, left_items)?;\n        let (mut right_proof, right_absence) = self.create_child_proof(false, right_items)?;\n\n        let (has_left, has_right) = (!proof.is_empty(), !right_proof.is_empty());\n\n        proof.push_back(match search {\n            Ok(_) => Op::Push(self.to_kv_node()),\n            Err(_) => {\n                if left_absence.1 || right_absence.0 {\n                    Op::Push(self.to_kv_node())\n                } else {\n                    Op::Push(self.to_kvhash_node())\n                }\n            }\n        });\n\n        if has_left {\n            proof.push_back(Op::Parent);\n        }\n\n        if has_right {\n            proof.append(&mut right_proof);\n            proof.push_back(Op::Child);\n        }\n\n        Ok((proof, (left_absence.0, right_absence.1)))\n    }\n\n    /// Similar to `create_proof`. Recurses into the child on the given side and\n    /// generates a proof for the queried keys.\n    #[cfg(feature = \"full\")]\n    fn create_child_proof(\n        &mut self,\n        left: bool,\n        query: &[QueryItem],\n    ) -> Result<(LinkedList<Op>, (bool, bool))> {\n        Ok(if !query.is_empty() {\n            if let Some(mut child) = self.walk(left)? 
{\n                child.create_proof(query)?\n            } else {\n                (LinkedList::new(), (true, true))\n            }\n        } else if let Some(link) = self.tree().link(left) {\n            let mut proof = LinkedList::new();\n            proof.push_back(Op::Push(link.to_hash_node()));\n            (proof, (false, false))\n        } else {\n            (LinkedList::new(), (false, false))\n        })\n    }\n}\n\npub fn verify(bytes: &[u8], expected_hash: Hash) -> Result<Map> {\n    let ops = Decoder::new(bytes);\n    let mut map_builder = MapBuilder::new();\n\n    let root = execute(ops, true, |node| map_builder.insert(node))?;\n\n    if root.hash()? != expected_hash {\n        return Err(Error::HashMismatch(expected_hash, root.hash()?));\n    }\n\n    Ok(map_builder.build())\n}\n\n/// Verifies the encoded proof with the given query and expected hash.\n///\n/// Every key in `keys` is checked to either have a key/value pair in the proof,\n/// or to have its absence in the tree proven.\n///\n/// Returns `Err` if the proof is invalid, or a list of proven values associated\n/// with `keys`. For example, if `keys` contains keys `A` and `B`, the returned\n/// list will contain 2 elements, the value of `A` and the value of `B`. 
Keys\n/// proven to be absent in the tree will have an entry of `None`, keys that have\n/// a proven value will have an entry of `Some(value)`.\n#[deprecated]\npub fn verify_query(\n    bytes: &[u8],\n    query: &Query,\n    expected_hash: Hash,\n) -> Result<Vec<(Vec<u8>, Vec<u8>)>> {\n    let mut output = Vec::with_capacity(query.len());\n    let mut last_push = None;\n    let mut query = query.iter().peekable();\n    let mut in_range = false;\n\n    let ops = Decoder::new(bytes);\n\n    let root = execute(ops, true, |node| {\n        if let Node::KV(key, value) = node {\n            while let Some(item) = query.peek() {\n                // get next item in query\n                let query_item = *item;\n                // we have not reached next queried part of tree\n                if *query_item > key.as_slice() {\n                    // continue to next push\n                    break;\n                }\n\n                if !in_range {\n                    // this is the first data we have encountered for this query\n                    // item. 
ensure lower bound of query item is proven\n                    match last_push {\n                        // lower bound is proven - we have an exact match\n                        _ if key == query_item.lower_bound() => {}\n\n                        // lower bound is proven - this is the leftmost node\n                        // in the tree\n                        None => {}\n\n                        // lower bound is proven - the preceding tree node\n                        // is lower than the bound\n                        Some(Node::KV(_, _)) => {}\n\n                        // cannot verify lower bound - we have an abridged\n                        // tree so we cannot tell what the preceding key was\n                        Some(_) => {\n                            return Err(Error::Bound(\n                                \"Cannot verify lower bound of queried range\".into(),\n                            ));\n                        }\n                    }\n                }\n\n                if key.as_slice() >= query_item.upper_bound().0 {\n                    // at or past upper bound of range (or this was an exact\n                    // match on a single-key queryitem), advance to next query\n                    // item\n                    query.next();\n                    in_range = false;\n                } else {\n                    // have not reached upper bound, we expect more values\n                    // to be proven in the range (and all pushes should be\n                    // unabridged until we reach end of range)\n                    in_range = true;\n                }\n\n                // this push matches the queried item\n                if query_item.contains(key) {\n                    // add data to output\n                    output.push((key.clone(), value.clone()));\n\n                    // continue to next push\n                    break;\n                }\n\n                // continue to next queried item\n           
 }\n        } else if in_range {\n            // we encountered a queried range but the proof was abridged (saw a\n            // non-KV push), we are missing some part of the range\n            return Err(Error::MissingData);\n        }\n\n        last_push = Some(node.clone());\n\n        Ok(())\n    })?;\n\n    // we have remaining query items, check absence proof against right edge of\n    // tree\n    if query.peek().is_some() {\n        match last_push {\n            // last node in tree was less than queried item\n            Some(Node::KV(_, _)) => {}\n\n            // proof contains abridged data so we cannot verify absence of\n            // remaining query items\n            _ => {\n                return Err(Error::MissingData);\n            }\n        }\n    }\n\n    if root.hash()? != expected_hash {\n        return Err(Error::HashMismatch(expected_hash, root.hash()?));\n    }\n\n    Ok(output)\n}\n\n#[allow(deprecated)]\n#[cfg(test)]\nmod test {\n    use super::super::encoding::encode_into;\n    use super::super::*;\n    use super::*;\n    use crate::test_utils::make_tree_seq;\n    use crate::tree::{NoopCommit, PanicSource, RefWalker, Tree};\n    use ed::Encode;\n\n    fn make_3_node_tree() -> Result<Tree> {\n        let mut tree = Tree::new(vec![5], vec![5])?\n            .attach(true, Some(Tree::new(vec![3], vec![3])?))\n            .attach(false, Some(Tree::new(vec![7], vec![7])?));\n        tree.commit(&mut NoopCommit {}).expect(\"commit failed\");\n        Ok(tree)\n    }\n\n    fn verify_keys_test(keys: Vec<Vec<u8>>, expected_result: Vec<Option<Vec<u8>>>) -> Result<()> {\n        let mut tree = make_3_node_tree()?;\n        let mut walker = RefWalker::new(&mut tree, PanicSource {});\n\n        let (proof, _) = walker\n            .create_proof(\n                keys.clone()\n                    .into_iter()\n                    .map(QueryItem::Key)\n                    .collect::<Vec<_>>()\n                    .as_slice(),\n            )\n      
      .expect(\"failed to create proof\");\n        let mut bytes = vec![];\n        encode_into(proof.iter(), &mut bytes);\n\n        let expected_hash = [\n            185, 181, 28, 21, 108, 13, 202, 48, 129, 184, 3, 8, 157, 78, 213, 241, 94, 200, 205,\n            95, 179, 177, 195, 177, 216, 233, 164, 73, 102, 32, 141, 37,\n        ];\n\n        let mut query = Query::new();\n        for key in keys.iter() {\n            query.insert_key(key.clone());\n        }\n\n        let result = verify_query(bytes.as_slice(), &query, expected_hash).expect(\"verify failed\");\n\n        let mut values = std::collections::HashMap::new();\n        for (key, value) in result {\n            assert!(values.insert(key, value).is_none());\n        }\n\n        for (key, expected_value) in keys.iter().zip(expected_result.iter()) {\n            assert_eq!(values.get(key), expected_value.as_ref());\n        }\n        Ok(())\n    }\n\n    #[test]\n    fn root_verify() -> Result<()> {\n        verify_keys_test(vec![vec![5]], vec![Some(vec![5])])\n    }\n\n    #[test]\n    fn single_verify() -> Result<()> {\n        verify_keys_test(vec![vec![3]], vec![Some(vec![3])])\n    }\n\n    #[test]\n    fn double_verify() -> Result<()> {\n        verify_keys_test(vec![vec![3], vec![5]], vec![Some(vec![3]), Some(vec![5])])\n    }\n\n    #[test]\n    fn double_verify_2() -> Result<()> {\n        verify_keys_test(vec![vec![3], vec![7]], vec![Some(vec![3]), Some(vec![7])])\n    }\n\n    #[test]\n    fn triple_verify() -> Result<()> {\n        verify_keys_test(\n            vec![vec![3], vec![5], vec![7]],\n            vec![Some(vec![3]), Some(vec![5]), Some(vec![7])],\n        )\n    }\n\n    #[test]\n    fn left_edge_absence_verify() -> Result<()> {\n        verify_keys_test(vec![vec![2]], vec![None])\n    }\n\n    #[test]\n    fn right_edge_absence_verify() -> Result<()> {\n        verify_keys_test(vec![vec![8]], vec![None])\n    }\n\n    #[test]\n    fn inner_absence_verify() -> Result<()> {\n 
       verify_keys_test(vec![vec![6]], vec![None])\n    }\n\n    #[test]\n    fn absent_and_present_verify() -> Result<()> {\n        verify_keys_test(vec![vec![5], vec![6]], vec![Some(vec![5]), None])\n    }\n\n    #[test]\n    fn empty_proof() -> Result<()> {\n        let mut tree = make_3_node_tree()?;\n        let mut walker = RefWalker::new(&mut tree, PanicSource {});\n\n        let (proof, absence) = walker\n            .create_proof(vec![].as_slice())\n            .expect(\"create_proof errored\");\n\n        let mut iter = proof.iter();\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::Hash([\n                203, 210, 184, 52, 29, 56, 76, 7, 155, 239, 81, 16, 54, 13, 106, 27, 44, 218, 198,\n                245, 203, 189, 15, 203, 55, 184, 75, 146, 127, 38, 143, 214\n            ])))\n        );\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::KVHash([\n                169, 4, 73, 65, 62, 49, 160, 159, 37, 166, 195, 249, 63, 31, 23, 11, 169, 0, 24,\n                104, 179, 211, 218, 38, 108, 129, 117, 232, 65, 101, 194, 157\n            ])))\n        );\n        assert_eq!(iter.next(), Some(&Op::Parent));\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::Hash([\n                219, 24, 98, 131, 160, 47, 139, 94, 223, 118, 217, 187, 42, 215, 213, 101, 213,\n                225, 169, 57, 224, 210, 17, 135, 220, 63, 160, 42, 148, 0, 121, 115\n            ])))\n        );\n        assert_eq!(iter.next(), Some(&Op::Child));\n        assert!(iter.next().is_none());\n        assert_eq!(absence, (false, false));\n\n        let mut bytes = vec![];\n        encode_into(proof.iter(), &mut bytes);\n        let res = verify_query(bytes.as_slice(), &Query::new(), tree.hash()).unwrap();\n        assert!(res.is_empty());\n        Ok(())\n    }\n\n    #[test]\n    fn root_proof() -> Result<()> {\n        let mut tree = make_3_node_tree()?;\n        let mut walker = 
RefWalker::new(&mut tree, PanicSource {});\n\n        let queryitems = vec![QueryItem::Key(vec![5])];\n        let (proof, absence) = walker\n            .create_proof(queryitems.as_slice())\n            .expect(\"create_proof errored\");\n\n        let mut iter = proof.iter();\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::Hash([\n                203, 210, 184, 52, 29, 56, 76, 7, 155, 239, 81, 16, 54, 13, 106, 27, 44, 218, 198,\n                245, 203, 189, 15, 203, 55, 184, 75, 146, 127, 38, 143, 214\n            ])))\n        );\n        assert_eq!(iter.next(), Some(&Op::Push(Node::KV(vec![5], vec![5]))));\n        assert_eq!(iter.next(), Some(&Op::Parent));\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::Hash([\n                219, 24, 98, 131, 160, 47, 139, 94, 223, 118, 217, 187, 42, 215, 213, 101, 213,\n                225, 169, 57, 224, 210, 17, 135, 220, 63, 160, 42, 148, 0, 121, 115\n            ])))\n        );\n        assert_eq!(iter.next(), Some(&Op::Child));\n        assert!(iter.next().is_none());\n        assert_eq!(absence, (false, false));\n\n        let mut bytes = vec![];\n        encode_into(proof.iter(), &mut bytes);\n        let mut query = Query::new();\n        for item in queryitems {\n            query.insert_item(item);\n        }\n        let res = verify_query(bytes.as_slice(), &query, tree.hash()).unwrap();\n        assert_eq!(res, vec![(vec![5], vec![5])]);\n        Ok(())\n    }\n\n    #[test]\n    fn leaf_proof() -> Result<()> {\n        let mut tree = make_3_node_tree()?;\n        let mut walker = RefWalker::new(&mut tree, PanicSource {});\n\n        let queryitems = vec![QueryItem::Key(vec![3])];\n        let (proof, absence) = walker\n            .create_proof(queryitems.as_slice())\n            .expect(\"create_proof errored\");\n\n        let mut iter = proof.iter();\n        assert_eq!(iter.next(), Some(&Op::Push(Node::KV(vec![3], vec![3]))));\n        
assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::KVHash([\n                169, 4, 73, 65, 62, 49, 160, 159, 37, 166, 195, 249, 63, 31, 23, 11, 169, 0, 24,\n                104, 179, 211, 218, 38, 108, 129, 117, 232, 65, 101, 194, 157\n            ])))\n        );\n        assert_eq!(iter.next(), Some(&Op::Parent));\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::Hash([\n                219, 24, 98, 131, 160, 47, 139, 94, 223, 118, 217, 187, 42, 215, 213, 101, 213,\n                225, 169, 57, 224, 210, 17, 135, 220, 63, 160, 42, 148, 0, 121, 115\n            ])))\n        );\n        assert_eq!(iter.next(), Some(&Op::Child));\n        assert!(iter.next().is_none());\n        assert_eq!(absence, (false, false));\n\n        let mut bytes = vec![];\n        encode_into(proof.iter(), &mut bytes);\n        let mut query = Query::new();\n        for item in queryitems {\n            query.insert_item(item);\n        }\n        let res = verify_query(bytes.as_slice(), &query, tree.hash()).unwrap();\n        assert_eq!(res, vec![(vec![3], vec![3])]);\n        Ok(())\n    }\n\n    #[test]\n    fn double_leaf_proof() -> Result<()> {\n        let mut tree = make_3_node_tree()?;\n        let mut walker = RefWalker::new(&mut tree, PanicSource {});\n\n        let queryitems = vec![QueryItem::Key(vec![3]), QueryItem::Key(vec![7])];\n        let (proof, absence) = walker\n            .create_proof(queryitems.as_slice())\n            .expect(\"create_proof errored\");\n\n        let mut iter = proof.iter();\n        assert_eq!(iter.next(), Some(&Op::Push(Node::KV(vec![3], vec![3]))));\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::KVHash([\n                169, 4, 73, 65, 62, 49, 160, 159, 37, 166, 195, 249, 63, 31, 23, 11, 169, 0, 24,\n                104, 179, 211, 218, 38, 108, 129, 117, 232, 65, 101, 194, 157\n            ])))\n        );\n        assert_eq!(iter.next(), 
Some(&Op::Parent));\n        assert_eq!(iter.next(), Some(&Op::Push(Node::KV(vec![7], vec![7]))));\n        assert_eq!(iter.next(), Some(&Op::Child));\n        assert!(iter.next().is_none());\n        assert_eq!(absence, (false, false));\n\n        let mut bytes = vec![];\n        encode_into(proof.iter(), &mut bytes);\n        let mut query = Query::new();\n        for item in queryitems {\n            query.insert_item(item);\n        }\n        let res = verify_query(bytes.as_slice(), &query, tree.hash()).unwrap();\n        assert_eq!(res, vec![(vec![3], vec![3]), (vec![7], vec![7]),]);\n        Ok(())\n    }\n\n    #[test]\n    fn all_nodes_proof() -> Result<()> {\n        let mut tree = make_3_node_tree()?;\n        let mut walker = RefWalker::new(&mut tree, PanicSource {});\n\n        let queryitems = vec![\n            QueryItem::Key(vec![3]),\n            QueryItem::Key(vec![5]),\n            QueryItem::Key(vec![7]),\n        ];\n        let (proof, absence) = walker\n            .create_proof(queryitems.as_slice())\n            .expect(\"create_proof errored\");\n\n        let mut iter = proof.iter();\n        assert_eq!(iter.next(), Some(&Op::Push(Node::KV(vec![3], vec![3]))));\n        assert_eq!(iter.next(), Some(&Op::Push(Node::KV(vec![5], vec![5]))));\n        assert_eq!(iter.next(), Some(&Op::Parent));\n        assert_eq!(iter.next(), Some(&Op::Push(Node::KV(vec![7], vec![7]))));\n        assert_eq!(iter.next(), Some(&Op::Child));\n        assert!(iter.next().is_none());\n        assert_eq!(absence, (false, false));\n\n        let mut bytes = vec![];\n        encode_into(proof.iter(), &mut bytes);\n        let mut query = Query::new();\n        for item in queryitems {\n            query.insert_item(item);\n        }\n        let res = verify_query(bytes.as_slice(), &query, tree.hash()).unwrap();\n        assert_eq!(\n            res,\n            vec![(vec![3], vec![3]), (vec![5], vec![5]), (vec![7], vec![7]),]\n        );\n        Ok(())\n    }\n\n 
   #[test]\n    fn global_edge_absence_proof() -> Result<()> {\n        let mut tree = make_3_node_tree()?;\n        let mut walker = RefWalker::new(&mut tree, PanicSource {});\n\n        let queryitems = vec![QueryItem::Key(vec![8])];\n        let (proof, absence) = walker\n            .create_proof(queryitems.as_slice())\n            .expect(\"create_proof errored\");\n\n        let mut iter = proof.iter();\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::Hash([\n                203, 210, 184, 52, 29, 56, 76, 7, 155, 239, 81, 16, 54, 13, 106, 27, 44, 218, 198,\n                245, 203, 189, 15, 203, 55, 184, 75, 146, 127, 38, 143, 214\n            ])))\n        );\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::KVHash([\n                169, 4, 73, 65, 62, 49, 160, 159, 37, 166, 195, 249, 63, 31, 23, 11, 169, 0, 24,\n                104, 179, 211, 218, 38, 108, 129, 117, 232, 65, 101, 194, 157\n            ])))\n        );\n        assert_eq!(iter.next(), Some(&Op::Parent));\n        assert_eq!(iter.next(), Some(&Op::Push(Node::KV(vec![7], vec![7]))));\n        assert_eq!(iter.next(), Some(&Op::Child));\n        assert!(iter.next().is_none());\n        assert_eq!(absence, (false, true));\n\n        let mut bytes = vec![];\n        encode_into(proof.iter(), &mut bytes);\n        let mut query = Query::new();\n        for item in queryitems {\n            query.insert_item(item);\n        }\n        let res = verify_query(bytes.as_slice(), &query, tree.hash()).unwrap();\n        assert_eq!(res, vec![]);\n        Ok(())\n    }\n\n    #[test]\n    fn absence_proof() -> Result<()> {\n        let mut tree = make_3_node_tree()?;\n        let mut walker = RefWalker::new(&mut tree, PanicSource {});\n\n        let queryitems = vec![QueryItem::Key(vec![6])];\n        let (proof, absence) = walker\n            .create_proof(queryitems.as_slice())\n            .expect(\"create_proof errored\");\n\n        let 
mut iter = proof.iter();\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::Hash([\n                203, 210, 184, 52, 29, 56, 76, 7, 155, 239, 81, 16, 54, 13, 106, 27, 44, 218, 198,\n                245, 203, 189, 15, 203, 55, 184, 75, 146, 127, 38, 143, 214\n            ])))\n        );\n        assert_eq!(iter.next(), Some(&Op::Push(Node::KV(vec![5], vec![5]))));\n        assert_eq!(iter.next(), Some(&Op::Parent));\n        assert_eq!(iter.next(), Some(&Op::Push(Node::KV(vec![7], vec![7]))));\n        assert_eq!(iter.next(), Some(&Op::Child));\n        assert!(iter.next().is_none());\n        assert_eq!(absence, (false, false));\n\n        let mut bytes = vec![];\n        encode_into(proof.iter(), &mut bytes);\n        let mut query = Query::new();\n        for item in queryitems {\n            query.insert_item(item);\n        }\n        let res = verify_query(bytes.as_slice(), &query, tree.hash()).unwrap();\n        assert_eq!(res, vec![]);\n        Ok(())\n    }\n\n    #[test]\n    fn doc_proof() -> Result<()> {\n        let mut tree = Tree::new(vec![5], vec![5])?\n            .attach(\n                true,\n                Some(\n                    Tree::new(vec![2], vec![2])?\n                        .attach(true, Some(Tree::new(vec![1], vec![1])?))\n                        .attach(\n                            false,\n                            Some(\n                                Tree::new(vec![4], vec![4])?\n                                    .attach(true, Some(Tree::new(vec![3], vec![3])?)),\n                            ),\n                        ),\n                ),\n            )\n            .attach(\n                false,\n                Some(\n                    Tree::new(vec![9], vec![9])?\n                        .attach(\n                            true,\n                            Some(\n                                Tree::new(vec![7], vec![7])?\n                                    .attach(true, 
Some(Tree::new(vec![6], vec![6])?))\n                                    .attach(false, Some(Tree::new(vec![8], vec![8])?)),\n                            ),\n                        )\n                        .attach(\n                            false,\n                            Some(\n                                Tree::new(vec![11], vec![11])?\n                                    .attach(true, Some(Tree::new(vec![10], vec![10])?)),\n                            ),\n                        ),\n                ),\n            );\n        tree.commit(&mut NoopCommit {}).unwrap();\n\n        let mut walker = RefWalker::new(&mut tree, PanicSource {});\n\n        let queryitems = vec![\n            QueryItem::Key(vec![1]),\n            QueryItem::Key(vec![2]),\n            QueryItem::Key(vec![3]),\n            QueryItem::Key(vec![4]),\n        ];\n        let (proof, absence) = walker\n            .create_proof(queryitems.as_slice())\n            .expect(\"create_proof errored\");\n\n        let mut iter = proof.iter();\n        assert_eq!(iter.next(), Some(&Op::Push(Node::KV(vec![1], vec![1]))));\n        assert_eq!(iter.next(), Some(&Op::Push(Node::KV(vec![2], vec![2]))));\n        assert_eq!(iter.next(), Some(&Op::Parent));\n        assert_eq!(iter.next(), Some(&Op::Push(Node::KV(vec![3], vec![3]))));\n        assert_eq!(iter.next(), Some(&Op::Push(Node::KV(vec![4], vec![4]))));\n        assert_eq!(iter.next(), Some(&Op::Parent));\n        assert_eq!(iter.next(), Some(&Op::Child));\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::KVHash([\n                169, 4, 73, 65, 62, 49, 160, 159, 37, 166, 195, 249, 63, 31, 23, 11, 169, 0, 24,\n                104, 179, 211, 218, 38, 108, 129, 117, 232, 65, 101, 194, 157\n            ])))\n        );\n        assert_eq!(iter.next(), Some(&Op::Parent));\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::Hash([\n                148, 241, 151, 144, 247, 220, 92, 
79, 70, 252, 168, 222, 27, 218, 53, 156, 0, 136,\n                161, 107, 83, 78, 150, 246, 51, 230, 164, 248, 17, 30, 147, 91\n            ])))\n        );\n        assert_eq!(iter.next(), Some(&Op::Child));\n        assert!(iter.next().is_none());\n        assert_eq!(absence, (false, false));\n\n        let mut bytes = vec![];\n        encode_into(proof.iter(), &mut bytes);\n        assert_eq!(\n            bytes,\n            vec![\n                3, 0, 1, 1, 0, 1, 1, 3, 0, 1, 2, 0, 1, 2, 16, 3, 0, 1, 3, 0, 1, 3, 3, 0, 1, 4, 0,\n                1, 4, 16, 17, 2, 169, 4, 73, 65, 62, 49, 160, 159, 37, 166, 195, 249, 63, 31, 23,\n                11, 169, 0, 24, 104, 179, 211, 218, 38, 108, 129, 117, 232, 65, 101, 194, 157, 16,\n                1, 148, 241, 151, 144, 247, 220, 92, 79, 70, 252, 168, 222, 27, 218, 53, 156, 0,\n                136, 161, 107, 83, 78, 150, 246, 51, 230, 164, 248, 17, 30, 147, 91, 17\n            ]\n        );\n\n        let mut bytes = vec![];\n        encode_into(proof.iter(), &mut bytes);\n        let mut query = Query::new();\n        for item in queryitems {\n            query.insert_item(item);\n        }\n        let res = verify_query(bytes.as_slice(), &query, tree.hash()).unwrap();\n        assert_eq!(\n            res,\n            vec![\n                (vec![1], vec![1]),\n                (vec![2], vec![2]),\n                (vec![3], vec![3]),\n                (vec![4], vec![4]),\n            ]\n        );\n        Ok(())\n    }\n\n    #[test]\n    fn query_item_cmp() {\n        assert!(QueryItem::Key(vec![10]) < QueryItem::Key(vec![20]));\n        assert!(QueryItem::Key(vec![10]) == QueryItem::Key(vec![10]));\n        assert!(QueryItem::Key(vec![20]) > QueryItem::Key(vec![10]));\n\n        assert!(QueryItem::Key(vec![10]) < QueryItem::Range(vec![20]..vec![30]));\n        assert!(QueryItem::Key(vec![10]) == QueryItem::Range(vec![10]..vec![20]));\n        assert!(QueryItem::Key(vec![15]) == 
QueryItem::Range(vec![10]..vec![20]));\n        assert!(QueryItem::Key(vec![20]) > QueryItem::Range(vec![10]..vec![20]));\n        assert!(QueryItem::Key(vec![20]) == QueryItem::RangeInclusive(vec![10]..=vec![20]));\n        assert!(QueryItem::Key(vec![30]) > QueryItem::Range(vec![10]..vec![20]));\n\n        assert!(QueryItem::Range(vec![10]..vec![20]) < QueryItem::Range(vec![30]..vec![40]));\n        assert!(QueryItem::Range(vec![10]..vec![20]) < QueryItem::Range(vec![20]..vec![30]));\n        assert!(\n            QueryItem::RangeInclusive(vec![10]..=vec![20]) == QueryItem::Range(vec![20]..vec![30])\n        );\n        assert!(QueryItem::Range(vec![15]..vec![25]) == QueryItem::Range(vec![20]..vec![30]));\n        assert!(QueryItem::Range(vec![20]..vec![30]) > QueryItem::Range(vec![10]..vec![20]));\n    }\n\n    #[test]\n    fn query_item_merge() {\n        let mine = QueryItem::Range(vec![10]..vec![30]);\n        let other = QueryItem::Range(vec![15]..vec![20]);\n        assert_eq!(mine.merge(other), QueryItem::Range(vec![10]..vec![30]));\n\n        let mine = QueryItem::RangeInclusive(vec![10]..=vec![30]);\n        let other = QueryItem::Range(vec![20]..vec![30]);\n        assert_eq!(\n            mine.merge(other),\n            QueryItem::RangeInclusive(vec![10]..=vec![30])\n        );\n\n        let mine = QueryItem::Key(vec![5]);\n        let other = QueryItem::Range(vec![1]..vec![10]);\n        assert_eq!(mine.merge(other), QueryItem::Range(vec![1]..vec![10]));\n\n        let mine = QueryItem::Key(vec![10]);\n        let other = QueryItem::RangeInclusive(vec![1]..=vec![10]);\n        assert_eq!(\n            mine.merge(other),\n            QueryItem::RangeInclusive(vec![1]..=vec![10])\n        );\n    }\n\n    #[test]\n    fn query_insert() {\n        let mut query = Query::new();\n        query.insert_key(vec![2]);\n        query.insert_range(vec![3]..vec![5]);\n        query.insert_range_inclusive(vec![5]..=vec![7]);\n        
query.insert_range(vec![4]..vec![6]);\n        query.insert_key(vec![5]);\n\n        let mut iter = query.items.iter();\n        assert_eq!(format!(\"{:?}\", iter.next()), \"Some(Key([2]))\");\n        assert_eq!(\n            format!(\"{:?}\", iter.next()),\n            \"Some(RangeInclusive([3]..=[7]))\"\n        );\n        assert_eq!(iter.next(), None);\n    }\n\n    #[test]\n    fn range_proof() {\n        let mut tree = make_tree_seq(10);\n        let mut walker = RefWalker::new(&mut tree, PanicSource {});\n\n        let queryitems = vec![QueryItem::Range(\n            vec![0, 0, 0, 0, 0, 0, 0, 5]..vec![0, 0, 0, 0, 0, 0, 0, 7],\n        )];\n        let (proof, absence) = walker\n            .create_proof(queryitems.as_slice())\n            .expect(\"create_proof errored\");\n\n        let mut iter = proof.iter();\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::Hash([\n                131, 182, 249, 107, 5, 43, 253, 172, 175, 5, 92, 100, 112, 7, 61, 179, 216, 127,\n                180, 104, 127, 239, 76, 175, 20, 208, 82, 101, 163, 177, 107, 229\n            ])))\n        );\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::KVHash([\n                252, 83, 231, 211, 74, 65, 100, 80, 251, 110, 182, 76, 90, 44, 213, 30, 241, 239,\n                2, 5, 216, 202, 184, 130, 47, 53, 146, 68, 179, 22, 45, 30\n            ])))\n        );\n        assert_eq!(iter.next(), Some(&Op::Parent));\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::Hash([\n                71, 142, 184, 184, 188, 130, 2, 241, 17, 17, 179, 82, 112, 27, 31, 20, 92, 69, 145,\n                176, 112, 235, 30, 16, 54, 157, 64, 114, 154, 54, 63, 253\n            ])))\n        );\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::KV(\n                vec![0, 0, 0, 0, 0, 0, 0, 5],\n                vec![123; 60]\n            )))\n        );\n        assert_eq!(iter.next(), 
Some(&Op::Parent));\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::KV(\n                vec![0, 0, 0, 0, 0, 0, 0, 6],\n                vec![123; 60]\n            )))\n        );\n        assert_eq!(iter.next(), Some(&Op::Child));\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::KV(\n                vec![0, 0, 0, 0, 0, 0, 0, 7],\n                vec![123; 60]\n            )))\n        );\n        assert_eq!(iter.next(), Some(&Op::Parent));\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::Hash([\n                150, 100, 68, 82, 53, 2, 5, 199, 230, 152, 77, 216, 114, 30, 205, 210, 226, 140,\n                161, 62, 235, 10, 116, 142, 115, 201, 56, 218, 44, 151, 86, 154\n            ])))\n        );\n        assert_eq!(iter.next(), Some(&Op::Child));\n        assert_eq!(iter.next(), Some(&Op::Child));\n        assert!(iter.next().is_none());\n        assert_eq!(absence, (false, false));\n\n        let mut bytes = vec![];\n        encode_into(proof.iter(), &mut bytes);\n        let mut query = Query::new();\n        for item in queryitems {\n            query.insert_item(item);\n        }\n        let res = verify_query(bytes.as_slice(), &query, tree.hash()).unwrap();\n        assert_eq!(\n            res,\n            vec![\n                (vec![0, 0, 0, 0, 0, 0, 0, 5], vec![123; 60]),\n                (vec![0, 0, 0, 0, 0, 0, 0, 6], vec![123; 60]),\n            ]\n        );\n    }\n\n    #[test]\n    fn range_proof_inclusive() {\n        let mut tree = make_tree_seq(10);\n        let mut walker = RefWalker::new(&mut tree, PanicSource {});\n\n        let queryitems = vec![QueryItem::RangeInclusive(\n            vec![0, 0, 0, 0, 0, 0, 0, 5]..=vec![0, 0, 0, 0, 0, 0, 0, 7],\n        )];\n        let (proof, absence) = walker\n            .create_proof(queryitems.as_slice())\n            .expect(\"create_proof errored\");\n\n        let mut iter = proof.iter();\n        
assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::Hash([\n                131, 182, 249, 107, 5, 43, 253, 172, 175, 5, 92, 100, 112, 7, 61, 179, 216, 127,\n                180, 104, 127, 239, 76, 175, 20, 208, 82, 101, 163, 177, 107, 229\n            ])))\n        );\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::KVHash([\n                252, 83, 231, 211, 74, 65, 100, 80, 251, 110, 182, 76, 90, 44, 213, 30, 241, 239,\n                2, 5, 216, 202, 184, 130, 47, 53, 146, 68, 179, 22, 45, 30\n            ])))\n        );\n        assert_eq!(iter.next(), Some(&Op::Parent));\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::Hash([\n                71, 142, 184, 184, 188, 130, 2, 241, 17, 17, 179, 82, 112, 27, 31, 20, 92, 69, 145,\n                176, 112, 235, 30, 16, 54, 157, 64, 114, 154, 54, 63, 253\n            ])))\n        );\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::KV(\n                vec![0, 0, 0, 0, 0, 0, 0, 5],\n                vec![123; 60]\n            )))\n        );\n        assert_eq!(iter.next(), Some(&Op::Parent));\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::KV(\n                vec![0, 0, 0, 0, 0, 0, 0, 6],\n                vec![123; 60]\n            )))\n        );\n        assert_eq!(iter.next(), Some(&Op::Child));\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::KV(\n                vec![0, 0, 0, 0, 0, 0, 0, 7],\n                vec![123; 60]\n            )))\n        );\n        assert_eq!(iter.next(), Some(&Op::Parent));\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::Hash([\n                150, 100, 68, 82, 53, 2, 5, 199, 230, 152, 77, 216, 114, 30, 205, 210, 226, 140,\n                161, 62, 235, 10, 116, 142, 115, 201, 56, 218, 44, 151, 86, 154\n            ])))\n        );\n        assert_eq!(iter.next(), 
Some(&Op::Child));\n        assert_eq!(iter.next(), Some(&Op::Child));\n        assert!(iter.next().is_none());\n        assert_eq!(absence, (false, false));\n\n        let mut bytes = vec![];\n        encode_into(proof.iter(), &mut bytes);\n        let mut query = Query::new();\n        for item in queryitems {\n            query.insert_item(item);\n        }\n        let res = verify_query(bytes.as_slice(), &query, tree.hash()).unwrap();\n        assert_eq!(\n            res,\n            vec![\n                (vec![0, 0, 0, 0, 0, 0, 0, 5], vec![123; 60]),\n                (vec![0, 0, 0, 0, 0, 0, 0, 6], vec![123; 60]),\n                (vec![0, 0, 0, 0, 0, 0, 0, 7], vec![123; 60]),\n            ]\n        );\n    }\n\n    #[test]\n    fn range_proof_missing_upper_bound() {\n        let mut tree = make_tree_seq(10);\n        let mut walker = RefWalker::new(&mut tree, PanicSource {});\n\n        let queryitems = vec![QueryItem::Range(\n            vec![0, 0, 0, 0, 0, 0, 0, 5]..vec![0, 0, 0, 0, 0, 0, 0, 6, 5],\n        )];\n        let (proof, absence) = walker\n            .create_proof(queryitems.as_slice())\n            .expect(\"create_proof errored\");\n\n        let mut iter = proof.iter();\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::Hash([\n                131, 182, 249, 107, 5, 43, 253, 172, 175, 5, 92, 100, 112, 7, 61, 179, 216, 127,\n                180, 104, 127, 239, 76, 175, 20, 208, 82, 101, 163, 177, 107, 229\n            ])))\n        );\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::KVHash([\n                252, 83, 231, 211, 74, 65, 100, 80, 251, 110, 182, 76, 90, 44, 213, 30, 241, 239,\n                2, 5, 216, 202, 184, 130, 47, 53, 146, 68, 179, 22, 45, 30\n            ])))\n        );\n        assert_eq!(iter.next(), Some(&Op::Parent));\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::Hash([\n                71, 142, 184, 184, 188, 130, 2, 
241, 17, 17, 179, 82, 112, 27, 31, 20, 92, 69, 145,\n                176, 112, 235, 30, 16, 54, 157, 64, 114, 154, 54, 63, 253\n            ])))\n        );\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::KV(\n                vec![0, 0, 0, 0, 0, 0, 0, 5],\n                vec![123; 60]\n            )))\n        );\n        assert_eq!(iter.next(), Some(&Op::Parent));\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::KV(\n                vec![0, 0, 0, 0, 0, 0, 0, 6],\n                vec![123; 60]\n            )))\n        );\n        assert_eq!(iter.next(), Some(&Op::Child));\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::KV(\n                vec![0, 0, 0, 0, 0, 0, 0, 7],\n                vec![123; 60]\n            )))\n        );\n        assert_eq!(iter.next(), Some(&Op::Parent));\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::Hash([\n                150, 100, 68, 82, 53, 2, 5, 199, 230, 152, 77, 216, 114, 30, 205, 210, 226, 140,\n                161, 62, 235, 10, 116, 142, 115, 201, 56, 218, 44, 151, 86, 154\n            ])))\n        );\n        assert_eq!(iter.next(), Some(&Op::Child));\n        assert_eq!(iter.next(), Some(&Op::Child));\n        assert!(iter.next().is_none());\n        assert_eq!(absence, (false, false));\n\n        let mut bytes = vec![];\n        encode_into(proof.iter(), &mut bytes);\n        let mut query = Query::new();\n        for item in queryitems {\n            query.insert_item(item);\n        }\n        let res = verify_query(bytes.as_slice(), &query, tree.hash()).unwrap();\n        assert_eq!(\n            res,\n            vec![\n                (vec![0, 0, 0, 0, 0, 0, 0, 5], vec![123; 60]),\n                (vec![0, 0, 0, 0, 0, 0, 0, 6], vec![123; 60]),\n            ]\n        );\n    }\n\n    #[test]\n    fn range_proof_missing_lower_bound() {\n        let mut tree = make_tree_seq(10);\n        let 
mut walker = RefWalker::new(&mut tree, PanicSource {});\n\n        let queryitems = vec![\n            // 7 is not inclusive\n            QueryItem::Range(vec![0, 0, 0, 0, 0, 0, 0, 5, 5]..vec![0, 0, 0, 0, 0, 0, 0, 7]),\n        ];\n        let (proof, absence) = walker\n            .create_proof(queryitems.as_slice())\n            .expect(\"create_proof errored\");\n\n        let mut iter = proof.iter();\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::Hash([\n                131, 182, 249, 107, 5, 43, 253, 172, 175, 5, 92, 100, 112, 7, 61, 179, 216, 127,\n                180, 104, 127, 239, 76, 175, 20, 208, 82, 101, 163, 177, 107, 229\n            ])))\n        );\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::KVHash([\n                252, 83, 231, 211, 74, 65, 100, 80, 251, 110, 182, 76, 90, 44, 213, 30, 241, 239,\n                2, 5, 216, 202, 184, 130, 47, 53, 146, 68, 179, 22, 45, 30\n            ])))\n        );\n        assert_eq!(iter.next(), Some(&Op::Parent));\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::Hash([\n                71, 142, 184, 184, 188, 130, 2, 241, 17, 17, 179, 82, 112, 27, 31, 20, 92, 69, 145,\n                176, 112, 235, 30, 16, 54, 157, 64, 114, 154, 54, 63, 253\n            ])))\n        );\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::KV(\n                vec![0, 0, 0, 0, 0, 0, 0, 5],\n                vec![123; 60]\n            )))\n        );\n        assert_eq!(iter.next(), Some(&Op::Parent));\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::KV(\n                vec![0, 0, 0, 0, 0, 0, 0, 6],\n                vec![123; 60]\n            )))\n        );\n        assert_eq!(iter.next(), Some(&Op::Child));\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::KV(\n                vec![0, 0, 0, 0, 0, 0, 0, 7],\n                vec![123; 
60]\n            )))\n        );\n        assert_eq!(iter.next(), Some(&Op::Parent));\n        assert_eq!(\n            iter.next(),\n            Some(&Op::Push(Node::Hash([\n                150, 100, 68, 82, 53, 2, 5, 199, 230, 152, 77, 216, 114, 30, 205, 210, 226, 140,\n                161, 62, 235, 10, 116, 142, 115, 201, 56, 218, 44, 151, 86, 154\n            ])))\n        );\n        assert_eq!(iter.next(), Some(&Op::Child));\n        assert_eq!(iter.next(), Some(&Op::Child));\n        assert!(iter.next().is_none());\n        assert_eq!(absence, (false, false));\n\n        let mut bytes = vec![];\n        encode_into(proof.iter(), &mut bytes);\n        let mut query = Query::new();\n        for item in queryitems {\n            query.insert_item(item);\n        }\n        let res = verify_query(bytes.as_slice(), &query, tree.hash()).unwrap();\n        assert_eq!(res, vec![(vec![0, 0, 0, 0, 0, 0, 0, 6], vec![123; 60]),]);\n    }\n\n    #[test]\n    fn query_from_vec() {\n        let queryitems = vec![QueryItem::Range(\n            vec![0, 0, 0, 0, 0, 0, 0, 5, 5]..vec![0, 0, 0, 0, 0, 0, 0, 7],\n        )];\n        let query = Query::from(queryitems);\n\n        let mut expected = BTreeSet::new();\n        expected.insert(QueryItem::Range(\n            vec![0, 0, 0, 0, 0, 0, 0, 5, 5]..vec![0, 0, 0, 0, 0, 0, 0, 7],\n        ));\n        assert_eq!(query.items, expected);\n    }\n\n    #[test]\n    fn query_into_vec() {\n        let mut query = Query::new();\n        query.insert_item(QueryItem::Range(\n            vec![0, 0, 0, 0, 0, 0, 5, 5]..vec![0, 0, 0, 0, 0, 0, 0, 7],\n        ));\n        let query_vec: Vec<QueryItem> = query.into();\n        let expected = vec![QueryItem::Range(\n            vec![0, 0, 0, 0, 0, 0, 5, 5]..vec![0, 0, 0, 0, 0, 0, 0, 7],\n        )];\n        assert_eq!(\n            query_vec.get(0).unwrap().lower_bound(),\n            expected.get(0).unwrap().lower_bound()\n        );\n        assert_eq!(\n            
query_vec.get(0).unwrap().upper_bound(),\n            expected.get(0).unwrap().upper_bound()\n        );\n    }\n\n    #[test]\n    fn query_item_from_vec_u8() {\n        let queryitems: Vec<u8> = vec![42];\n        let query = QueryItem::from(queryitems);\n\n        let expected = QueryItem::Key(vec![42]);\n        assert_eq!(query, expected);\n    }\n\n    #[test]\n    fn verify_ops() -> Result<()> {\n        let mut tree = Tree::new(vec![5], vec![5])?;\n        tree.commit(&mut NoopCommit {}).expect(\"commit failed\");\n\n        let root_hash = tree.hash();\n        let mut walker = RefWalker::new(&mut tree, PanicSource {});\n\n        let (proof, _) = walker\n            .create_proof(vec![QueryItem::Key(vec![5])].as_slice())\n            .expect(\"failed to create proof\");\n        let mut bytes = vec![];\n\n        encode_into(proof.iter(), &mut bytes);\n\n        let map = verify(&bytes, root_hash).unwrap();\n        assert_eq!(\n            map.get(vec![5].as_slice()).unwrap().unwrap(),\n            vec![5].as_slice()\n        );\n        Ok(())\n    }\n\n    #[test]\n    #[should_panic(expected = \"verify failed\")]\n    fn verify_ops_mismatched_hash() {\n        let mut tree = Tree::new(vec![5], vec![5]).expect(\"tree construction failed\");\n        tree.commit(&mut NoopCommit {}).expect(\"commit failed\");\n\n        let mut walker = RefWalker::new(&mut tree, PanicSource {});\n\n        let (proof, _) = walker\n            .create_proof(vec![QueryItem::Key(vec![5])].as_slice())\n            .expect(\"failed to create proof\");\n        let mut bytes = vec![];\n\n        encode_into(proof.iter(), &mut bytes);\n\n        let _map = verify(&bytes, [42; 32]).expect(\"verify failed\");\n    }\n\n    #[test]\n    #[should_panic(expected = \"verify failed\")]\n    fn verify_query_mismatched_hash() {\n        let mut tree = make_3_node_tree().expect(\"tree construction failed\");\n        let mut walker = RefWalker::new(&mut tree, PanicSource {});\n        
let keys = vec![vec![5], vec![7]];\n        let (proof, _) = walker\n            .create_proof(\n                keys.clone()\n                    .into_iter()\n                    .map(QueryItem::Key)\n                    .collect::<Vec<_>>()\n                    .as_slice(),\n            )\n            .expect(\"failed to create proof\");\n        let mut bytes = vec![];\n        encode_into(proof.iter(), &mut bytes);\n\n        let mut query = Query::new();\n        for key in keys.iter() {\n            query.insert_key(key.clone());\n        }\n\n        let _result = verify_query(bytes.as_slice(), &query, [42; 32]).expect(\"verify failed\");\n    }\n\n    #[test]\n    #[should_panic(expected = \"Tried to attach to Hash node\")]\n    fn hash_attach() {\n        let mut target = make_3_node_tree().expect(\"tree construction failed\");\n\n        let mut proof = Vec::new();\n        proof.push(Op::Push(Node::KV(vec![42], vec![42])));\n        proof.push(Op::Push(Node::Hash(target.hash())));\n        proof.push(Op::Parent);\n\n        let map = verify(&proof.encode().unwrap(), target.hash()).unwrap();\n        assert_eq!(map.get(&[42]).unwrap().unwrap(), &[42])\n    }\n}\n"
  },
  {
    "path": "src/proofs/tree.rs",
    "content": "use super::{Node, Op};\nuse crate::error::{Error, Result};\nuse crate::tree::{kv_hash, node_hash, Hash, Hasher, NULL_HASH};\n\n/// Contains a tree's child node and its hash. The hash can always be assumed to\n/// be up-to-date.\n#[derive(Debug)]\npub struct Child {\n    /// The child node.\n    pub tree: Box<Tree>,\n    /// The hash of the child node.\n    pub hash: Hash,\n}\n\n/// A binary tree data structure used to represent a select subset of a tree\n/// when verifying Merkle proofs.\n#[derive(Debug)]\npub struct Tree {\n    /// The node at the root of this tree.\n    pub node: Node,\n    /// The left child of this tree.\n    pub left: Option<Child>,\n    /// The right child of this tree.\n    pub right: Option<Child>,\n    /// The height of this tree.\n    pub height: usize,\n}\n\nimpl From<Node> for Tree {\n    /// Creates a childless tree with the target node as the `node` field.\n    fn from(node: Node) -> Self {\n        Tree {\n            node,\n            left: None,\n            right: None,\n            height: 1,\n        }\n    }\n}\n\nimpl PartialEq for Tree {\n    /// Checks equality for the root hashes of the two trees.\n    fn eq(&self, other: &Self) -> bool {\n        self.hash()\n            .and_then(|this_hash| other.hash().map(|other_hash| this_hash == other_hash))\n            .unwrap_or_default()\n    }\n}\n\nimpl Tree {\n    /// Gets or computes the hash for this tree node.\n    pub fn hash(&self) -> Result<Hash> {\n        fn compute_hash(tree: &Tree, kv_hash: Hash) -> Hash {\n            node_hash::<Hasher>(&kv_hash, &tree.child_hash(true), &tree.child_hash(false))\n        }\n\n        match &self.node {\n            Node::Hash(hash) => Ok(*hash),\n            Node::KVHash(kv_hash) => Ok(compute_hash(self, *kv_hash)),\n            Node::KV(key, value) => kv_hash::<Hasher>(key.as_slice(), value.as_slice())\n                .map(|kv_hash| compute_hash(self, kv_hash))\n                .map_err(Into::into),\n        }\n   
 }\n\n    /// Creates an iterator that yields the in-order traversal of the nodes at\n    /// the given depth.\n    pub fn layer(&self, depth: usize) -> LayerIter {\n        LayerIter::new(self, depth)\n    }\n\n    /// Consumes the `Tree` and does an in-order traversal over all the nodes in\n    /// the tree, calling `visit_node` for each.\n    pub fn visit_nodes<F: FnMut(Node)>(mut self, visit_node: &mut F) {\n        if let Some(child) = self.left.take() {\n            child.tree.visit_nodes(visit_node);\n        }\n\n        let maybe_right_child = self.right.take();\n        visit_node(self.node);\n\n        if let Some(child) = maybe_right_child {\n            child.tree.visit_nodes(visit_node);\n        }\n    }\n\n    /// Does an in-order traversal over references to all the nodes in the tree,\n    /// calling `visit_node` for each.\n    pub fn visit_refs<F: FnMut(&Tree)>(&self, visit_node: &mut F) {\n        if let Some(child) = &self.left {\n            child.tree.visit_refs(visit_node);\n        }\n\n        visit_node(self);\n\n        if let Some(child) = &self.right {\n            child.tree.visit_refs(visit_node);\n        }\n    }\n\n    /// Returns an immutable reference to the child on the given side, if any.\n    pub fn child(&self, left: bool) -> Option<&Child> {\n        if left {\n            self.left.as_ref()\n        } else {\n            self.right.as_ref()\n        }\n    }\n\n    /// Returns a mutable reference to the child on the given side, if any.\n    pub(crate) fn child_mut(&mut self, left: bool) -> &mut Option<Child> {\n        if left {\n            &mut self.left\n        } else {\n            &mut self.right\n        }\n    }\n\n    /// Attaches the child to the `Tree`'s given side. 
Returns an error if\n    /// there is already a child attached to this side.\n    pub(crate) fn attach(&mut self, left: bool, child: Tree) -> Result<()> {\n        if self.child(left).is_some() {\n            return Err(Error::Attach(\n                \"Tried to attach to left child, but it is already Some\".into(),\n            ));\n        }\n\n        if let Node::Hash(_) = self.node {\n            return Err(Error::Attach(\"Tried to attach to Hash node\".into()));\n        }\n\n        self.height = self.height.max(child.height + 1);\n\n        let hash = child.hash()?;\n        let tree = Box::new(child);\n        *self.child_mut(left) = Some(Child { tree, hash });\n\n        Ok(())\n    }\n\n    /// Returns the already-computed hash for this tree node's child on the\n    /// given side, if any. If there is no child, returns the null hash\n    /// (zero-filled).\n    #[inline]\n    fn child_hash(&self, left: bool) -> Hash {\n        self.child(left).map_or(NULL_HASH, |c| c.hash)\n    }\n\n    /// Consumes the tree node, calculates its hash, and returns a `Node::Hash`\n    /// variant.\n    fn try_into_hash(self) -> Result<Tree> {\n        self.hash().map(Node::Hash).map(Into::into)\n    }\n\n    #[cfg(feature = \"full\")]\n    pub(crate) fn key(&self) -> &[u8] {\n        match self.node {\n            Node::KV(ref key, _) => key,\n            _ => panic!(\"Expected node to be type KV\"),\n        }\n    }\n}\n\n/// `LayerIter` iterates over the nodes in a `Tree` at a given depth. 
Nodes are\n/// visited in order.\npub struct LayerIter<'a> {\n    stack: Vec<&'a Tree>,\n    depth: usize,\n}\n\nimpl<'a> LayerIter<'a> {\n    /// Creates a new `LayerIter` that iterates over `tree` at the given depth.\n    fn new(tree: &'a Tree, depth: usize) -> Self {\n        let mut iter = LayerIter {\n            stack: Vec::with_capacity(depth),\n            depth,\n        };\n\n        iter.traverse_to_start(tree, depth);\n        iter\n    }\n\n    /// Builds up the stack by traversing through left children to the desired\n    /// depth.\n    fn traverse_to_start(&mut self, tree: &'a Tree, remaining_depth: usize) {\n        self.stack.push(tree);\n\n        if remaining_depth == 0 {\n            return;\n        }\n\n        if let Some(child) = tree.child(true) {\n            self.traverse_to_start(&child.tree, remaining_depth - 1)\n        } else {\n            panic!(\"Could not traverse to given layer\")\n        }\n    }\n}\n\nimpl<'a> Iterator for LayerIter<'a> {\n    type Item = &'a Tree;\n\n    fn next(&mut self) -> Option<Self::Item> {\n        let item = self.stack.pop();\n        let mut popped = item;\n\n        loop {\n            if self.stack.is_empty() {\n                return item;\n            }\n\n            let parent = self.stack.last().unwrap();\n            let left_child = parent.child(true).unwrap();\n            let right_child = parent.child(false).unwrap();\n\n            if left_child.tree.as_ref() == popped.unwrap() {\n                self.stack.push(&right_child.tree);\n\n                while self.stack.len() - 1 < self.depth {\n                    let parent = self.stack.last().unwrap();\n                    let left_child = parent.child(true).unwrap();\n                    self.stack.push(&left_child.tree);\n                }\n\n                return item;\n            } else {\n                popped = self.stack.pop();\n            }\n        }\n    }\n}\n\n/// Executes a proof by stepping through its operators, 
modifying the\n/// verification stack as it goes. The resulting stack item is returned.\n///\n/// If the `collapse` option is set to `true`, nodes will be hashed and pruned\n/// from memory during execution. This results in the minimum amount of memory\n/// usage, and the returned `Tree` will only contain a single node of type\n/// `Node::Hash`. If `false`, the returned `Tree` will contain the entire\n/// subtree contained in the proof.\n///\n/// `visit_node` will be called once for every push operation in the proof, in\n/// key-order. If `visit_node` returns an `Err` result, it will halt the\n/// execution and `execute` will return the error.\npub(crate) fn execute<I, F>(ops: I, collapse: bool, mut visit_node: F) -> Result<Tree>\nwhere\n    I: IntoIterator<Item = Result<Op>>,\n    F: FnMut(&Node) -> Result<()>,\n{\n    let mut stack: Vec<Tree> = Vec::with_capacity(32);\n    let mut maybe_last_key = None;\n\n    fn try_pop(stack: &mut Vec<Tree>) -> Result<Tree> {\n        match stack.pop() {\n            None => Err(Error::StackUnderflow),\n            Some(tree) => Ok(tree),\n        }\n    }\n\n    for op in ops {\n        match op? 
{\n            Op::Parent => {\n                let (mut parent, child) = (try_pop(&mut stack)?, try_pop(&mut stack)?);\n                parent.attach(\n                    true,\n                    if collapse {\n                        child.try_into_hash()?\n                    } else {\n                        child\n                    },\n                )?;\n                stack.push(parent);\n            }\n            Op::Child => {\n                let (child, mut parent) = (try_pop(&mut stack)?, try_pop(&mut stack)?);\n                parent.attach(\n                    false,\n                    if collapse {\n                        child.try_into_hash()?\n                    } else {\n                        child\n                    },\n                )?;\n                stack.push(parent);\n            }\n            Op::Push(node) => {\n                if let Node::KV(key, _) = &node {\n                    // keys should always increase\n                    if let Some(last_key) = &maybe_last_key {\n                        if key <= last_key {\n                            return Err(Error::Key(\"Incorrect key ordering\".into()));\n                        }\n                    }\n\n                    maybe_last_key = Some(key.clone());\n                }\n\n                visit_node(&node)?;\n\n                let tree: Tree = node.into();\n                stack.push(tree);\n            }\n        }\n    }\n\n    if stack.len() != 1 {\n        return Err(Error::Proof(\n            \"Expected proof to result in exactly one stack item\".into(),\n        ));\n    }\n\n    Ok(stack.pop().unwrap())\n}\n\n#[cfg(test)]\nmod test {\n    use super::super::*;\n    use super::Tree as ProofTree;\n    use super::*;\n\n    fn make_7_node_prooftree() -> ProofTree {\n        let make_node = |i| -> super::super::tree::Tree { Node::KV(vec![i], vec![]).into() };\n\n        let mut tree = make_node(3);\n        let mut left = make_node(1);\n        
left.attach(true, make_node(0)).unwrap();\n        left.attach(false, make_node(2)).unwrap();\n        let mut right = make_node(5);\n        right.attach(true, make_node(4)).unwrap();\n        right.attach(false, make_node(6)).unwrap();\n        tree.attach(true, left).unwrap();\n        tree.attach(false, right).unwrap();\n\n        tree\n    }\n\n    #[test]\n    fn height_counting() {\n        fn recurse(tree: &super::Tree, expected_height: usize) {\n            assert_eq!(tree.height, expected_height);\n            tree.left\n                .as_ref()\n                .into_iter()\n                .for_each(|l| recurse(&l.tree, expected_height - 1));\n            tree.right\n                .as_ref()\n                .into_iter()\n                .for_each(|r| recurse(&r.tree, expected_height - 1));\n        }\n\n        let tree = make_7_node_prooftree();\n        recurse(&tree, 3);\n    }\n\n    #[test]\n    fn layer_iter() {\n        let tree = make_7_node_prooftree();\n\n        let assert_node = |node: &Tree, i| match node.node {\n            Node::KV(ref key, _) => assert_eq!(key[0], i),\n            _ => unreachable!(),\n        };\n\n        let mut iter = tree.layer(0);\n        assert_node(iter.next().unwrap(), 3);\n        assert!(iter.next().is_none());\n\n        let mut iter = tree.layer(1);\n        assert_node(iter.next().unwrap(), 1);\n        assert_node(iter.next().unwrap(), 5);\n        assert!(iter.next().is_none());\n\n        let mut iter = tree.layer(2);\n        assert_node(iter.next().unwrap(), 0);\n        assert_node(iter.next().unwrap(), 2);\n        assert_node(iter.next().unwrap(), 4);\n        assert_node(iter.next().unwrap(), 6);\n        assert!(iter.next().is_none());\n    }\n\n    #[test]\n    fn visit_nodes() {\n        let tree = make_7_node_prooftree();\n\n        let assert_node = |node: Node, i| match node {\n            Node::KV(ref key, _) => assert_eq!(key[0], i),\n            _ => unreachable!(),\n        };\n\n     
   let mut visited = vec![];\n        tree.visit_nodes(&mut |node| visited.push(node));\n\n        let mut iter = visited.into_iter();\n        for i in 0..7 {\n            assert_node(iter.next().unwrap(), i);\n        }\n        assert!(iter.next().is_none());\n    }\n}\n"
  },
  {
    "path": "src/test_utils/crash_merk.rs",
    "content": "use crate::{Merk, Result};\nuse std::fs;\nuse std::mem::ManuallyDrop;\nuse std::ops::{Deref, DerefMut};\nuse std::path::Path;\n\n/// Wraps a Merk instance and drops it without flushing once it goes out of\n/// scope.\npub struct CrashMerk {\n    inner: Option<ManuallyDrop<Merk>>,\n    path: Box<Path>,\n}\n\nimpl CrashMerk {\n    /// Opens a `CrashMerk` at the given file path, creating a new one if it\n    /// does not exist.\n    pub fn open<P: AsRef<Path>>(path: P) -> Result<CrashMerk> {\n        let merk = Merk::open(&path)?;\n        let inner = Some(ManuallyDrop::new(merk));\n        Ok(CrashMerk {\n            inner,\n            path: path.as_ref().into(),\n        })\n    }\n\n    #[allow(clippy::missing_safety_doc)]\n    pub unsafe fn crash(&mut self) -> Result<()> {\n        ManuallyDrop::drop(&mut self.inner.take().unwrap());\n\n        // rename to invalidate rocksdb's lock\n        let file_name = format!(\n            \"{}_crashed\",\n            self.path.file_name().unwrap().to_str().unwrap()\n        );\n        let new_path = self.path.with_file_name(file_name);\n        fs::rename(&self.path, &new_path)?;\n\n        let mut new_merk = CrashMerk::open(&new_path)?;\n        self.inner = new_merk.inner.take();\n        self.path = new_merk.path;\n        Ok(())\n    }\n\n    pub fn into_inner(self) -> Merk {\n        ManuallyDrop::into_inner(self.inner.unwrap())\n    }\n\n    pub fn destroy(self) -> Result<()> {\n        self.into_inner().destroy()\n    }\n}\n\nimpl Deref for CrashMerk {\n    type Target = Merk;\n\n    fn deref(&self) -> &Merk {\n        self.inner.as_ref().unwrap()\n    }\n}\n\nimpl DerefMut for CrashMerk {\n    fn deref_mut(&mut self) -> &mut Merk {\n        self.inner.as_mut().unwrap()\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::CrashMerk;\n    use crate::Op;\n\n    #[test]\n    #[ignore] // currently this still works because we enabled the WAL\n    fn crash() {\n        let path = 
std::thread::current().name().unwrap().to_owned();\n\n        let mut merk = CrashMerk::open(path).expect(\"failed to open merk\");\n        merk.apply(&[(vec![1, 2, 3], Op::Put(vec![4, 5, 6]))], &[])\n            .expect(\"apply failed\");\n        unsafe {\n            merk.crash().unwrap();\n        }\n        assert_eq!(merk.get(&[1, 2, 3]).expect(\"failed to get\"), None);\n        merk.into_inner().destroy().unwrap();\n    }\n}\n"
  },
  {
    "path": "src/test_utils/mod.rs",
    "content": "#![allow(missing_docs)]\n\nmod crash_merk;\nmod temp_merk;\n\nuse crate::tree::{Batch, BatchEntry, NoopCommit, Op, PanicSource, Tree, Walker};\nuse rand::prelude::*;\nuse std::convert::TryInto;\nuse std::ops::Range;\n\npub use crash_merk::CrashMerk;\npub use temp_merk::TempMerk;\n\npub fn assert_tree_invariants(tree: &Tree) {\n    assert!(tree.balance_factor().abs() < 2);\n\n    let maybe_left = tree.link(true);\n    if let Some(left) = maybe_left {\n        assert!(left.key() < tree.key());\n        assert!(!left.is_modified());\n    }\n\n    let maybe_right = tree.link(false);\n    if let Some(right) = maybe_right {\n        assert!(right.key() > tree.key());\n        assert!(!right.is_modified());\n    }\n\n    if let Some(left) = tree.child(true) {\n        assert_tree_invariants(left);\n    }\n    if let Some(right) = tree.child(false) {\n        assert_tree_invariants(right);\n    }\n}\n\npub fn apply_memonly_unchecked(tree: Tree, batch: &Batch) -> Tree {\n    let walker = Walker::<PanicSource>::new(tree, PanicSource {});\n    let mut tree = Walker::<PanicSource>::apply_to(Some(walker), batch, PanicSource {})\n        .expect(\"apply failed\")\n        .0\n        .expect(\"expected tree\");\n    tree.commit(&mut NoopCommit {}).expect(\"commit failed\");\n    tree\n}\n\npub fn apply_memonly(tree: Tree, batch: &Batch) -> Tree {\n    let tree = apply_memonly_unchecked(tree, batch);\n    assert_tree_invariants(&tree);\n    tree\n}\n\npub fn apply_to_memonly(maybe_tree: Option<Tree>, batch: &Batch) -> Option<Tree> {\n    let maybe_walker = maybe_tree.map(|tree| Walker::<PanicSource>::new(tree, PanicSource {}));\n    Walker::<PanicSource>::apply_to(maybe_walker, batch, PanicSource {})\n        .expect(\"apply failed\")\n        .0\n        .map(|mut tree| {\n            tree.commit(&mut NoopCommit {}).expect(\"commit failed\");\n            println!(\"{:?}\", &tree);\n            assert_tree_invariants(&tree);\n            tree\n        
})\n}\n\npub fn seq_key(n: u64) -> Vec<u8> {\n    n.to_be_bytes().to_vec()\n}\n\npub fn put_entry_value() -> Vec<u8> {\n    vec![123; 60]\n}\n\npub fn put_entry(n: u64) -> BatchEntry {\n    (seq_key(n), Op::Put(put_entry_value()))\n}\n\npub fn del_entry(n: u64) -> BatchEntry {\n    (seq_key(n), Op::Delete)\n}\n\npub fn make_batch_seq(range: Range<u64>) -> Vec<BatchEntry> {\n    let mut batch = Vec::with_capacity((range.end - range.start).try_into().unwrap());\n    for n in range {\n        batch.push(put_entry(n));\n    }\n    batch\n}\n\npub fn make_del_batch_seq(range: Range<u64>) -> Vec<BatchEntry> {\n    let mut batch = Vec::with_capacity((range.end - range.start).try_into().unwrap());\n    for n in range {\n        batch.push(del_entry(n));\n    }\n    batch\n}\n\npub fn make_batch_rand(size: u64, seed: u64) -> Vec<BatchEntry> {\n    let mut rng: SmallRng = SeedableRng::seed_from_u64(seed);\n    let mut batch = Vec::with_capacity(size.try_into().unwrap());\n    for _ in 0..size {\n        let n = rng.gen::<u64>();\n        batch.push(put_entry(n));\n    }\n    batch.sort_by(|a, b| a.0.cmp(&b.0));\n    batch\n}\n\npub fn make_del_batch_rand(size: u64, seed: u64) -> Vec<BatchEntry> {\n    let mut rng: SmallRng = SeedableRng::seed_from_u64(seed);\n    let mut batch = Vec::with_capacity(size.try_into().unwrap());\n    for _ in 0..size {\n        let n = rng.gen::<u64>();\n        batch.push(del_entry(n));\n    }\n    batch.sort_by(|a, b| a.0.cmp(&b.0));\n    batch\n}\n\npub fn make_tree_rand(node_count: u64, batch_size: u64, initial_seed: u64) -> Tree {\n    assert!(node_count >= batch_size);\n    assert!((node_count % batch_size) == 0);\n\n    let value = vec![123; 60];\n    let mut tree = Tree::new(vec![0; 20], value).expect(\"Tree construction failed\");\n\n    let mut seed = initial_seed;\n\n    let batch_count = node_count / batch_size;\n    for _ in 0..batch_count {\n        let batch = make_batch_rand(batch_size, seed);\n        tree = apply_memonly(tree, 
&batch);\n        seed += 1;\n    }\n\n    tree\n}\n\npub fn make_tree_seq(node_count: u64) -> Tree {\n    let batch_size = if node_count >= 10_000 {\n        assert!(node_count % 10_000 == 0);\n        10_000\n    } else {\n        node_count\n    };\n\n    let value = vec![123; 60];\n    let mut tree = Tree::new(vec![0; 20], value).expect(\"Tree construction failed\");\n\n    let batch_count = node_count / batch_size;\n    for i in 0..batch_count {\n        let batch = make_batch_seq((i * batch_size)..((i + 1) * batch_size));\n        tree = apply_memonly(tree, &batch);\n    }\n\n    tree\n}\n"
  },
  {
    "path": "src/test_utils/temp_merk.rs",
    "content": "use crate::{Merk, Result};\nuse std::env::temp_dir;\nuse std::ops::{Deref, DerefMut};\nuse std::path::{Path, PathBuf};\nuse std::time::SystemTime;\n\n/// Wraps a Merk instance and deletes it from disk once it goes out of scope.\npub struct TempMerk {\n    inner: Option<Merk>,\n}\n\nimpl TempMerk {\n    /// Opens a `TempMerk` at the given file path, creating a new one if it does\n    /// not exist.\n    pub fn open<P: AsRef<Path>>(path: P) -> Result<TempMerk> {\n        let inner = Some(Merk::open(path)?);\n        Ok(TempMerk { inner })\n    }\n\n    /// Opens a `TempMerk` at an autogenerated, temporary file path.\n    pub fn new() -> Result<TempMerk> {\n        TempMerk::open(Self::create_path())\n    }\n\n    pub fn create_path() -> PathBuf {\n        let time = SystemTime::now()\n            .duration_since(SystemTime::UNIX_EPOCH)\n            .unwrap()\n            .as_nanos();\n        let mut path = temp_dir();\n        path.push(format!(\"merk-temp-{time}\"));\n        path\n    }\n}\n\nimpl Drop for TempMerk {\n    fn drop(&mut self) {\n        self.inner\n            .take()\n            .unwrap()\n            .destroy()\n            .expect(\"failed to delete db\");\n    }\n}\n\nimpl Deref for TempMerk {\n    type Target = Merk;\n\n    fn deref(&self) -> &Merk {\n        self.inner.as_ref().unwrap()\n    }\n}\n\nimpl DerefMut for TempMerk {\n    fn deref_mut(&mut self) -> &mut Merk {\n        self.inner.as_mut().unwrap()\n    }\n}\n"
  },
  {
    "path": "src/tree/commit.rs",
    "content": "use super::Tree;\nuse crate::error::Result;\n\n/// To be used when committing a tree (writing it to a store after applying the\n/// changes).\npub trait Commit {\n    /// Called once per updated node when a finalized tree is to be written to a\n    /// backing store or cache.\n    fn write(&mut self, tree: &Tree) -> Result<()>;\n\n    /// Called once per node after writing a node and its children. The returned\n    /// tuple specifies whether or not to prune the left and right child nodes,\n    /// respectively. For example, returning `(true, true)` will prune both\n    /// nodes, removing them from memory.\n    fn prune(&self, _tree: &Tree) -> (bool, bool) {\n        (true, true)\n    }\n}\n\n/// A `Commit` implementation which does not write to a store and does not prune\n/// any nodes from the Tree. Useful when only keeping a tree in memory.\npub struct NoopCommit {}\nimpl Commit for NoopCommit {\n    fn write(&mut self, _tree: &Tree) -> Result<()> {\n        Ok(())\n    }\n\n    fn prune(&self, _tree: &Tree) -> (bool, bool) {\n        (false, false)\n    }\n}\n"
  },
  {
    "path": "src/tree/debug.rs",
    "content": "use super::{Link, Tree};\nuse colored::Colorize;\nuse std::fmt::{Debug, Formatter, Result};\n\nimpl Debug for Tree {\n    // TODO: unwraps should be results that bubble up\n    fn fmt(&self, f: &mut Formatter) -> Result {\n        fn traverse(\n            f: &mut Formatter,\n            cursor: &Tree,\n            stack: &mut Vec<(Vec<u8>, Vec<u8>)>,\n            left: bool,\n        ) {\n            if let Some(child_link) = cursor.link(true) {\n                stack.push((child_link.key().to_vec(), cursor.key().to_vec()));\n                if let Some(child_tree) = child_link.tree() {\n                    traverse(f, child_tree, stack, true);\n                } else {\n                    traverse_pruned(f, child_link, stack, true);\n                }\n                stack.pop();\n            }\n\n            let depth = stack.len();\n\n            if depth > 0 {\n                // draw ancestor's vertical lines\n                for (low, high) in stack.iter().take(depth - 1) {\n                    let draw_line = cursor.key() > low && cursor.key() < high;\n                    write!(f, \"{}\", if draw_line { \" │  \" } else { \"    \" }.dimmed()).unwrap();\n                }\n            }\n\n            let prefix = if depth == 0 {\n                \"\"\n            } else if left {\n                \" ┌-\"\n            } else {\n                \" └-\"\n            };\n            writeln!(\n                f,\n                \"{}{}\",\n                prefix.dimmed(),\n                format!(\"{:?}\", cursor.key()).on_bright_black()\n            )\n            .unwrap();\n\n            if let Some(child_link) = cursor.link(false) {\n                stack.push((cursor.key().to_vec(), child_link.key().to_vec()));\n                if let Some(child_tree) = child_link.tree() {\n                    traverse(f, child_tree, stack, false);\n                } else {\n                    traverse_pruned(f, child_link, stack, false);\n               
 }\n                stack.pop();\n            }\n        }\n\n        fn traverse_pruned(\n            f: &mut Formatter,\n            link: &Link,\n            stack: &mut [(Vec<u8>, Vec<u8>)],\n            left: bool,\n        ) {\n            let depth = stack.len();\n\n            if depth > 0 {\n                // draw ancestor's vertical lines\n                for (low, high) in stack.iter().take(depth - 1) {\n                    let draw_line = link.key() > low && link.key() < high;\n                    write!(f, \"{}\", if draw_line { \" │  \" } else { \"    \" }.dimmed()).unwrap();\n                }\n            }\n\n            let prefix = if depth == 0 {\n                \"\"\n            } else if left {\n                \" ┌-\"\n            } else {\n                \" └-\"\n            };\n            writeln!(\n                f,\n                \"{}{}\",\n                prefix.dimmed(),\n                format!(\"{:?}\", link.key()).blue()\n            )\n            .unwrap();\n        }\n\n        let mut stack = vec![];\n        traverse(f, self, &mut stack, false);\n        writeln!(f)\n    }\n}\n"
  },
  {
    "path": "src/tree/encoding.rs",
    "content": "use std::io::Read;\n\nuse crate::Result;\n\nuse super::{kv::KV, Link, Tree, TreeInner};\nuse ed::{Decode, Encode};\n\nimpl Tree {\n    #[inline]\n    pub fn encode(&self) -> Vec<u8> {\n        // operation is infallible so it's ok to unwrap\n        Encode::encode(self).unwrap()\n    }\n\n    #[inline]\n    pub fn encode_into(&self, dest: &mut Vec<u8>) {\n        // operation is infallible so it's ok to unwrap\n        Encode::encode_into(self, dest).unwrap()\n    }\n\n    #[inline]\n    pub fn encoding_length(&self) -> usize {\n        // operation is infallible so it's ok to unwrap\n        Encode::encoding_length(self).unwrap()\n    }\n\n    #[inline]\n    pub fn decode_into(&mut self, key: Vec<u8>, input: &[u8]) {\n        // operation is infallible so it's ok to unwrap\n        Decode::decode_into(self, input).unwrap();\n        self.inner.kv.key = key;\n    }\n\n    #[inline]\n    pub fn decode(key: Vec<u8>, input: &[u8]) -> Tree {\n        // operation is infallible so it's ok to unwrap\n        let mut tree: Tree = Decode::decode(input).unwrap();\n        tree.inner.kv.key = key;\n        tree\n    }\n\n    pub fn decode_v0<R: Read>(mut input: R) -> Result<Self> {\n        let mut read_link_v0 = || -> Result<Option<Link>> {\n            let some = bool::decode(&mut input)?;\n            if some {\n                let link = Link::decode_v0(&mut input)?;\n                Ok(Some(link))\n            } else {\n                Ok(None)\n            }\n        };\n\n        let maybe_left = read_link_v0()?;\n        let maybe_right = read_link_v0()?;\n        let kv = KV::decode(&mut input)?;\n\n        Ok(Tree {\n            inner: Box::new(TreeInner {\n                left: maybe_left,\n                right: maybe_right,\n                kv,\n            }),\n        })\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::super::Link;\n    use super::*;\n    use crate::error::Result;\n\n    #[test]\n    fn encode_leaf_tree() {\n        let 
tree = Tree::from_fields(vec![0], vec![1], [55; 32], None, None);\n        assert_eq!(tree.encoding_length(), 35);\n        assert_eq!(\n            tree.encode(),\n            vec![\n                0, 0, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55,\n                55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 1,\n            ]\n        );\n    }\n\n    #[test]\n    #[should_panic]\n    fn encode_modified_tree() {\n        let tree = Tree::from_fields(\n            vec![0],\n            vec![1],\n            [55; 32],\n            Some(Link::Modified {\n                pending_writes: 1,\n                child_heights: (123, 124),\n                tree: Tree::new(vec![2], vec![3]).unwrap(),\n            }),\n            None,\n        );\n        tree.encode();\n    }\n\n    #[test]\n    fn encode_loaded_tree() -> Result<()> {\n        let tree = Tree::from_fields(\n            vec![0],\n            vec![1],\n            [55; 32],\n            Some(Link::Loaded {\n                hash: [66; 32],\n                child_heights: (123, 124),\n                tree: Tree::new(vec![2], vec![3])?,\n            }),\n            None,\n        );\n        assert_eq!(\n            tree.encode(),\n            vec![\n                1, 0, 1, 2, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66,\n                66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 123, 124, 0, 55, 55, 55,\n                55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55,\n                55, 55, 55, 55, 55, 55, 55, 55, 1\n            ]\n        );\n        Ok(())\n    }\n\n    #[test]\n    fn encode_uncommitted_tree() -> Result<()> {\n        let tree = Tree::from_fields(\n            vec![0],\n            vec![1],\n            [55; 32],\n            Some(Link::Uncommitted {\n                hash: [66; 32],\n                child_heights: (123, 124),\n                tree: Tree::new(vec![2], 
vec![3])?,\n            }),\n            None,\n        );\n        assert_eq!(\n            tree.encode(),\n            vec![\n                1, 0, 1, 2, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66,\n                66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 123, 124, 0, 55, 55, 55,\n                55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55,\n                55, 55, 55, 55, 55, 55, 55, 55, 1\n            ]\n        );\n        Ok(())\n    }\n\n    #[test]\n    fn encode_reference_tree() {\n        let tree = Tree::from_fields(\n            vec![0],\n            vec![1],\n            [55; 32],\n            Some(Link::Reference {\n                hash: [66; 32],\n                child_heights: (123, 124),\n                key: vec![2],\n            }),\n            None,\n        );\n        assert_eq!(tree.encoding_length(), 71);\n        assert_eq!(\n            tree.encode(),\n            vec![\n                1, 0, 1, 2, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66,\n                66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 123, 124, 0, 55, 55, 55,\n                55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55,\n                55, 55, 55, 55, 55, 55, 55, 55, 1\n            ]\n        );\n    }\n\n    #[test]\n    fn decode_leaf_tree() {\n        let bytes = vec![\n            0, 0, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55,\n            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n        ];\n        let tree = Tree::decode(vec![0], bytes.as_slice());\n        assert_eq!(tree.key(), &[0]);\n        assert_eq!(tree.value(), &[1]);\n    }\n\n    #[test]\n    fn decode_reference_tree() {\n        let bytes = vec![\n            1, 0, 1, 2, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66,\n            66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 
66, 123, 124, 0, 55, 55, 55, 55, 55,\n            55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55,\n            55, 55, 55, 55, 55, 1,\n        ];\n        let tree = Tree::decode(vec![0], bytes.as_slice());\n        assert_eq!(tree.key(), &[0]);\n        assert_eq!(tree.value(), &[1]);\n        if let Some(Link::Reference {\n            key,\n            child_heights,\n            hash,\n        }) = tree.link(true)\n        {\n            assert_eq!(*key, [2]);\n            assert_eq!(*child_heights, (123_u8, 124_u8));\n            assert_eq!(*hash, [66_u8; 32]);\n        } else {\n            panic!(\"Expected Link::Reference\");\n        }\n    }\n}\n"
  },
  {
    "path": "src/tree/fuzz_tests.rs",
    "content": "#![cfg(test)]\n\nuse crate::test_utils::*;\nuse crate::tree::*;\nuse rand::prelude::*;\nuse std::cell::RefCell;\nuse std::collections::BTreeMap;\nuse std::iter::FromIterator;\n\nconst ITERATIONS: usize = 2_000;\ntype Map = BTreeMap<Vec<u8>, Vec<u8>>;\n\n#[test]\nfn fuzz() {\n    let mut rng = thread_rng();\n\n    for _ in 0..ITERATIONS {\n        let seed = rng.gen::<u64>();\n        fuzz_case(seed);\n    }\n}\n\n#[test]\nfn fuzz_17391518417409062786() {\n    fuzz_case(17391518417409062786);\n}\n\n#[test]\nfn fuzz_396148930387069749() {\n    fuzz_case(396148930387069749);\n}\n\nfn fuzz_case(seed: u64) {\n    let mut rng: SmallRng = SeedableRng::seed_from_u64(seed);\n    let initial_size = (rng.gen::<u64>() % 10) + 1;\n    let tree = make_tree_rand(initial_size, initial_size, seed);\n    let mut map = Map::from_iter(tree.iter());\n    let mut maybe_tree = Some(tree);\n    println!(\"====== MERK FUZZ ======\");\n    println!(\"SEED: {}\", seed);\n    println!(\"{:?}\", maybe_tree.as_ref().unwrap());\n\n    for j in 0..3 {\n        let batch_size = (rng.gen::<u64>() % 3) + 1;\n        let batch = make_batch(maybe_tree.as_ref(), batch_size, rng.gen::<u64>());\n        println!(\"BATCH {}\", j);\n        println!(\"{:?}\", batch);\n        maybe_tree = apply_to_memonly(maybe_tree, &batch);\n        apply_to_map(&mut map, &batch);\n        assert_map(maybe_tree.as_ref(), &map);\n        if let Some(tree) = &maybe_tree {\n            println!(\"{:?}\", &tree);\n        } else {\n            println!(\"(Empty tree)\");\n        }\n    }\n}\n\nfn make_batch(maybe_tree: Option<&Tree>, size: u64, seed: u64) -> Vec<BatchEntry> {\n    let rng: RefCell<SmallRng> = RefCell::new(SeedableRng::seed_from_u64(seed));\n    let mut batch = Vec::with_capacity(size as usize);\n\n    let get_random_key = || {\n        let tree = maybe_tree.as_ref().unwrap();\n        let entries: Vec<_> = tree.iter().collect();\n        let index = rng.borrow_mut().gen::<u64>() as usize % 
entries.len();\n        entries[index].0.clone()\n    };\n\n    let random_value = |size| {\n        let mut value = vec![0; size];\n        rng.borrow_mut().fill_bytes(&mut value[..]);\n        value\n    };\n\n    let insert = || (random_value(2), Op::Put(random_value(2)));\n    let update = || {\n        let key = get_random_key();\n        (key.to_vec(), Op::Put(random_value(2)))\n    };\n    let delete = || {\n        let key = get_random_key();\n        (key.to_vec(), Op::Delete)\n    };\n\n    for _ in 0..size {\n        let entry = if maybe_tree.is_some() {\n            let kind = rng.borrow_mut().gen::<u64>() % 3;\n            if kind == 0 {\n                insert()\n            } else if kind == 1 {\n                update()\n            } else {\n                delete()\n            }\n        } else {\n            insert()\n        };\n        batch.push(entry);\n    }\n    batch.sort_by(|a, b| a.0.cmp(&b.0));\n\n    // remove dupes\n    let mut maybe_prev_key: Option<Vec<u8>> = None;\n    let mut deduped_batch = Vec::with_capacity(batch.len());\n    for entry in batch {\n        if let Some(prev_key) = &maybe_prev_key {\n            if *prev_key == entry.0 {\n                continue;\n            }\n        }\n\n        maybe_prev_key = Some(entry.0.clone());\n        deduped_batch.push(entry);\n    }\n    deduped_batch\n}\n\nfn apply_to_map(map: &mut Map, batch: &Batch) {\n    for entry in batch.iter() {\n        match entry {\n            (key, Op::Put(value)) => {\n                map.insert(key.to_vec(), value.to_vec());\n            }\n            (key, Op::Delete) => {\n                map.remove(key);\n            }\n        }\n    }\n}\n\nfn assert_map(maybe_tree: Option<&Tree>, map: &Map) {\n    if map.is_empty() {\n        assert!(maybe_tree.is_none(), \"expected tree to be None\");\n        return;\n    }\n\n    let tree = maybe_tree.expect(\"expected tree to be Some\");\n\n    let map_iter = map.iter();\n    let tree_iter = 
tree.iter();\n    for (tree_kv, map_kv) in tree_iter.zip(map_iter) {\n        assert_eq!(tree_kv.0, *map_kv.0);\n        assert_eq!(tree_kv.1, *map_kv.1);\n    }\n\n    assert_eq!(tree.iter().count(), map.len());\n}\n"
  },
  {
    "path": "src/tree/hash.rs",
    "content": "use sha2::{Digest, Sha512_256};\nuse std::{convert::TryFrom, num::TryFromIntError};\n\n/// The hash algorithm used for both KV hashes and node hashes.\npub type Hasher = Sha512_256;\n\n/// The length of a `Hash` (in bytes).\npub const HASH_LENGTH: usize = 32;\n\n/// A zero-filled `Hash`.\npub const NULL_HASH: Hash = [0; HASH_LENGTH];\n\n/// A cryptographic hash digest.\npub type Hash = [u8; HASH_LENGTH];\n\n/// Hashes a key/value pair.\npub fn kv_hash<D: Digest>(key: &[u8], value: &[u8]) -> Result<Hash, TryFromIntError> {\n    let mut hasher = D::new();\n    hasher.update([0]);\n\n    u32::try_from(key.len())\n        .and_then(|key| u32::try_from(value.len()).map(|value| (key, value)))\n        .map(|(key_length, val_length)| {\n            hasher.update(key_length.to_le_bytes());\n            hasher.update(key);\n\n            hasher.update(val_length.to_le_bytes());\n            hasher.update(value);\n\n            let res = hasher.finalize();\n            let mut hash: Hash = Default::default();\n            hash.copy_from_slice(&res[..]);\n            hash\n        })\n}\n\n/// Hashes a node based on the hash of its left child (if any), its key/value\n/// pair, and the hash of its right child (if any).\npub fn node_hash<D: Digest>(kv: &Hash, left: &Hash, right: &Hash) -> Hash {\n    let mut hasher = D::new();\n    hasher.update([1]);\n    hasher.update(left);\n    hasher.update(kv);\n    hasher.update(right);\n\n    let res = hasher.finalize();\n    let mut hash: Hash = Default::default();\n    hash.copy_from_slice(&res[..]);\n    hash\n}\n"
  },
  {
    "path": "src/tree/iter.rs",
    "content": "use super::Tree;\n\n/// An entry stored on an `Iter`'s stack, containing a reference to a `Tree`,\n/// and its traversal state.\n///\n/// The `traversed` field represents whether or not the left child, self, and\n/// right child have been visited, respectively (`(left, self, right)`).\nstruct StackItem<'a> {\n    tree: &'a Tree,\n    traversed: (bool, bool, bool),\n}\n\nimpl<'a> StackItem<'a> {\n    /// Creates a new `StackItem` for the given tree. The `traversed` state will\n    /// be `false` since the children and self have not been visited yet, but\n    /// will default to `true` for sides that do not have a child.\n    fn new(tree: &'a Tree) -> Self {\n        StackItem {\n            tree,\n            traversed: (\n                tree.child(true).is_none(),\n                false,\n                tree.child(false).is_none(),\n            ),\n        }\n    }\n\n    /// Gets a tuple to yield from an `Iter`, `(key, value)`.\n    fn to_entry(&self) -> (Vec<u8>, Vec<u8>) {\n        (self.tree.key().to_vec(), self.tree.value().to_vec())\n    }\n}\n\n/// An iterator which yields the key/value pairs of the tree, in order, skipping\n/// any parts of the tree which are pruned (not currently retained in memory).\npub struct Iter<'a> {\n    stack: Vec<StackItem<'a>>,\n}\n\nimpl<'a> Iter<'a> {\n    /// Creates a new iterator for the given tree.\n    pub fn new(tree: &'a Tree) -> Self {\n        let stack = vec![StackItem::new(tree)];\n        Iter { stack }\n    }\n}\n\nimpl<'a> Tree {\n    /// Creates an iterator which yields `(key, value)` tuples for all of the\n    /// tree's nodes which are retained in memory (skipping pruned subtrees).\n    pub fn iter(&'a self) -> Iter<'a> {\n        Iter::new(self)\n    }\n}\n\nimpl<'a> Iterator for Iter<'a> {\n    type Item = (Vec<u8>, Vec<u8>);\n\n    /// Traverses to and yields the next key/value pair, in key order.\n    fn next(&mut self) -> Option<Self::Item> {\n        if self.stack.is_empty() {\n          
  return None;\n        }\n\n        let last = self.stack.last_mut().unwrap();\n        if !last.traversed.0 {\n            last.traversed.0 = true;\n            let tree = last.tree.child(true).unwrap();\n            self.stack.push(StackItem::new(tree));\n            self.next()\n        } else if !last.traversed.1 {\n            last.traversed.1 = true;\n            Some(last.to_entry())\n        } else if !last.traversed.2 {\n            last.traversed.2 = true;\n            let tree = last.tree.child(false).unwrap();\n            self.stack.push(StackItem::new(tree));\n            self.next()\n        } else {\n            self.stack.pop();\n            self.next()\n        }\n    }\n}\n"
  },
  {
    "path": "src/tree/kv.rs",
    "content": "use super::hash::{kv_hash, Hash, Hasher, HASH_LENGTH, NULL_HASH};\nuse ed::{Decode, Encode, Result};\nuse std::{\n    io::{Read, Write},\n    num::TryFromIntError,\n};\n\n// TODO: maybe use something similar to Vec but without capacity field,\n//       (should save 16 bytes per entry). also, maybe a shorter length\n//       field to save even more. also might be possible to combine key\n//       field and value field.\n\n/// Contains a key/value pair, and the hash of the key/value pair.\n#[derive(Clone, Debug, PartialEq, Eq)]\npub struct KV {\n    pub(super) key: Vec<u8>,\n    pub(super) value: Vec<u8>,\n    pub(super) hash: Hash,\n}\n\nimpl KV {\n    /// Creates a new `KV` with the given key and value and computes its hash.\n    #[inline]\n    pub fn new(key: Vec<u8>, value: Vec<u8>) -> std::result::Result<Self, TryFromIntError> {\n        kv_hash::<Hasher>(key.as_slice(), value.as_slice()).map(|hash| KV { key, value, hash })\n    }\n\n    /// Creates a new `KV` with the given key, value, and hash. 
The hash is not\n    /// checked to be correct for the given key/value.\n    #[inline]\n    pub fn from_fields(key: Vec<u8>, value: Vec<u8>, hash: Hash) -> Self {\n        KV { key, value, hash }\n    }\n\n    /// Replaces the `KV`'s value with the given value, updates the hash, and\n    /// returns the modified `KV`.\n    #[inline]\n    pub fn with_value(mut self, value: Vec<u8>) -> std::result::Result<Self, TryFromIntError> {\n        self.value = value;\n        self.hash = kv_hash::<Hasher>(self.key(), self.value())?;\n        Ok(self)\n    }\n\n    /// Returns the key as a slice.\n    #[inline]\n    pub fn key(&self) -> &[u8] {\n        self.key.as_slice()\n    }\n\n    /// Returns the value as a slice.\n    #[inline]\n    pub fn value(&self) -> &[u8] {\n        self.value.as_slice()\n    }\n\n    /// Returns the hash.\n    #[inline]\n    pub fn hash(&self) -> &Hash {\n        &self.hash\n    }\n\n    /// Consumes the `KV` and returns its key without allocating or cloning.\n    #[inline]\n    pub fn take_key(self) -> Vec<u8> {\n        self.key\n    }\n}\n\nimpl Encode for KV {\n    #[inline]\n    fn encode_into<W: Write>(&self, out: &mut W) -> Result<()> {\n        out.write_all(&self.hash[..])?;\n        out.write_all(self.value.as_slice())?;\n        Ok(())\n    }\n\n    #[inline]\n    fn encoding_length(&self) -> Result<usize> {\n        debug_assert!(\n            self.key().len() < 65536,\n            \"Key length must be less than 65536\"\n        );\n\n        Ok(HASH_LENGTH + self.value.len())\n    }\n}\n\nimpl Decode for KV {\n    #[inline]\n    fn decode<R: Read>(input: R) -> Result<Self> {\n        let mut kv = KV {\n            key: Vec::with_capacity(0),\n            value: Vec::with_capacity(128),\n            hash: NULL_HASH,\n        };\n        KV::decode_into(&mut kv, input)?;\n        Ok(kv)\n    }\n\n    #[inline]\n    fn decode_into<R: Read>(&mut self, mut input: R) -> Result<()> {\n        self.key.clear();\n\n        
input.read_exact(&mut self.hash[..])?;\n\n        self.value.clear();\n        input.read_to_end(self.value.as_mut())?;\n\n        Ok(())\n    }\n}\n\n#[cfg(test)]\nmod test {\n    use super::*;\n\n    #[test]\n    fn new_kv() -> std::result::Result<(), TryFromIntError> {\n        let kv = KV::new(vec![1, 2, 3], vec![4, 5, 6])?;\n\n        assert_eq!(kv.key(), &[1, 2, 3]);\n        assert_eq!(kv.value(), &[4, 5, 6]);\n        assert_ne!(kv.hash(), &super::super::hash::NULL_HASH);\n        Ok(())\n    }\n\n    #[test]\n    fn with_value() -> std::result::Result<(), TryFromIntError> {\n        let kv = KV::new(vec![1, 2, 3], vec![4, 5, 6])?.with_value(vec![7, 8, 9])?;\n\n        assert_eq!(kv.key(), &[1, 2, 3]);\n        assert_eq!(kv.value(), &[7, 8, 9]);\n        assert_ne!(kv.hash(), &super::super::hash::NULL_HASH);\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "src/tree/link.rs",
    "content": "use std::cmp::max;\nuse std::io::{Read, Write};\n\nuse ed::{Decode, Encode, Result, Terminated};\n\nuse super::hash::Hash;\nuse super::Tree;\n\n// TODO: optimize memory footprint\n\n/// Represents a reference to a child tree node. Links may or may not contain\n/// the child's `Tree` instance (storing its key if not).\n#[derive(Debug, Clone, PartialEq, Eq)]\npub enum Link {\n    /// Represents a child tree node which has been pruned from memory, only\n    /// retaining a reference to it (its key). The child node can always be\n    /// fetched from the backing store by this key when necessary.\n    Reference {\n        hash: Hash,\n        child_heights: (u8, u8),\n        key: Vec<u8>,\n    },\n\n    /// Represents a tree node which has been modified since the `Tree`'s last\n    /// hash computation. The child's hash is not stored since it has not yet\n    /// been recomputed. The child's `Tree` instance is stored in the link.\n    Modified {\n        pending_writes: usize, // TODO: rename to `pending_hashes`\n        child_heights: (u8, u8),\n        tree: Tree,\n    },\n\n    // Represents a tree node which has been modified since the `Tree`'s last\n    // commit, but which has an up-to-date hash. 
The child's `Tree` instance is\n    // stored in the link.\n    Uncommitted {\n        hash: Hash,\n        child_heights: (u8, u8),\n        tree: Tree,\n    },\n\n    /// Represents a tree node which has not been modified, has an up-to-date\n    /// hash, and which is being retained in memory.\n    Loaded {\n        hash: Hash,\n        child_heights: (u8, u8),\n        tree: Tree,\n    },\n}\n\nimpl Link {\n    /// Creates a `Link::Modified` from the given `Tree`.\n    #[inline]\n    pub fn from_modified_tree(tree: Tree) -> Self {\n        let pending_writes = 1 + tree.child_pending_writes(true) + tree.child_pending_writes(false);\n\n        Link::Modified {\n            pending_writes,\n            child_heights: tree.child_heights(),\n            tree,\n        }\n    }\n\n    /// Creates a `Link::Modified` from the given tree, if any. If `None`,\n    /// returns `None`.\n    pub fn maybe_from_modified_tree(maybe_tree: Option<Tree>) -> Option<Self> {\n        maybe_tree.map(Link::from_modified_tree)\n    }\n\n    /// Returns `true` if the link is of the `Link::Reference` variant.\n    #[inline]\n    pub fn is_reference(&self) -> bool {\n        matches!(self, Link::Reference { .. })\n    }\n\n    /// Returns `true` if the link is of the `Link::Modified` variant.\n    #[inline]\n    pub fn is_modified(&self) -> bool {\n        matches!(self, Link::Modified { .. })\n    }\n\n    /// Returns `true` if the link is of the `Link::Uncommitted` variant.\n    #[inline]\n    pub fn is_uncommitted(&self) -> bool {\n        matches!(self, Link::Uncommitted { .. })\n    }\n\n    /// Returns `true` if the link is of the `Link::Loaded` variant.\n    #[inline]\n    pub fn is_stored(&self) -> bool {\n        matches!(self, Link::Loaded { .. })\n    }\n\n    /// Returns the key of the tree referenced by this link, as a slice.\n    #[inline]\n    pub fn key(&self) -> &[u8] {\n        match self {\n            Link::Reference { key, .. 
} => key.as_slice(),\n            Link::Modified { tree, .. } => tree.key(),\n            Link::Uncommitted { tree, .. } => tree.key(),\n            Link::Loaded { tree, .. } => tree.key(),\n        }\n    }\n\n    /// Returns the `Tree` instance of the tree referenced by the link. If the\n    /// link is of variant `Link::Reference`, the returned value will be `None`.\n    #[inline]\n    pub fn tree(&self) -> Option<&Tree> {\n        match self {\n            // TODO: panic for Reference, don't return Option?\n            Link::Reference { .. } => None,\n            Link::Modified { tree, .. } => Some(tree),\n            Link::Uncommitted { tree, .. } => Some(tree),\n            Link::Loaded { tree, .. } => Some(tree),\n        }\n    }\n\n    /// Returns the hash of the tree referenced by the link. Panics if link is\n    /// of variant `Link::Modified` since we have not yet recomputed the tree's\n    /// hash.\n    #[inline]\n    pub fn hash(&self) -> &Hash {\n        match self {\n            Link::Modified { .. } => panic!(\"Cannot get hash from modified link\"),\n            Link::Reference { hash, .. } => hash,\n            Link::Uncommitted { hash, .. } => hash,\n            Link::Loaded { hash, .. } => hash,\n        }\n    }\n\n    /// Returns the height of the tree referenced by the link: one more than\n    /// the height of its taller child. (Note: this is the height of the\n    /// referenced node itself, not the heights of its children.)\n    #[inline]\n    pub fn height(&self) -> u8 {\n        let (left_height, right_height) = match self {\n            Link::Reference { child_heights, .. } => *child_heights,\n            Link::Modified { child_heights, .. } => *child_heights,\n            Link::Uncommitted { child_heights, .. } => *child_heights,\n            Link::Loaded { child_heights, .. 
} => *child_heights,\n        };\n        1 + max(left_height, right_height)\n    }\n\n    /// Returns the balance factor of the tree referenced by the link.\n    #[inline]\n    pub fn balance_factor(&self) -> i8 {\n        let (left_height, right_height) = match self {\n            Link::Reference { child_heights, .. } => *child_heights,\n            Link::Modified { child_heights, .. } => *child_heights,\n            Link::Uncommitted { child_heights, .. } => *child_heights,\n            Link::Loaded { child_heights, .. } => *child_heights,\n        };\n        right_height as i8 - left_height as i8\n    }\n\n    /// Consumes the link and converts to variant `Link::Reference`. Panics if\n    /// the link is of variant `Link::Modified` or `Link::Uncommitted`.\n    #[inline]\n    pub fn into_reference(self) -> Self {\n        match self {\n            Link::Reference { .. } => self,\n            Link::Modified { .. } => panic!(\"Cannot prune Modified tree\"),\n            Link::Uncommitted { .. 
} => panic!(\"Cannot prune Uncommitted tree\"),\n            Link::Loaded {\n                hash,\n                child_heights,\n                tree,\n            } => Link::Reference {\n                hash,\n                child_heights,\n                key: tree.take_key(),\n            },\n        }\n    }\n\n    #[inline]\n    #[cfg(feature = \"full\")]\n    pub(crate) fn child_heights_mut(&mut self) -> &mut (u8, u8) {\n        match self {\n            Link::Reference {\n                ref mut child_heights,\n                ..\n            } => child_heights,\n            Link::Modified {\n                ref mut child_heights,\n                ..\n            } => child_heights,\n            Link::Uncommitted {\n                ref mut child_heights,\n                ..\n            } => child_heights,\n            Link::Loaded {\n                ref mut child_heights,\n                ..\n            } => child_heights,\n        }\n    }\n}\n\nimpl Encode for Link {\n    #[inline]\n    fn encode_into<W: Write>(&self, out: &mut W) -> Result<()> {\n        let (hash, key, (left_height, right_height)) = match self {\n            Link::Reference {\n                hash,\n                key,\n                child_heights,\n            } => (hash, key.as_slice(), child_heights),\n            Link::Loaded {\n                hash,\n                tree,\n                child_heights,\n            } => (hash, tree.key(), child_heights),\n            Link::Uncommitted {\n                hash,\n                tree,\n                child_heights,\n            } => (hash, tree.key(), child_heights),\n\n            Link::Modified { .. 
} => panic!(\"No encoding for Link::Modified\"),\n        };\n\n        debug_assert!(\n            self.key().len() < 65536,\n            \"Key length must be less than 65536\"\n        );\n\n        out.write_all(&(key.len() as u16).to_be_bytes())?;\n        out.write_all(key)?;\n\n        out.write_all(hash)?;\n\n        out.write_all(&[*left_height, *right_height])?;\n\n        Ok(())\n    }\n\n    #[inline]\n    fn encoding_length(&self) -> Result<usize> {\n        debug_assert!(\n            self.key().len() < 65536,\n            \"Key length must be less than 65536\"\n        );\n\n        Ok(match self {\n            Link::Reference { key, .. } => 1 + key.len() + 32 + 2,\n            Link::Modified { .. } => panic!(\"No encoding for Link::Modified\"),\n            Link::Uncommitted { tree, .. } => 1 + tree.key().len() + 32 + 2,\n            Link::Loaded { tree, .. } => 1 + tree.key().len() + 32 + 2,\n        })\n    }\n}\n\nimpl Link {\n    #[inline]\n    fn default_reference() -> Self {\n        Link::Reference {\n            key: Vec::with_capacity(64),\n            hash: Default::default(),\n            child_heights: (0, 0),\n        }\n    }\n\n    pub(crate) fn decode_v0<R: Read>(mut input: R) -> Result<Self> {\n        let length = read_u8(&mut input)? 
as usize;\n\n        let mut key = vec![0; length];\n        input.read_exact(&mut key)?;\n\n        let mut hash = [0; 32];\n        input.read_exact(&mut hash)?;\n\n        let left_height = read_u8(&mut input)?;\n        let right_height = read_u8(input)?;\n\n        Ok(Link::Reference {\n            key,\n            hash,\n            child_heights: (left_height, right_height),\n        })\n    }\n}\n\nimpl Decode for Link {\n    #[inline]\n    fn decode<R: Read>(input: R) -> Result<Link> {\n        let mut link = Link::default_reference();\n        Link::decode_into(&mut link, input)?;\n        Ok(link)\n    }\n\n    #[inline]\n    fn decode_into<R: Read>(&mut self, mut input: R) -> Result<()> {\n        if !self.is_reference() {\n            // don't create new struct if self is already Link::Reference,\n            // so we can re-use the key vec\n            *self = Link::default_reference();\n        }\n\n        if let Link::Reference {\n            ref mut key,\n            ref mut hash,\n            ref mut child_heights,\n        } = self\n        {\n            let length = read_u16(&mut input)? 
as usize;\n\n            key.resize(length, 0);\n            input.read_exact(key.as_mut())?;\n\n            input.read_exact(&mut hash[..])?;\n\n            child_heights.0 = read_u8(&mut input)?;\n            child_heights.1 = read_u8(&mut input)?;\n        } else {\n            unreachable!()\n        }\n\n        Ok(())\n    }\n}\n\nimpl Terminated for Link {}\n\n#[inline]\nfn read_u16<R: Read>(mut input: R) -> Result<u16> {\n    let mut length = [0, 0];\n    input.read_exact(length.as_mut())?;\n    Ok(u16::from_be_bytes(length))\n}\n\n#[inline]\nfn read_u8<R: Read>(mut input: R) -> Result<u8> {\n    let mut length = [0];\n    input.read_exact(length.as_mut())?;\n    Ok(length[0])\n}\n\n#[cfg(test)]\nmod test {\n    use super::super::hash::NULL_HASH;\n    use super::super::Tree;\n    use super::*;\n\n    #[test]\n    fn from_modified_tree() -> std::result::Result<(), &'static str> {\n        let tree = Tree::new(vec![0], vec![1]).map_err(|_| \"tree construction failed\")?;\n        let link = Link::from_modified_tree(tree);\n        assert!(link.is_modified());\n        assert_eq!(link.height(), 1);\n        assert_eq!(link.tree().expect(\"expected tree\").key(), &[0]);\n        if let Link::Modified { pending_writes, .. 
} = link {\n            assert_eq!(pending_writes, 1);\n            Ok(())\n        } else {\n            Err(\"Expected Link::Modified\")\n        }\n    }\n\n    #[test]\n    fn maybe_from_modified_tree() -> std::result::Result<(), crate::error::Error> {\n        let link = Link::maybe_from_modified_tree(None);\n        assert!(link.is_none());\n\n        let tree = Tree::new(vec![0], vec![1])?;\n        let link = Link::maybe_from_modified_tree(Some(tree));\n        assert!(link.expect(\"expected link\").is_modified());\n        Ok(())\n    }\n\n    #[test]\n    fn types() -> std::result::Result<(), crate::error::Error> {\n        let hash = NULL_HASH;\n        let child_heights = (0, 0);\n        let pending_writes = 1;\n        let key = vec![0];\n        let tree = || Tree::new(vec![0], vec![1]);\n\n        let reference = Link::Reference {\n            hash,\n            child_heights,\n            key,\n        };\n        let modified = Link::Modified {\n            pending_writes,\n            child_heights,\n            tree: tree()?,\n        };\n        let uncommitted = Link::Uncommitted {\n            hash,\n            child_heights,\n            tree: tree()?,\n        };\n        let loaded = Link::Loaded {\n            hash,\n            child_heights,\n            tree: tree()?,\n        };\n\n        assert!(reference.is_reference());\n        assert!(!reference.is_modified());\n        assert!(!reference.is_uncommitted());\n        assert!(!reference.is_stored());\n        assert!(reference.tree().is_none());\n        assert_eq!(reference.hash(), &[0; 32]);\n        assert_eq!(reference.height(), 1);\n        assert!(reference.into_reference().is_reference());\n\n        assert!(!modified.is_reference());\n        assert!(modified.is_modified());\n        assert!(!modified.is_uncommitted());\n        assert!(!modified.is_stored());\n        assert!(modified.tree().is_some());\n        assert_eq!(modified.height(), 1);\n\n        
assert!(!uncommitted.is_reference());\n        assert!(!uncommitted.is_modified());\n        assert!(uncommitted.is_uncommitted());\n        assert!(!uncommitted.is_stored());\n        assert!(uncommitted.tree().is_some());\n        assert_eq!(uncommitted.hash(), &[0; 32]);\n        assert_eq!(uncommitted.height(), 1);\n\n        assert!(!loaded.is_reference());\n        assert!(!loaded.is_modified());\n        assert!(!loaded.is_uncommitted());\n        assert!(loaded.is_stored());\n        assert!(loaded.tree().is_some());\n        assert_eq!(loaded.hash(), &[0; 32]);\n        assert_eq!(loaded.height(), 1);\n        assert!(loaded.into_reference().is_reference());\n        Ok(())\n    }\n\n    #[test]\n    #[should_panic(expected = \"Cannot get hash from modified link\")]\n    fn modified_hash() {\n        Tree::new(vec![0], vec![1])\n            .map(|tree| Link::Modified {\n                pending_writes: 1,\n                child_heights: (1, 1),\n                tree,\n            })\n            .map(|link| link.hash().to_vec())\n            .map(|_| ())\n            .unwrap_or_default()\n    }\n\n    #[test]\n    #[should_panic]\n    fn modified_into_reference() {\n        Link::Modified {\n            pending_writes: 1,\n            child_heights: (1, 1),\n            tree: Tree::new(vec![0], vec![1]).expect(\"tree construction failed\"),\n        }\n        .into_reference();\n    }\n\n    #[test]\n    #[should_panic]\n    fn uncommitted_into_reference() {\n        Link::Uncommitted {\n            hash: [1; 32],\n            child_heights: (1, 1),\n            tree: Tree::new(vec![0], vec![1]).expect(\"tree construction failed\"),\n        }\n        .into_reference();\n    }\n\n    #[test]\n    fn encode_link() {\n        let link = Link::Reference {\n            key: vec![1, 2, 3],\n            child_heights: (123, 124),\n            hash: [55; 32],\n        };\n        assert_eq!(link.encoding_length().unwrap(), 38);\n\n        let mut bytes = 
vec![];\n        link.encode_into(&mut bytes).unwrap();\n        assert_eq!(\n            bytes,\n            vec![\n                0, 3, 1, 2, 3, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55,\n                55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 123, 124\n            ]\n        );\n    }\n\n    #[test]\n    fn encode_link_long_key_valid() {\n        let link = Link::Reference {\n            key: vec![123; 60_000],\n            child_heights: (123, 124),\n            hash: [55; 32],\n        };\n        let mut bytes = vec![];\n        link.encode_into(&mut bytes).unwrap();\n\n        let decoded = Link::decode(&bytes[..]).unwrap();\n        assert_eq!(decoded, link);\n    }\n\n    #[test]\n    #[should_panic = \"Key length must be less than 65536\"]\n    fn encode_link_long_key_invalid() {\n        let link = Link::Reference {\n            key: vec![123; 70_000],\n            child_heights: (123, 124),\n            hash: [55; 32],\n        };\n        let mut bytes = vec![];\n        link.encode_into(&mut bytes).unwrap();\n    }\n}\n"
  },
  {
    "path": "src/tree/mod.rs",
    "content": "mod commit;\n#[cfg(feature = \"full\")]\nmod debug;\nmod encoding;\nmod fuzz_tests;\nmod hash;\nmod iter;\nmod kv;\nmod link;\nmod ops;\nmod walk;\n\nuse std::cmp::max;\n\nuse ed::{Decode, Encode};\n\nuse super::error::Result;\npub use commit::{Commit, NoopCommit};\npub use hash::{kv_hash, node_hash, Hash, Hasher, HASH_LENGTH, NULL_HASH};\nuse kv::KV;\npub use link::Link;\npub use ops::{Batch, BatchEntry, Op, PanicSource};\npub use walk::{Fetch, RefWalker, Walker};\n\n// TODO: remove need for `TreeInner`, and just use `Box<Self>` receiver for\n// relevant methods\n\n/// The fields of the `Tree` type, stored on the heap.\n#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)]\npub struct TreeInner {\n    left: Option<Link>,\n    right: Option<Link>,\n    kv: KV,\n}\n\n/// A binary AVL tree data structure, with Merkle hashes.\n///\n/// Trees' inner fields are stored on the heap so that nodes can recursively\n/// link to each other, and so we can detach nodes from their parents, then\n/// reattach without allocating or freeing heap memory.\n#[derive(Clone, PartialEq, Eq, Encode, Decode)]\npub struct Tree {\n    inner: Box<TreeInner>,\n}\n\nimpl Tree {\n    /// Creates a new `Tree` with the given key and value, and no children.\n    ///\n    /// Hashes the key/value pair and initializes the `kv_hash` field.\n    pub fn new(key: Vec<u8>, value: Vec<u8>) -> Result<Self> {\n        KV::new(key, value).map_err(Into::into).map(|kv| Tree {\n            inner: Box::new(TreeInner {\n                kv,\n                left: None,\n                right: None,\n            }),\n        })\n    }\n\n    /// Creates a `Tree` by supplying all the raw struct fields (mainly useful\n    /// for testing). 
The `kv_hash` and `Link`s are not ensured to be correct.\n    pub fn from_fields(\n        key: Vec<u8>,\n        value: Vec<u8>,\n        kv_hash: Hash,\n        left: Option<Link>,\n        right: Option<Link>,\n    ) -> Tree {\n        Tree {\n            inner: Box::new(TreeInner {\n                kv: KV::from_fields(key, value, kv_hash),\n                left,\n                right,\n            }),\n        }\n    }\n\n    /// Returns the root node's key as a slice.\n    #[inline]\n    pub fn key(&self) -> &[u8] {\n        self.inner.kv.key()\n    }\n\n    /// Consumes the tree and returns its root node's key, without having to\n    /// clone or allocate.\n    #[inline]\n    pub fn take_key(self) -> Vec<u8> {\n        self.inner.kv.take_key()\n    }\n\n    /// Returns the root node's value as a slice.\n    #[inline]\n    pub fn value(&self) -> &[u8] {\n        self.inner.kv.value()\n    }\n\n    /// Returns the hash of the root node's key/value pair.\n    #[inline]\n    pub fn kv_hash(&self) -> &Hash {\n        self.inner.kv.hash()\n    }\n\n    /// Returns a reference to the root node's `Link` on the given side, if any.\n    /// If there is no child, returns `None`.\n    #[inline]\n    pub fn link(&self, left: bool) -> Option<&Link> {\n        if left {\n            self.inner.left.as_ref()\n        } else {\n            self.inner.right.as_ref()\n        }\n    }\n\n    /// Returns a mutable reference to the root node's `Link` on the given side,\n    /// if any. 
If there is no child, returns `None`.\n    #[inline]\n    pub fn link_mut(&mut self, left: bool) -> Option<&mut Link> {\n        if left {\n            self.inner.left.as_mut()\n        } else {\n            self.inner.right.as_mut()\n        }\n    }\n\n    /// Returns a reference to the root node's child on the given side, if any.\n    /// If there is no child, returns `None`.\n    #[inline]\n    pub fn child(&self, left: bool) -> Option<&Self> {\n        match self.link(left) {\n            None => None,\n            Some(link) => link.tree(),\n        }\n    }\n\n    /// Returns a mutable reference to the root node's child on the given side,\n    /// if any. If there is no child, returns `None`.\n    #[inline]\n    pub fn child_mut(&mut self, left: bool) -> Option<&mut Self> {\n        match self.slot_mut(left).as_mut() {\n            None => None,\n            Some(Link::Reference { .. }) => None,\n            Some(Link::Modified { tree, .. }) => Some(tree),\n            Some(Link::Uncommitted { tree, .. }) => Some(tree),\n            Some(Link::Loaded { tree, .. }) => Some(tree),\n        }\n    }\n\n    /// Returns the hash of the root node's child on the given side, if any. If\n    /// there is no child, returns the null hash (zero-filled).\n    #[inline]\n    pub fn child_hash(&self, left: bool) -> &Hash {\n        self.link(left).map_or(&NULL_HASH, |link| link.hash())\n    }\n\n    /// Computes and returns the hash of the root node.\n    #[inline]\n    pub fn hash(&self) -> Hash {\n        node_hash::<Hasher>(\n            self.inner.kv.hash(),\n            self.child_hash(true),\n            self.child_hash(false),\n        )\n    }\n\n    /// Returns the number of pending writes for the child on the given side, if\n    /// any. If there is no child, returns 0.\n    #[inline]\n    pub fn child_pending_writes(&self, left: bool) -> usize {\n        match self.link(left) {\n            Some(Link::Modified { pending_writes, .. 
}) => *pending_writes,\n            _ => 0,\n        }\n    }\n\n    /// Returns the height of the child on the given side, if any. If there is\n    /// no child, returns 0.\n    #[inline]\n    pub fn child_height(&self, left: bool) -> u8 {\n        self.link(left).map_or(0, |child| child.height())\n    }\n\n    #[inline]\n    pub fn child_heights(&self) -> (u8, u8) {\n        (self.child_height(true), self.child_height(false))\n    }\n\n    /// Returns the height of the tree (the number of levels). For example, a\n    /// single node has height 1, a node with a single descendant has height 2,\n    /// etc.\n    #[inline]\n    pub fn height(&self) -> u8 {\n        1 + max(self.child_height(true), self.child_height(false))\n    }\n\n    /// Returns the balance factor of the root node. This is the difference\n    /// between the height of the right child (if any) and the height of the\n    /// left child (if any). For example, a balance factor of 2 means the right\n    /// subtree is 2 levels taller than the left subtree.\n    #[inline]\n    pub fn balance_factor(&self) -> i8 {\n        let left_height = self.child_height(true) as i8;\n        let right_height = self.child_height(false) as i8;\n        right_height - left_height\n    }\n\n    /// Attaches the child (if any) to the root node on the given side. 
Creates\n    /// a `Link` of variant `Link::Modified` which contains the child.\n    ///\n    /// Panics if there is already a child on the given side.\n    #[inline]\n    pub fn attach(mut self, left: bool, maybe_child: Option<Self>) -> Self {\n        debug_assert_ne!(\n            Some(self.key()),\n            maybe_child.as_ref().map(|c| c.key()),\n            \"Tried to attach tree with same key\"\n        );\n\n        let slot = self.slot_mut(left);\n\n        assert!(\n            !slot.is_some(),\n            \"Tried to attach to {} tree slot, but it is already Some\",\n            side_to_str(left)\n        );\n\n        *slot = Link::maybe_from_modified_tree(maybe_child);\n\n        self\n    }\n\n    /// Detaches the child on the given side (if any) from the root node, and\n    /// returns `(root_node, maybe_child)`.\n    ///\n    /// One will usually want to reattach (see `attach`) a child on the same\n    /// side after applying some operation to the detached child.\n    #[inline]\n    pub fn detach(mut self, left: bool) -> (Self, Option<Self>) {\n        let maybe_child = match self.slot_mut(left).take() {\n            None => None,\n            Some(Link::Reference { .. }) => None,\n            Some(Link::Modified { tree, .. }) => Some(tree),\n            Some(Link::Uncommitted { tree, .. }) => Some(tree),\n            Some(Link::Loaded { tree, .. 
}) => Some(tree),\n        };\n\n        (self, maybe_child)\n    }\n\n    /// Detaches the child on the given side from the root node, and\n    /// returns `(root_node, child)`.\n    ///\n    /// Panics if there is no child on the given side.\n    ///\n    /// One will usually want to reattach (see `attach`) a child on the same\n    /// side after applying some operation to the detached child.\n    #[inline]\n    pub fn detach_expect(self, left: bool) -> (Self, Self) {\n        let (parent, maybe_child) = self.detach(left);\n\n        if let Some(child) = maybe_child {\n            (parent, child)\n        } else {\n            panic!(\n                \"Expected tree to have {} child, but got None\",\n                side_to_str(left)\n            );\n        }\n    }\n\n    /// Detaches the child on the given side and passes it into `f`, which must\n    /// return a new child (either the same child, a new child to take its\n    /// place, or `None` to explicitly keep the slot empty).\n    ///\n    /// This is the same as `detach`, but with the function interface to enforce\n    /// at compile-time that an explicit final child value is returned. 
This is\n    /// less error prone than detaching with `detach` and reattaching with\n    /// `attach`.\n    #[inline]\n    pub fn walk<F>(self, left: bool, f: F) -> Self\n    where\n        F: FnOnce(Option<Self>) -> Option<Self>,\n    {\n        let (tree, maybe_child) = self.detach(left);\n        tree.attach(left, f(maybe_child))\n    }\n\n    /// Like `walk`, but panics if there is no child on the given side.\n    #[inline]\n    pub fn walk_expect<F>(self, left: bool, f: F) -> Self\n    where\n        F: FnOnce(Self) -> Option<Self>,\n    {\n        let (tree, child) = self.detach_expect(left);\n        tree.attach(left, f(child))\n    }\n\n    /// Returns a mutable reference to the child slot for the given side.\n    #[inline]\n    pub(crate) fn slot_mut(&mut self, left: bool) -> &mut Option<Link> {\n        if left {\n            &mut self.inner.left\n        } else {\n            &mut self.inner.right\n        }\n    }\n\n    /// Replaces the root node's value with the given value and returns the\n    /// modified `Tree`.\n    #[inline]\n    pub fn with_value(mut self, value: Vec<u8>) -> Result<Self> {\n        self.inner.kv = self.inner.kv.with_value(value)?;\n        Ok(self)\n    }\n\n    // TODO: add compute_hashes method\n\n    /// Called to finalize modifications to a tree, recompute its hashes, and\n    /// write the updated nodes to a backing store.\n    ///\n    /// Traverses through the tree, computing hashes for all modified links and\n    /// replacing them with `Link::Loaded` variants, writes out all changes to\n    /// the given `Commit` object's `write` method, and calls its `prune`\n    /// method to test whether or not to keep or prune nodes from memory.\n    #[inline]\n    pub fn commit<C: Commit>(&mut self, c: &mut C) -> Result<()> {\n        // TODO: make this method less ugly\n        // TODO: call write in-order for better performance in writing batch to db?\n\n        if let Some(Link::Modified { .. 
}) = self.inner.left {\n            if let Some(Link::Modified {\n                mut tree,\n                child_heights,\n                ..\n            }) = self.inner.left.take()\n            {\n                tree.commit(c)?;\n                self.inner.left = Some(Link::Loaded {\n                    hash: tree.hash(),\n                    tree,\n                    child_heights,\n                });\n            } else {\n                unreachable!()\n            }\n        }\n\n        if let Some(Link::Modified { .. }) = self.inner.right {\n            if let Some(Link::Modified {\n                mut tree,\n                child_heights,\n                ..\n            }) = self.inner.right.take()\n            {\n                tree.commit(c)?;\n                self.inner.right = Some(Link::Loaded {\n                    hash: tree.hash(),\n                    tree,\n                    child_heights,\n                });\n            } else {\n                unreachable!()\n            }\n        }\n\n        c.write(self)?;\n\n        let (prune_left, prune_right) = c.prune(self);\n        if prune_left {\n            self.inner.left = self.inner.left.take().map(|link| link.into_reference());\n        }\n        if prune_right {\n            self.inner.right = self.inner.right.take().map(|link| link.into_reference());\n        }\n\n        Ok(())\n    }\n\n    /// Fetches the child on the given side using the given data source, and\n    /// places it in the child slot (upgrading the link from `Link::Reference`\n    /// to `Link::Loaded`).\n    #[inline]\n    pub fn load<S: Fetch>(&mut self, left: bool, source: &S) -> Result<()> {\n        // TODO: return Err instead of panic?\n        let link = self.link(left).expect(\"Expected link\");\n        let (child_heights, hash) = match link {\n            Link::Reference {\n                child_heights,\n                hash,\n                ..\n            } => (child_heights, hash),\n            _ 
=> panic!(\"Expected Some(Link::Reference)\"),\n        };\n\n        let tree = source.fetch(link)?;\n        debug_assert_eq!(tree.key(), link.key());\n        *self.slot_mut(left) = Some(Link::Loaded {\n            tree,\n            hash: *hash,\n            child_heights: *child_heights,\n        });\n\n        Ok(())\n    }\n\n    pub fn get_value(&self, key: &[u8]) -> Result<GetResult> {\n        let mut cursor = self;\n\n        loop {\n            if key == cursor.key() {\n                return Ok(GetResult::Found(cursor.value().to_vec()));\n            }\n\n            let left = key < cursor.key();\n            let link = match cursor.link(left) {\n                None => return Ok(GetResult::NotFound), // not found\n                Some(link) => link,\n            };\n\n            let maybe_child = link.tree();\n            match maybe_child {\n                None => return Ok(GetResult::Pruned), /* value is pruned, caller will have to */\n                // fetch from disk\n                Some(child) => cursor = child, // traverse to child\n            }\n        }\n    }\n}\n\npub enum GetResult {\n    Found(Vec<u8>),\n    Pruned,\n    NotFound,\n}\n\npub fn side_to_str(left: bool) -> &'static str {\n    if left {\n        \"left\"\n    } else {\n        \"right\"\n    }\n}\n\n#[cfg(test)]\nmod test {\n    use super::commit::NoopCommit;\n    use super::hash::NULL_HASH;\n    use super::Tree;\n    use crate::error::Result;\n\n    #[test]\n    fn build_tree() -> Result<()> {\n        let tree = Tree::new(vec![1], vec![101])?;\n        assert_eq!(tree.key(), &[1]);\n        assert_eq!(tree.value(), &[101]);\n        assert!(tree.child(true).is_none());\n        assert!(tree.child(false).is_none());\n\n        let tree = tree.attach(true, None);\n        assert!(tree.child(true).is_none());\n        assert!(tree.child(false).is_none());\n\n        let tree = tree.attach(true, Some(Tree::new(vec![2], vec![102])?));\n        assert_eq!(tree.key(), 
&[1]);\n        assert_eq!(tree.child(true).unwrap().key(), &[2]);\n        assert!(tree.child(false).is_none());\n\n        let tree = Tree::new(vec![3], vec![103])?.attach(false, Some(tree));\n        assert_eq!(tree.key(), &[3]);\n        assert_eq!(tree.child(false).unwrap().key(), &[1]);\n        assert!(tree.child(true).is_none());\n        Ok(())\n    }\n\n    #[should_panic]\n    #[test]\n    fn attach_existing() {\n        Tree::new(vec![0], vec![1])\n            .expect(\"tree construction failed\")\n            .attach(\n                true,\n                Some(Tree::new(vec![2], vec![3]).expect(\"tree construction failed\")),\n            )\n            .attach(\n                true,\n                Some(Tree::new(vec![4], vec![5]).expect(\"tree construction failed\")),\n            );\n    }\n\n    #[test]\n    fn modify() -> Result<()> {\n        let tree = Tree::new(vec![0], vec![1])?\n            .attach(true, Some(Tree::new(vec![2], vec![3])?))\n            .attach(false, Some(Tree::new(vec![4], vec![5])?));\n\n        let tree = tree.walk(true, |left_opt| {\n            assert_eq!(left_opt.as_ref().unwrap().key(), &[2]);\n            None\n        });\n        assert!(tree.child(true).is_none());\n        assert!(tree.child(false).is_some());\n        let fixed_tree = Some(Tree::new(vec![2], vec![3])?);\n        let tree = tree.walk(true, |left_opt| {\n            assert!(left_opt.is_none());\n            fixed_tree\n        });\n        assert_eq!(tree.link(true).unwrap().key(), &[2]);\n\n        let tree = tree.walk_expect(false, |right| {\n            assert_eq!(right.key(), &[4]);\n            None\n        });\n        assert!(tree.child(true).is_some());\n        assert!(tree.child(false).is_none());\n        Ok(())\n    }\n\n    #[test]\n    fn child_and_link() -> Result<()> {\n        let mut tree =\n            Tree::new(vec![0], vec![1])?.attach(true, Some(Tree::new(vec![2], vec![3])?));\n        
assert!(tree.link(true).expect(\"expected link\").is_modified());\n        assert!(tree.child(true).is_some());\n        assert!(tree.link(false).is_none());\n        assert!(tree.child(false).is_none());\n\n        tree.commit(&mut NoopCommit {}).expect(\"commit failed\");\n        assert!(tree.link(true).expect(\"expected link\").is_stored());\n        assert!(tree.child(true).is_some());\n\n        let tree = tree.walk(true, |_| None);\n        assert!(tree.link(true).is_none());\n        assert!(tree.child(true).is_none());\n        Ok(())\n    }\n\n    #[test]\n    fn child_hash() -> Result<()> {\n        let mut tree =\n            Tree::new(vec![0], vec![1])?.attach(true, Some(Tree::new(vec![2], vec![3])?));\n        tree.commit(&mut NoopCommit {}).expect(\"commit failed\");\n        assert_eq!(\n            tree.child_hash(true),\n            &[\n                130, 215, 14, 92, 6, 226, 203, 67, 180, 206, 170, 81, 142, 77, 62, 33, 146, 78, 29,\n                252, 100, 149, 207, 172, 89, 254, 96, 166, 159, 49, 169, 106\n            ]\n        );\n        assert_eq!(tree.child_hash(false), &NULL_HASH);\n        Ok(())\n    }\n\n    #[test]\n    fn hash() -> Result<()> {\n        let tree = Tree::new(vec![0], vec![1])?;\n        assert_eq!(\n            tree.hash(),\n            [\n                115, 223, 9, 212, 135, 4, 223, 163, 244, 126, 25, 190, 255, 217, 132, 76, 219, 149,\n                151, 237, 164, 103, 67, 44, 196, 177, 227, 195, 217, 146, 156, 86\n            ]\n        );\n        Ok(())\n    }\n\n    #[test]\n    fn child_pending_writes() -> Result<()> {\n        let tree = Tree::new(vec![0], vec![1])?;\n        assert_eq!(tree.child_pending_writes(true), 0);\n        assert_eq!(tree.child_pending_writes(false), 0);\n\n        let tree = tree.attach(true, Some(Tree::new(vec![2], vec![3])?));\n        assert_eq!(tree.child_pending_writes(true), 1);\n        assert_eq!(tree.child_pending_writes(false), 0);\n        Ok(())\n    }\n\n    
#[test]\n    fn height_and_balance() -> Result<()> {\n        let tree = Tree::new(vec![0], vec![1])?;\n        assert_eq!(tree.height(), 1);\n        assert_eq!(tree.child_height(true), 0);\n        assert_eq!(tree.child_height(false), 0);\n        assert_eq!(tree.balance_factor(), 0);\n\n        let tree = tree.attach(true, Some(Tree::new(vec![2], vec![3])?));\n        assert_eq!(tree.height(), 2);\n        assert_eq!(tree.child_height(true), 1);\n        assert_eq!(tree.child_height(false), 0);\n        assert_eq!(tree.balance_factor(), -1);\n\n        let (tree, maybe_child) = tree.detach(true);\n        let tree = tree.attach(false, maybe_child);\n        assert_eq!(tree.height(), 2);\n        assert_eq!(tree.child_height(true), 0);\n        assert_eq!(tree.child_height(false), 1);\n        assert_eq!(tree.balance_factor(), 1);\n        Ok(())\n    }\n\n    #[test]\n    fn commit() -> Result<()> {\n        let mut tree =\n            Tree::new(vec![0], vec![1])?.attach(false, Some(Tree::new(vec![2], vec![3])?));\n        tree.commit(&mut NoopCommit {}).expect(\"commit failed\");\n\n        assert!(tree.link(false).expect(\"expected link\").is_stored());\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "src/tree/ops.rs",
    "content": "use super::{Fetch, Tree, Walker};\nuse crate::error::Result;\nuse std::collections::LinkedList;\nuse std::fmt;\nuse Op::*;\n\n/// An operation to be applied to a key in the store.\npub enum Op {\n    /// Inserts or updates the key/value entry to the given value.\n    Put(Vec<u8>),\n    /// Deletes the key/value entry.\n    Delete,\n}\n\nimpl fmt::Debug for Op {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        writeln!(\n            f,\n            \"{}\",\n            match self {\n                Put(value) => format!(\"Put({value:?})\"),\n                Delete => \"Delete\".to_string(),\n            }\n        )\n    }\n}\n\n/// A single `(key, operation)` pair.\npub type BatchEntry = (Vec<u8>, Op);\n\n/// A mapping of keys and operations. Keys should be sorted and unique.\npub type Batch = [BatchEntry];\n\n/// A source of data which panics when called. Useful when creating a store\n/// which always keeps the state in memory.\n#[derive(Clone)]\npub struct PanicSource {}\n\nimpl Fetch for PanicSource {\n    fn fetch_by_key(&self, _: &[u8]) -> Result<Option<Tree>> {\n        unreachable!()\n    }\n}\n\nimpl<S> Walker<S>\nwhere\n    S: Fetch + Sized + Send + Clone,\n{\n    /// Applies a batch of operations, possibly creating a new tree if\n    /// `maybe_tree` is `None`. 
This is similar to `Walker<S>::apply`, but does\n    /// not require a non-empty tree.\n    ///\n    /// Keys in batch must be sorted and unique.\n    pub fn apply_to(\n        maybe_tree: Option<Self>,\n        batch: &Batch,\n        source: S,\n    ) -> Result<(Option<Tree>, LinkedList<Vec<u8>>)> {\n        let (maybe_walker, deleted_keys) = if batch.is_empty() {\n            (maybe_tree, LinkedList::default())\n        } else {\n            match maybe_tree {\n                None => return Ok((Self::build(batch, source)?, LinkedList::default())),\n                Some(tree) => tree.apply(batch)?,\n            }\n        };\n\n        let maybe_tree = maybe_walker.map(|walker| walker.into_inner());\n        Ok((maybe_tree, deleted_keys))\n    }\n\n    /// Builds a `Tree` from a batch of operations.\n    ///\n    /// Keys in batch must be sorted and unique.\n    fn build(batch: &Batch, source: S) -> Result<Option<Tree>> {\n        if batch.is_empty() {\n            return Ok(None);\n        }\n\n        let mid_index = batch.len() / 2;\n        let (mid_key, mid_op) = &batch[mid_index];\n        let mid_value = match mid_op {\n            Delete => {\n                let left_batch = &batch[..mid_index];\n                let right_batch = &batch[mid_index + 1..];\n\n                let maybe_tree = Self::build(left_batch, source.clone())?\n                    .map(|tree| Self::new(tree, source.clone()));\n                let maybe_tree = match maybe_tree {\n                    Some(tree) => tree.apply(right_batch)?.0,\n                    None => Self::build(right_batch, source.clone())?\n                        .map(|tree| Self::new(tree, source.clone())),\n                };\n                return Ok(maybe_tree.map(|tree| tree.into()));\n            }\n            Put(value) => value,\n        };\n\n        // TODO: take from batch so we don't have to clone\n        let mid_tree = Tree::new(mid_key.to_vec(), mid_value.to_vec())?;\n        let mid_walker = 
Walker::new(mid_tree, PanicSource {});\n        Ok(mid_walker\n            .recurse(batch, mid_index, true)?\n            .0 // use walker, ignore deleted_keys since it should be empty\n            .map(|w| w.into_inner()))\n    }\n\n    /// Applies a batch of operations to an existing tree. This is similar to\n    /// `Walker<S>::apply_to`, but requires a populated tree.\n    ///\n    /// Keys in batch must be sorted and unique.\n    fn apply(self, batch: &Batch) -> Result<(Option<Self>, LinkedList<Vec<u8>>)> {\n        // binary search to see if this node's key is in the batch, and to split\n        // into left and right batches\n        let search = batch.binary_search_by(|(key, _op)| key.as_slice().cmp(self.tree().key()));\n        let tree = if let Ok(index) = search {\n            // a key matches this node's key, apply op to this node\n            match &batch[index].1 {\n                // TODO: take vec from batch so we don't need to clone\n                Put(value) => self.with_value(value.to_vec()),\n                Delete => {\n                    let source = self.clone_source();\n                    let key = self.tree().key().to_vec();\n\n                    let (walker, maybe_left) = self.detach(true)?;\n                    let (walker, maybe_right) = walker.detach(false)?;\n\n                    let (maybe_left, mut deleted_keys) =\n                        Self::apply_to(maybe_left, &batch[..index], source.clone())?;\n\n                    deleted_keys.push_back(key);\n\n                    let (maybe_right, mut deleted_keys_right) =\n                        Self::apply_to(maybe_right, &batch[index + 1..], source)?;\n                    deleted_keys.append(&mut deleted_keys_right);\n\n                    let maybe_walker = walker\n                        .attach(true, maybe_left)\n                        .attach(false, maybe_right)\n                        .remove()?\n                        .map(|w| w.maybe_balance())\n                        
.transpose()?;\n\n                    return Ok((maybe_walker, deleted_keys));\n                }\n            }\n        } else {\n            Ok(self)\n        };\n\n        let (mid, exclusive) = match search {\n            Ok(index) => (index, true),\n            Err(index) => (index, false),\n        };\n\n        tree?.recurse(batch, mid, exclusive)\n    }\n\n    /// Recursively applies operations to the tree's children (if there are any\n    /// operations for them).\n    ///\n    /// This recursion executes serially in the same thread, but in the future\n    /// will be dispatched to workers in other threads.\n    fn recurse(\n        self,\n        batch: &Batch,\n        mid: usize,\n        exclusive: bool,\n    ) -> Result<(Option<Self>, LinkedList<Vec<u8>>)> {\n        let left_batch = &batch[..mid];\n        let right_batch = if exclusive {\n            &batch[mid + 1..]\n        } else {\n            &batch[mid..]\n        };\n\n        let mut deleted_keys = LinkedList::default();\n\n        let tree = if !left_batch.is_empty() {\n            let source = self.clone_source();\n            self.walk(true, |maybe_left| {\n                let (maybe_left, mut deleted_keys_left) =\n                    Self::apply_to(maybe_left, left_batch, source)?;\n                deleted_keys.append(&mut deleted_keys_left);\n                Ok(maybe_left)\n            })?\n        } else {\n            self\n        };\n\n        let tree = if !right_batch.is_empty() {\n            let source = tree.clone_source();\n            tree.walk(false, |maybe_right| {\n                let (maybe_right, mut deleted_keys_right) =\n                    Self::apply_to(maybe_right, right_batch, source)?;\n                deleted_keys.append(&mut deleted_keys_right);\n                Ok(maybe_right)\n            })?\n        } else {\n            tree\n        };\n\n        let tree = tree.maybe_balance()?;\n\n        Ok((Some(tree), deleted_keys))\n    }\n\n    /// Gets the 
wrapped tree's balance factor.\n    #[inline]\n    fn balance_factor(&self) -> i8 {\n        self.tree().balance_factor()\n    }\n\n    /// Checks if the tree is unbalanced and if so, applies AVL tree rotation(s)\n    /// to rebalance the tree and its subtrees. Returns the root node of the\n    /// balanced tree after applying the rotations.\n    fn maybe_balance(self) -> Result<Self> {\n        let balance_factor = self.balance_factor();\n        if balance_factor.abs() <= 1 {\n            return Ok(self);\n        }\n\n        let left = balance_factor < 0;\n\n        // maybe do a double rotation\n        let tree = if left == (self.tree().link(left).unwrap().balance_factor() > 0) {\n            self.walk_expect(left, |child| Ok(Some(child.rotate(!left)?)))?\n        } else {\n            self\n        };\n\n        tree.rotate(left)\n    }\n\n    /// Applies an AVL tree rotation, a constant-time operation which only needs\n    /// to swap pointers in order to rebalance a tree.\n    fn rotate(self, left: bool) -> Result<Self> {\n        let (tree, child) = self.detach_expect(left)?;\n        let (child, maybe_grandchild) = child.detach(!left)?;\n\n        // attach grandchild to self\n        let tree = tree.attach(left, maybe_grandchild).maybe_balance()?;\n\n        // attach self to child, return child\n        child.attach(!left, Some(tree)).maybe_balance()\n    }\n\n    /// Removes the root node from the tree. 
Rearranges and rebalances\n    /// descendants (if any) in order to maintain a valid tree.\n    pub fn remove(self) -> Result<Option<Self>> {\n        let tree = self.tree();\n        let has_left = tree.link(true).is_some();\n        let has_right = tree.link(false).is_some();\n        let left = tree.child_height(true) > tree.child_height(false);\n\n        let maybe_tree = if has_left && has_right {\n            // two children, promote edge of taller child\n            let (tree, tall_child) = self.detach_expect(left)?;\n            let (_, short_child) = tree.detach_expect(!left)?;\n            Some(tall_child.promote_edge(!left, short_child)?)\n        } else if has_left || has_right {\n            // single child, promote it\n            Some(self.detach_expect(left)?.1)\n        } else {\n            // no child\n            None\n        };\n\n        Ok(maybe_tree)\n    }\n\n    /// Traverses to find the tree's edge on the given side, removes it, and\n    /// reattaches it at the top in order to fill in a gap when removing a root\n    /// node from a tree with both left and right children. Attaches `attach` on\n    /// the opposite side. Returns the promoted node.\n    fn promote_edge(self, left: bool, attach: Self) -> Result<Self> {\n        let (edge, maybe_child) = self.remove_edge(left)?;\n        edge.attach(!left, maybe_child)\n            .attach(left, Some(attach))\n            .maybe_balance()\n    }\n\n    /// Traverses to the tree's edge on the given side and detaches it\n    /// (reattaching its child, if any, to its former parent). 
Return value is\n    /// `(edge, maybe_updated_tree)`.\n    fn remove_edge(self, left: bool) -> Result<(Self, Option<Self>)> {\n        if self.tree().link(left).is_some() {\n            // this node is not the edge, recurse\n            let (tree, child) = self.detach_expect(left)?;\n            let (edge, maybe_child) = child.remove_edge(left)?;\n            let tree = tree.attach(left, maybe_child).maybe_balance()?;\n            Ok((edge, Some(tree)))\n        } else {\n            // this node is the edge, detach its child if present\n            self.detach(!left)\n        }\n    }\n}\n\n#[cfg(test)]\nmod test {\n    use super::*;\n    use crate::test_utils::{\n        apply_memonly, assert_tree_invariants, del_entry, make_tree_seq, put_entry, seq_key,\n    };\n    use crate::tree::*;\n\n    #[test]\n    fn simple_insert() -> Result<()> {\n        let batch = [(b\"foo2\".to_vec(), Op::Put(b\"bar2\".to_vec()))];\n        let tree = Tree::new(b\"foo\".to_vec(), b\"bar\".to_vec())?;\n        let (maybe_walker, deleted_keys) = Walker::new(tree, PanicSource {})\n            .apply(&batch)\n            .expect(\"apply errored\");\n        let walker = maybe_walker.expect(\"should be Some\");\n        assert_eq!(walker.tree().key(), b\"foo\");\n        assert_eq!(walker.into_inner().child(false).unwrap().key(), b\"foo2\");\n        assert!(deleted_keys.is_empty());\n        Ok(())\n    }\n\n    #[test]\n    fn simple_update() -> Result<()> {\n        let batch = [(b\"foo\".to_vec(), Op::Put(b\"bar2\".to_vec()))];\n        let tree = Tree::new(b\"foo\".to_vec(), b\"bar\".to_vec())?;\n        let (maybe_walker, deleted_keys) = Walker::new(tree, PanicSource {})\n            .apply(&batch)\n            .expect(\"apply errored\");\n        let walker = maybe_walker.expect(\"should be Some\");\n        assert_eq!(walker.tree().key(), b\"foo\");\n        assert_eq!(walker.tree().value(), b\"bar2\");\n        assert!(walker.tree().link(true).is_none());\n        
assert!(walker.tree().link(false).is_none());\n        assert!(deleted_keys.is_empty());\n        Ok(())\n    }\n\n    #[test]\n    fn simple_delete() -> Result<()> {\n        let batch = [(b\"foo2\".to_vec(), Op::Delete)];\n        let tree = Tree::from_fields(\n            b\"foo\".to_vec(),\n            b\"bar\".to_vec(),\n            [123; 32],\n            None,\n            Some(Link::Loaded {\n                hash: [123; 32],\n                child_heights: (0, 0),\n                tree: Tree::new(b\"foo2\".to_vec(), b\"bar2\".to_vec())?,\n            }),\n        );\n        let (maybe_walker, deleted_keys) = Walker::new(tree, PanicSource {})\n            .apply(&batch)\n            .expect(\"apply errored\");\n        let walker = maybe_walker.expect(\"should be Some\");\n        assert_eq!(walker.tree().key(), b\"foo\");\n        assert_eq!(walker.tree().value(), b\"bar\");\n        assert!(walker.tree().link(true).is_none());\n        assert!(walker.tree().link(false).is_none());\n        assert_eq!(deleted_keys.len(), 1);\n        assert_eq!(*deleted_keys.front().unwrap(), b\"foo2\");\n        Ok(())\n    }\n\n    #[test]\n    fn delete_non_existent() -> Result<()> {\n        let batch = [(b\"foo2\".to_vec(), Op::Delete)];\n        let tree = Tree::new(b\"foo\".to_vec(), b\"bar\".to_vec())?;\n        Walker::new(tree, PanicSource {}).apply(&batch).unwrap();\n        Ok(())\n    }\n\n    #[test]\n    fn delete_only_node() -> Result<()> {\n        let batch = [(b\"foo\".to_vec(), Op::Delete)];\n        let tree = Tree::new(b\"foo\".to_vec(), b\"bar\".to_vec())?;\n        let (maybe_walker, deleted_keys) = Walker::new(tree, PanicSource {})\n            .apply(&batch)\n            .expect(\"apply errored\");\n        assert!(maybe_walker.is_none());\n        assert_eq!(deleted_keys.len(), 1);\n        assert_eq!(deleted_keys.front().unwrap(), b\"foo\");\n        Ok(())\n    }\n\n    #[test]\n    fn delete_deep() {\n        let tree = make_tree_seq(50);\n    
    let batch = [del_entry(5)];\n        let (maybe_walker, deleted_keys) = Walker::new(tree, PanicSource {})\n            .apply(&batch)\n            .expect(\"apply errored\");\n        maybe_walker.expect(\"should be Some\");\n        assert_eq!(deleted_keys.len(), 1);\n        assert_eq!(*deleted_keys.front().unwrap(), seq_key(5));\n    }\n\n    #[test]\n    fn delete_recursive() {\n        let tree = make_tree_seq(50);\n        let batch = [del_entry(29), del_entry(34)];\n        let (maybe_walker, mut deleted_keys) = Walker::new(tree, PanicSource {})\n            .apply(&batch)\n            .expect(\"apply errored\");\n        maybe_walker.expect(\"should be Some\");\n        assert_eq!(deleted_keys.len(), 2);\n        assert_eq!(deleted_keys.pop_front().unwrap(), seq_key(29));\n        assert_eq!(deleted_keys.pop_front().unwrap(), seq_key(34));\n    }\n\n    #[test]\n    fn delete_recursive_2() {\n        let tree = make_tree_seq(10);\n        let batch = [del_entry(7), del_entry(9)];\n        let (maybe_walker, deleted_keys) = Walker::new(tree, PanicSource {})\n            .apply(&batch)\n            .expect(\"apply errored\");\n        maybe_walker.expect(\"should be Some\");\n        let mut deleted_keys: Vec<&Vec<u8>> = deleted_keys.iter().collect();\n        deleted_keys.sort();\n        assert_eq!(deleted_keys, vec![&seq_key(7), &seq_key(9)]);\n    }\n\n    #[test]\n    fn rebalanced_delete() {\n        let tree = make_tree_seq(7);\n\n        let walker = Walker::new(tree, PanicSource {})\n            .apply(&[(vec![0; 20], Delete)])\n            .expect(\"apply errored\")\n            .0\n            .unwrap();\n\n        let batch = [\n            put_entry(0),\n            put_entry(1),\n            put_entry(2),\n            put_entry(3),\n            del_entry(4),\n            del_entry(5),\n            del_entry(6),\n        ];\n        let (maybe_walker, deleted_keys) = walker.apply(&batch).expect(\"apply errored\");\n        let walker = 
maybe_walker.expect(\"should be Some\");\n\n        let mut deleted_keys: Vec<&Vec<u8>> = deleted_keys.iter().collect();\n        deleted_keys.sort();\n        assert_eq!(deleted_keys, vec![&seq_key(4), &seq_key(5), &seq_key(6)]);\n\n        let mut iter = walker.tree().iter();\n        assert_eq!(iter.next().unwrap().0, seq_key(0));\n        assert_eq!(iter.next().unwrap().0, seq_key(1));\n        assert_eq!(iter.next().unwrap().0, seq_key(2));\n        assert_eq!(iter.next().unwrap().0, seq_key(3));\n        assert!(iter.next().is_none());\n    }\n\n    #[test]\n    fn apply_empty_none() {\n        let (maybe_tree, deleted_keys) =\n            Walker::<PanicSource>::apply_to(None, &[], PanicSource {}).expect(\"apply_to failed\");\n        assert!(maybe_tree.is_none());\n        assert!(deleted_keys.is_empty());\n    }\n\n    #[test]\n    fn insert_empty_single() {\n        let batch = vec![(vec![0], Op::Put(vec![1]))];\n        let (maybe_tree, deleted_keys) =\n            Walker::<PanicSource>::apply_to(None, &batch, PanicSource {}).expect(\"apply_to failed\");\n        let tree = maybe_tree.expect(\"expected tree\");\n        assert_eq!(tree.key(), &[0]);\n        assert_eq!(tree.value(), &[1]);\n        assert_tree_invariants(&tree);\n        assert!(deleted_keys.is_empty());\n    }\n\n    #[test]\n    fn insert_root_single() -> Result<()> {\n        let tree = Tree::new(vec![5], vec![123])?;\n        let batch = vec![(vec![6], Op::Put(vec![123]))];\n        let tree = apply_memonly(tree, &batch);\n        assert_eq!(tree.key(), &[5]);\n        assert!(tree.child(true).is_none());\n        assert_eq!(tree.child(false).expect(\"expected child\").key(), &[6]);\n        Ok(())\n    }\n\n    #[test]\n    fn insert_root_double() -> Result<()> {\n        let tree = Tree::new(vec![5], vec![123])?;\n        let batch = vec![(vec![4], Op::Put(vec![123])), (vec![6], Op::Put(vec![123]))];\n        let tree = apply_memonly(tree, &batch);\n        assert_eq!(tree.key(), 
&[5]);\n        assert_eq!(tree.child(true).expect(\"expected child\").key(), &[4]);\n        assert_eq!(tree.child(false).expect(\"expected child\").key(), &[6]);\n        Ok(())\n    }\n\n    #[test]\n    fn insert_rebalance() -> Result<()> {\n        let tree = Tree::new(vec![5], vec![123])?;\n\n        let batch = vec![(vec![6], Op::Put(vec![123]))];\n        let tree = apply_memonly(tree, &batch);\n\n        let batch = vec![(vec![7], Op::Put(vec![123]))];\n        let tree = apply_memonly(tree, &batch);\n\n        assert_eq!(tree.key(), &[6]);\n        assert_eq!(tree.child(true).expect(\"expected child\").key(), &[5]);\n        assert_eq!(tree.child(false).expect(\"expected child\").key(), &[7]);\n        Ok(())\n    }\n\n    #[test]\n    fn insert_100_sequential() -> Result<()> {\n        let mut tree = Tree::new(vec![0], vec![123])?;\n\n        for i in 0..100 {\n            let batch = vec![(vec![i + 1], Op::Put(vec![123]))];\n            tree = apply_memonly(tree, &batch);\n        }\n\n        assert_eq!(tree.key(), &[63]);\n        assert_eq!(tree.child(true).expect(\"expected child\").key(), &[31]);\n        assert_eq!(tree.child(false).expect(\"expected child\").key(), &[79]);\n        Ok(())\n    }\n\n    #[test]\n    fn delete_recursive_large() {\n        let tree = make_tree_seq(2_500);\n\n        let mut batch = vec![];\n        for i in 500..2_000 {\n            batch.push(del_entry(i));\n        }\n\n        let (maybe_walker, deleted_keys) = Walker::new(tree, PanicSource {})\n            .apply(&batch)\n            .expect(\"apply errored\");\n        maybe_walker.expect(\"should be Some\");\n        assert_eq!(deleted_keys.len(), 1_500);\n    }\n}\n"
  },
  {
    "path": "src/tree/walk/fetch.rs",
    "content": "use super::super::{Link, Tree};\nuse crate::error::{Error, Result};\n\n/// A source of data to be used by the tree when encountering a pruned node.\n///\n/// This typically means fetching the tree node from a backing store by its key,\n/// but could also implement an in-memory cache for example.\npub trait Fetch {\n    fn fetch_by_key(&self, key: &[u8]) -> Result<Option<Tree>>;\n\n    /// Called when the tree needs to fetch a node with the given `Link`. The\n    /// `link` value will always be a `Link::Reference` variant.\n    fn fetch(&self, link: &Link) -> Result<Tree> {\n        self.fetch_by_key_expect(link.key())\n    }\n\n    fn fetch_by_key_expect(&self, key: &[u8]) -> Result<Tree> {\n        self.fetch_by_key(key)?\n            .ok_or_else(|| Error::Key(format!(\"Key does not exist: {key:?}\")))\n    }\n}\n"
  },
  {
    "path": "src/tree/walk/mod.rs",
    "content": "mod fetch;\nmod ref_walker;\n\nuse super::{Link, Tree};\nuse crate::error::Result;\nuse crate::owner::Owner;\npub use fetch::Fetch;\npub use ref_walker::RefWalker;\n\n/// Allows traversal of a `Tree`, fetching from the given source when traversing\n/// to a pruned node, detaching children as they are traversed.\npub struct Walker<S>\nwhere\n    S: Fetch + Sized + Clone + Send,\n{\n    tree: Owner<Tree>,\n    source: S,\n}\n\nimpl<S> Walker<S>\nwhere\n    S: Fetch + Sized + Clone + Send,\n{\n    /// Creates a `Walker` with the given tree and source.\n    pub fn new(tree: Tree, source: S) -> Self {\n        Walker {\n            tree: Owner::new(tree),\n            source,\n        }\n    }\n\n    /// Similar to `Tree#detach`, but yields a `Walker` which fetches from the\n    /// same source as `self`. Returned tuple is `(updated_self,\n    /// maybe_child_walker)`.\n    pub fn detach(mut self, left: bool) -> Result<(Self, Option<Self>)> {\n        let link = match self.tree.link(left) {\n            None => return Ok((self, None)),\n            Some(link) => link,\n        };\n\n        let child = if link.tree().is_some() {\n            match self.tree.own_return(|t| t.detach(left)) {\n                Some(child) => child,\n                _ => unreachable!(\"Expected Some\"),\n            }\n        } else {\n            let link = self.tree.slot_mut(left).take();\n            match link {\n                Some(Link::Reference { .. }) => (),\n                _ => unreachable!(\"Expected Some(Link::Reference)\"),\n            }\n            self.source.fetch(&link.unwrap())?\n        };\n\n        let child = self.wrap(child);\n        Ok((self, Some(child)))\n    }\n\n    /// Similar to `Tree#detach_expect`, but yields a `Walker` which fetches\n    /// from the same source as `self`. 
Returned tuple is `(updated_self,\n    /// child_walker)`.\n    pub fn detach_expect(self, left: bool) -> Result<(Self, Self)> {\n        let (walker, maybe_child) = self.detach(left)?;\n        if let Some(child) = maybe_child {\n            Ok((walker, child))\n        } else {\n            panic!(\n                \"Expected {} child, got None\",\n                if left { \"left\" } else { \"right\" }\n            );\n        }\n    }\n\n    /// Similar to `Tree#walk`, but yields a `Walker` which fetches from the\n    /// same source as `self`.\n    pub fn walk<F, T>(self, left: bool, f: F) -> Result<Self>\n    where\n        F: FnOnce(Option<Self>) -> Result<Option<T>>,\n        T: Into<Tree>,\n    {\n        let (mut walker, maybe_child) = self.detach(left)?;\n        let new_child = f(maybe_child)?.map(|t| t.into());\n        walker.tree.own(|t| t.attach(left, new_child));\n        Ok(walker)\n    }\n\n    /// Similar to `Tree#walk_expect` but yields a `Walker` which fetches from\n    /// the same source as `self`.\n    pub fn walk_expect<F, T>(self, left: bool, f: F) -> Result<Self>\n    where\n        F: FnOnce(Self) -> Result<Option<T>>,\n        T: Into<Tree>,\n    {\n        let (mut walker, child) = self.detach_expect(left)?;\n        let new_child = f(child)?.map(|t| t.into());\n        walker.tree.own(|t| t.attach(left, new_child));\n        Ok(walker)\n    }\n\n    /// Returns an immutable reference to the `Tree` wrapped by this walker.\n    pub fn tree(&self) -> &Tree {\n        &self.tree\n    }\n\n    /// Consumes the `Walker` and returns the `Tree` it wraps.\n    pub fn into_inner(self) -> Tree {\n        self.tree.into_inner()\n    }\n\n    /// Takes a `Tree` and returns a `Walker` which fetches from the same source\n    /// as `self`.\n    fn wrap(&self, tree: Tree) -> Self {\n        Walker::new(tree, self.source.clone())\n    }\n\n    /// Returns a clone of this `Walker`'s source.\n    pub fn clone_source(&self) -> S {\n        
self.source.clone()\n    }\n\n    /// Similar to `Tree#attach`, but can also take a `Walker` since it\n    /// implements `Into<Tree>`.\n    pub fn attach<T>(mut self, left: bool, maybe_child: Option<T>) -> Self\n    where\n        T: Into<Tree>,\n    {\n        self.tree\n            .own(|t| t.attach(left, maybe_child.map(|t| t.into())));\n        self\n    }\n\n    /// Similar to `Tree#with_value`.\n    pub fn with_value(mut self, value: Vec<u8>) -> Result<Self> {\n        self.tree.own_fallible(|t| t.with_value(value))?;\n        Ok(self)\n    }\n}\n\nimpl<S> From<Walker<S>> for Tree\nwhere\n    S: Fetch + Sized + Clone + Send,\n{\n    fn from(walker: Walker<S>) -> Tree {\n        walker.into_inner()\n    }\n}\n\n#[cfg(test)]\nmod test {\n    use super::super::NoopCommit;\n    use super::*;\n    use crate::tree::Tree;\n\n    #[derive(Clone)]\n    struct MockSource {}\n\n    impl Fetch for MockSource {\n        fn fetch_by_key(&self, key: &[u8]) -> Result<Option<Tree>> {\n            Tree::new(key.to_vec(), b\"foo\".to_vec()).map(Some)\n        }\n    }\n\n    #[test]\n    fn walk_modified() -> Result<()> {\n        let tree = Tree::new(b\"test\".to_vec(), b\"abc\".to_vec())?\n            .attach(true, Some(Tree::new(b\"foo\".to_vec(), b\"bar\".to_vec())?));\n\n        let source = MockSource {};\n        let walker = Walker::new(tree, source);\n\n        let walker = walker\n            .walk(true, |child| -> Result<Option<Tree>> {\n                assert_eq!(child.expect(\"should have child\").tree().key(), b\"foo\");\n                Ok(None)\n            })\n            .expect(\"walk failed\");\n        assert!(walker.into_inner().child(true).is_none());\n        Ok(())\n    }\n\n    #[test]\n    fn walk_stored() -> Result<()> {\n        let mut tree = Tree::new(b\"test\".to_vec(), b\"abc\".to_vec())?\n            .attach(true, Some(Tree::new(b\"foo\".to_vec(), b\"bar\".to_vec())?));\n        tree.commit(&mut NoopCommit {}).expect(\"commit failed\");\n\n    
    let source = MockSource {};\n        let walker = Walker::new(tree, source);\n\n        let walker = walker\n            .walk(true, |child| -> Result<Option<Tree>> {\n                assert_eq!(child.expect(\"should have child\").tree().key(), b\"foo\");\n                Ok(None)\n            })\n            .expect(\"walk failed\");\n        assert!(walker.into_inner().child(true).is_none());\n        Ok(())\n    }\n\n    #[test]\n    fn walk_pruned() {\n        let tree = Tree::from_fields(\n            b\"test\".to_vec(),\n            b\"abc\".to_vec(),\n            Default::default(),\n            Some(Link::Reference {\n                hash: Default::default(),\n                key: b\"foo\".to_vec(),\n                child_heights: (0, 0),\n            }),\n            None,\n        );\n\n        let source = MockSource {};\n        let walker = Walker::new(tree, source);\n\n        let walker = walker\n            .walk_expect(true, |child| -> Result<Option<Tree>> {\n                assert_eq!(child.tree().key(), b\"foo\");\n                Ok(None)\n            })\n            .expect(\"walk failed\");\n        assert!(walker.into_inner().child(true).is_none());\n    }\n\n    #[test]\n    fn walk_none() -> Result<()> {\n        let tree = Tree::new(b\"test\".to_vec(), b\"abc\".to_vec())?;\n\n        let source = MockSource {};\n        let walker = Walker::new(tree, source);\n\n        walker\n            .walk(true, |child| -> Result<Option<Tree>> {\n                assert!(child.is_none());\n                Ok(None)\n            })\n            .expect(\"walk failed\");\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "src/tree/walk/ref_walker.rs",
    "content": "use super::super::{Link, Tree};\nuse super::Fetch;\nuse crate::error::Result;\n\n/// Allows read-only traversal of a `Tree`, fetching from the given source when\n/// traversing to a pruned node.\n///\n/// The fetched nodes are in memory until they (possibly) get pruned on the next\n/// commit.\n///\n/// Only finalized trees may be walked (trees which have had `commit` called\n/// since the last update).\npub struct RefWalker<'a, S>\nwhere\n    S: Fetch + Sized + Clone + Send,\n{\n    tree: &'a mut Tree,\n    source: S,\n}\n\nimpl<'a, S> RefWalker<'a, S>\nwhere\n    S: Fetch + Sized + Clone + Send,\n{\n    /// Creates a `RefWalker` with the given tree and source.\n    pub fn new(tree: &'a mut Tree, source: S) -> Self {\n        // TODO: check if tree has modified links, panic if so\n        RefWalker { tree, source }\n    }\n\n    /// Gets an immutable reference to the `Tree` wrapped by this `RefWalker`.\n    pub fn tree(&self) -> &Tree {\n        self.tree\n    }\n\n    /// Traverses to the child on the given side (if any), fetching from the\n    /// source if pruned. When fetching, the link is upgraded from\n    /// `Link::Reference` to `Link::Loaded`.\n    pub fn walk(&mut self, left: bool) -> Result<Option<RefWalker<S>>> {\n        let link = match self.tree.link(left) {\n            None => return Ok(None),\n            Some(link) => link,\n        };\n\n        match link {\n            Link::Reference { .. } => {\n                self.tree.load(left, &self.source)?;\n            }\n            Link::Modified { .. } => panic!(\"Cannot traverse Link::Modified\"),\n            Link::Uncommitted { .. } | Link::Loaded { .. } => {}\n        }\n\n        let child = self.tree.child_mut(left).unwrap();\n        Ok(Some(RefWalker::new(child, self.source.clone())))\n    }\n}\n"
  }
]