[
  {
    "path": ".github/dependabot.yml",
    "content": "version: 2\nupdates:\n- package-ecosystem: cargo\n  directory: \"/\"\n  schedule:\n    interval: daily\n  open-pull-requests-limit: 10\n  ignore:\n  # Ignore raw-window-handle because it's tied to ash-window\n  - dependency-name: raw-window-handle\n  # Ignore rustls and rustls-pemfile because they're tied to quinn\n  - dependency-name: rustls\n  - dependency-name: rustls-pemfile\n"
  },
  {
    "path": ".github/workflows/package.yml",
    "content": "name: Package\n\non:\n  push:\n    branches: ['master']\n\njobs:\n  package-windows:\n    name: Windows\n    runs-on: windows-latest\n    env:\n      VULKAN_VERSION: \"1.3.290.0\"\n      VULKAN_SDK: \"C:/VulkanSDK/1.3.290.0\"\n    steps:\n      - uses: actions/checkout@v4\n        with:\n          lfs: true\n      - name: Install Vulkan SDK\n        run: |\n          Invoke-WebRequest -Uri \"https://sdk.lunarg.com/sdk/download/${{ env.VULKAN_VERSION }}/windows/VulkanSDK-${{ env.VULKAN_VERSION }}-Installer.exe\" -OutFile vulkan.exe\n          ./vulkan.exe --accept-licenses --default-answer --confirm-command install\n\n      - uses: dtolnay/rust-toolchain@stable\n\n      - name: Build Server\n        run: cargo build --package server --release --locked\n\n      - name: Build Client\n        run: cargo build --package client --release --no-default-features --locked\n\n      - name: Package Artifacts\n        run: |\n          mkdir artifacts\n          Move-Item -Path assets/* -Destination artifacts/\n          Move-Item -Path target/release/*.exe -Destination artifacts/\n\n      - name: Upload Artifacts\n        uses: actions/upload-artifact@v4\n        with:\n          name: windows\n          path: \"artifacts/*\"\n\n  package-linux:\n    name: Linux\n    # Oldest supported runner, for wide glibc compat\n    runs-on: ubuntu-22.04\n    steps:\n      - uses: actions/checkout@v4\n        with:\n          lfs: true\n      - name: Install dependencies\n        run: sudo apt update && sudo apt-get -y install libasound2-dev libvulkan-dev libfontconfig-dev\n      # No prebuilt shaderc, since the official binaries don't seem to be compatible with Ubuntu 20.04,\n      # and we haven't tested them on Ubuntu 22.04.\n      - uses: dtolnay/rust-toolchain@stable\n\n      - name: Build Server\n        run: cargo build --package server --release --locked\n\n      - name: Build Client\n        run: cargo build --package client --release --no-default-features 
--locked\n\n      - name: Strip\n        run: |\n          strip target/release/server target/release/client\n\n      - name: Package Artifacts\n        run: |\n          mkdir artifacts\n          mv assets/* artifacts/\n          mv target/release/server artifacts/\n          mv target/release/client artifacts/\n\n      - name: Upload Artifacts\n        uses: actions/upload-artifact@v4\n        with:\n          name: linux\n          path: \"artifacts/*\"\n"
  },
  {
    "path": ".github/workflows/rust.yml",
    "content": "name: CI\n\non:\n  push:\n    branches: ['master']\n    paths-ignore:\n      - 'docs/**'\n  pull_request:\n    paths-ignore:\n      - 'docs/**'\n\njobs:\n  test:\n    strategy:\n      matrix:\n        os: [ubuntu-latest, windows-latest]\n\n    runs-on: ${{ matrix.os }}\n\n    env:\n      VULKAN_VERSION: \"1.3.290.0\"\n      VULKAN_SDK: \"C:/VulkanSDK/1.3.290.0\"\n\n    steps:\n      - name: Install shaderc\n        if: matrix.os == 'ubuntu-latest'\n        run: |\n          wget -nv -r -nd -A install.tgz 'https://storage.googleapis.com/shaderc/badges/build_link_linux_clang_release.html'\n          tar xf install.tgz\n          echo \"SHADERC_LIB_DIR=$PWD/install/lib\" >> \"$GITHUB_ENV\"\n      - name: Install Vulkan SDK\n        if: matrix.os == 'windows-latest'\n        run: |\n          Invoke-WebRequest -Uri \"https://sdk.lunarg.com/sdk/download/${{ env.VULKAN_VERSION }}/windows/VulkanSDK-${{ env.VULKAN_VERSION }}-Installer.exe\" -OutFile vulkan.exe\n          ./vulkan.exe --accept-licenses --default-answer --confirm-command install\n\n      - uses: actions/checkout@v4\n      - uses: dtolnay/rust-toolchain@stable\n      - run: cargo build --workspace --all-targets --locked\n      - run: cargo test --workspace --locked\n\n  lint:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v4\n      - uses: dtolnay/rust-toolchain@stable\n        with:\n          components: rustfmt, clippy\n      - run: cargo fmt --all -- --check\n      - if: always()\n        run: cargo clippy --workspace --all-targets -- -D warnings\n\n  check-protos:\n    name: Check protos\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v4\n      - uses: dtolnay/rust-toolchain@stable\n      - run: sudo apt update && sudo apt-get -y install protobuf-compiler\n      - name: Generate Rust code from .proto files\n        run: cargo run -p gen-protos --locked\n      - name: Check for uncommitted changes\n        run: git diff --exit-code\n"
  },
  {
    "path": ".gitignore",
    "content": "/target\n**/*.rs.bk\n\n# IDEA workspace stuff\n/*.iml\n/.idea\n\n/.vscode/launch.json\n/.vscode/tasks.json\n\n/tarpaulin-report.html"
  },
  {
    "path": "Cargo.toml",
    "content": "[workspace]\nresolver = \"2\"\nmembers = [\"client\", \"server\", \"common\", \"save\", \"save/gen-protos\"]\n\n[workspace.dependencies]\nhecs = \"0.11.0\"\nnalgebra = { version = \"0.34.1\", features = [\"libm-force\"] }\nquinn = { version = \"0.11\", default-features = false, features = [\"rustls\", \"ring\", \"runtime-tokio\"] }\ntoml = { version = \"0.9.12\", default-features = false, features = [\"parse\", \"serde\", \"std\"] }\n\n[profile.dev]\nopt-level = 1\ndebug-assertions = true\n\n[profile.dev.package.\"*\"]\nopt-level = 2\n\n[profile.release.build-override]\nopt-level = 0\n"
  },
  {
    "path": "LICENSE-APACHE",
    "content": "Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived 
from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"{}\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright {yyyy} {name of copyright owner}\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License."
  },
  {
    "path": "LICENSE-ZLIB",
    "content": "Copyright (c) 2020 Benjamin Saunders\n\nThis software is provided 'as-is', without any express or implied warranty. In\nno event will the authors be held liable for any damages arising from the use of\nthis software.\n\nPermission is granted to anyone to use this software for any purpose, including\ncommercial applications, and to alter it and redistribute it freely, subject to\nthe following restrictions:\n\n1. The origin of this software must not be misrepresented; you must not claim\n   that you wrote the original software. If you use this software in a product, an\n   acknowledgment in the product documentation would be appreciated but is not\n   required.\n\n2. Altered source versions must be plainly marked as such, and must not be\n   misrepresented as being the original software.\n\n3. This notice may not be removed or altered from any source distribution.\n"
  },
  {
    "path": "README.md",
    "content": "## Installation\n\nSee the [wiki](https://github.com/Ralith/hypermine/wiki) for instructions on how to build and run\n\n\n## License\n\nLicensed under either of\n\n * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or\n   http://www.apache.org/licenses/LICENSE-2.0)\n * Zlib license ([LICENSE-ZLIB](LICENSE-ZLIB) or\n   https://opensource.org/licenses/Zlib)\n\nat your option.\n\n### Contribution\n\nUnless you explicitly state otherwise, any contribution intentionally submitted\nfor inclusion in the work by you, as defined in the Apache-2.0 license, shall be\ndual licensed as above, without any additional terms or conditions.\n"
  },
  {
    "path": "assets/.gitattributes",
    "content": "*.png filter=lfs diff=lfs merge=lfs -text\n*.glb filter=lfs diff=lfs merge=lfs -text\n*.gltf filter=lfs diff=lfs merge=lfs -text\n"
  },
  {
    "path": "assets/character.glb",
    "content": "version https://git-lfs.github.com/spec/v1\noid sha256:ee33342c11e0746b106031b2765ee86a0f1890b72fec7c0219815ae98f414e16\nsize 139624\n"
  },
  {
    "path": "client/Cargo.toml",
    "content": "[package]\nname = \"client\"\nversion = \"0.1.0\"\nauthors = [\"Benjamin Saunders <ben.e.saunders@gmail.com>\"]\nedition = \"2024\"\npublish = false\nlicense = \"Apache-2.0 OR Zlib\"\n\n# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html\n\n[dependencies]\ncommon = { path = \"../common\" }\nserver = { path = \"../server\" }\ntracing = \"0.1.10\"\nash = { version = \"0.38.0\", default-features = false, features = [\"loaded\", \"debug\", \"std\"] }\nlahar = { git = \"https://github.com/Ralith/lahar\", rev = \"7963ae5750ea61fa0a894dbb73d3be0ac77255d2\" }\nyakui = \"0.3.0\"\nyakui-vulkan = \"0.3.0\"\nwinit = \"0.30.4\"\nash-window = \"0.13\"\nraw-window-handle = \"0.6\"\ndirectories = \"6.0.0\"\nvk-shader-macros = \"0.2.5\"\nnalgebra = { workspace = true }\nlibm = \"0.2.16\"\ntokio = { version = \"1.43.0\", features = [\"rt-multi-thread\", \"sync\", \"macros\"] }\npng = \"0.18.0\"\nanyhow = \"1.0.26\"\nserde = { version = \"1.0.104\", features = [\"derive\", \"rc\"] }\ntoml = { workspace = true }\nfxhash = \"0.2.1\"\ndowncast-rs = \"2.0.0\"\nquinn = { workspace = true }\nfutures-util = \"0.3.1\"\nwebpki = \"0.22.4\"\nhecs = { workspace = true }\nmemoffset = \"0.9\"\ngltf = { version = \"1.0.0\", default-features = false, features = [\"utils\"] }\nmetrics = \"0.24.0\"\nhdrhistogram = { version = \"7\", default-features = false }\nsave = { path = \"../save\" }\nlru-slab = \"0.1.2\"\n\n[features]\ndefault = [\"use-repo-assets\"]\nuse-repo-assets = []\n\n[dev-dependencies]\napprox = \"0.5.1\"\nbencher = \"0.1.5\"\nrenderdoc = \"0.12.1\"\n\n[[bench]]\nname = \"surface_extraction\"\nharness = false\n"
  },
  {
    "path": "client/benches/surface_extraction.rs",
    "content": "use std::sync::Arc;\n\nuse ash::vk;\nuse bencher::{Bencher, benchmark_group, benchmark_main};\n\nuse client::graphics::{\n    Base,\n    voxels::surface_extraction::{self, ExtractTask, SurfaceExtraction},\n};\n//use common::world::Material;\n\nfn extract(bench: &mut Bencher) {\n    let gfx = Arc::new(Base::headless());\n    let extract = SurfaceExtraction::new(&gfx);\n    let mut scratch = surface_extraction::ScratchBuffer::new(&gfx, &extract, BATCH_SIZE, DIMENSION);\n    let draw = surface_extraction::DrawBuffer::new(&gfx, BATCH_SIZE, DIMENSION);\n    let device = &*gfx.device;\n\n    unsafe {\n        let cmd_pool = device\n            .create_command_pool(\n                &vk::CommandPoolCreateInfo::default().queue_family_index(gfx.queue_family),\n                None,\n            )\n            .unwrap();\n\n        let cmd = device\n            .allocate_command_buffers(\n                &vk::CommandBufferAllocateInfo::default()\n                    .command_pool(cmd_pool)\n                    .command_buffer_count(1),\n            )\n            .unwrap()[0];\n\n        device\n            .begin_command_buffer(cmd, &vk::CommandBufferBeginInfo::default())\n            .unwrap();\n\n        let batch = (0..BATCH_SIZE)\n            .map(|i| ExtractTask {\n                index: i,\n                draw_id: i,\n                indirect_offset: draw.indirect_offset(i),\n                face_offset: draw.face_offset(i),\n                reverse_winding: false,\n            })\n            .collect::<Vec<_>>();\n        scratch.extract(\n            device,\n            &extract,\n            draw.indirect_buffer(),\n            draw.face_buffer(),\n            cmd,\n            &batch,\n        );\n        device.end_command_buffer(cmd).unwrap();\n\n        bench.iter(|| {\n            device\n                .queue_submit(\n                    gfx.queue,\n                    &[vk::SubmitInfo::default().command_buffers(&[cmd])],\n                 
   vk::Fence::null(),\n                )\n                .unwrap();\n            device.device_wait_idle().unwrap();\n        })\n    }\n}\n\nconst DIMENSION: u32 = 16;\nconst BATCH_SIZE: u32 = 16;\n\nbenchmark_group!(benches, extract);\nbenchmark_main!(benches);\n"
  },
  {
    "path": "client/shaders/common.h",
    "content": "#ifndef COMMON_H\n#define COMMON_H\n\nconst float PI = 3.14159265;\nconst float INFINITY = 1.0 / 0.0;\n\nlayout(set = 0, binding = 0) uniform Common {\n    // Maps local node space to clip space\n    mat4 view_projection;\n    // Maps clip space to view space\n    mat4 inverse_projection;\n    float fog_density;\n    float time;\n};\n\n#endif\n"
  },
  {
    "path": "client/shaders/fog.frag",
    "content": "#version 450\n\n#include \"common.h\"\n\nlayout(location = 0) in vec2 texcoords;\n\nlayout(location = 0) out vec4 fog;\n\nlayout(input_attachment_index=0, set=0, binding=1) uniform subpassInput depth;\n\nvoid main() {\n    vec4 clip_pos = vec4(texcoords * 2.0 - 1.0, subpassLoad(depth).x, 1.0);\n    vec4 scaled_view_pos = inverse_projection * clip_pos;\n    // Cancel out perspective, obtaining klein ball position\n    vec3 view_pos = scaled_view_pos.xyz / scaled_view_pos.w;\n    float view_length = length(view_pos);\n    // Convert to true hyperbolic distance, taking care to respect atanh's domain\n    float dist = view_length >= 1.0 ? INFINITY : atanh(view_length);\n    // Exponential^k fog\n    fog = vec4(0.5, 0.65, 0.9, exp(-pow(dist * fog_density, 5)));\n}\n"
  },
  {
    "path": "client/shaders/fullscreen.vert",
    "content": "#version 450\n\nlayout (location = 0) out vec2 texcoords;\n\nvoid main()  {\n    texcoords = vec2((gl_VertexIndex << 1) & 2, gl_VertexIndex & 2);\n    gl_Position = vec4(texcoords * 2.0f + -1.0f, 0.0f, 1.0f);\n}\n"
  },
  {
    "path": "client/shaders/mesh.frag",
    "content": "#version 450\n\nlayout(location = 0) in vec2 texcoords;\nlayout(location = 1) in vec4 normal;\n\nlayout(location = 0) out vec4 color_out;\n\nlayout(set = 1, binding = 0) uniform sampler2D color;\n\nvoid main() {\n    color_out = texture(color, texcoords);\n}\n"
  },
  {
    "path": "client/shaders/mesh.vert",
    "content": "#version 450\n\n#include \"common.h\"\n\nlayout(location = 0) in vec3 position;\nlayout(location = 1) in vec2 texcoords;\nlayout(location = 2) in vec3 normal;\n\nlayout(location = 0) out vec2 texcoords_out;\nlayout(location = 1) out vec4 normal_out;\n\nlayout(push_constant) uniform PushConstants {\n    mat4 transform;\n};\n\nvoid main() {\n    gl_Position = view_projection * transform * vec4(position, 1);\n    texcoords_out = texcoords;\n    normal_out = transform * vec4(normal, 0);\n}\n"
  },
  {
    "path": "client/shaders/surface-extraction/extract.comp",
    "content": "#version 450\n\n#extension GL_KHR_shader_subgroup_ballot: enable\n#extension GL_KHR_shader_subgroup_arithmetic: enable\n\n#include \"surface.h\"\n\nlayout(local_size_x_id = 0, local_size_y_id = 1, local_size_z_id = 2) in;\n\nlayout(set = 0, binding = 0) restrict uniform Parameters {\n    int dimension;\n};\n\nlayout(set = 1, binding = 0) readonly restrict buffer Voxels {\n    uint voxel_pair[];\n};\n\nlayout(set = 1, binding = 1) restrict buffer State {\n    uint face_count;\n};\n\nlayout(set = 1, binding = 2) writeonly restrict buffer Indirect {\n    uint vertex_count;\n    uint instance_count;\n    uint first_vertex;\n    uint first_instance;\n};\n\nlayout(set = 1, binding = 3) writeonly restrict buffer Surfaces {\n    Surface surfaces[];\n};\n\nlayout(push_constant) uniform Uniforms {\n    bool reverse_winding;\n};\n\nuint get_voxel(ivec3 coords) {\n    // We assume that all dimensions are equal, except that gl_NumWorkGroups.x is three times larger\n    // (yielding one invocation per negative-facing face). Each coordinate is offset by 1 to account\n    // for the margin on the negative-facing sides of the chunk.\n\n    // There's a margin of 1 on each side of each dimension, only half of which is dispatched over\n    uint linear = (coords.x + 1) + (coords.y + 1) * (dimension + 2) + (coords.z + 1) * (dimension + 2) * (dimension + 2);\n    uint pair = voxel_pair[linear / 2];\n    return (linear % 2) == 0 ? 
pair & 0xFFFF : pair >> 16;\n}\n\n// A face between a voxel and its neighbor in the -X, -Y, or -Z direction\nstruct Face {\n    // coordinates of the voxel\n    ivec3 voxel;\n    // [0,3), indicating which axis this face is perpendicular to\n    uint axis;\n    // whether the normal is facing towards the center of this voxel\n    bool inward;\n    // contents of the solid voxel incident to the face, which may be a neighbor\n    uint material;\n};\n\nivec3 neighbor_offset(uint axis) {\n    ivec3 off = ivec3(0);\n    off[axis] = -1;\n    return off;\n}\n\nbool find_face(out Face info) {\n    // We only look at negative-facing faces of the current voxel, and iterate one past the end on\n    // each dimension to enclose it fully.\n    info.voxel = ivec3(gl_GlobalInvocationID.x / 3, gl_GlobalInvocationID.yz);\n    info.axis = gl_GlobalInvocationID.x % 3;\n    ivec3 neighbor = info.voxel + neighbor_offset(info.axis);\n    // Don't generate faces between out-of-bounds voxels\n    if (any(greaterThanEqual(info.voxel, ivec3(dimension))) && any(greaterThanEqual(neighbor, ivec3(dimension)))) return false;\n    uint neighbor_mat = get_voxel(neighbor);\n    uint self_mat = get_voxel(info.voxel);\n    // Flip face around if the neighbor is the solid one\n    info.inward = self_mat == 0;\n    info.material = self_mat | neighbor_mat;\n    // If self or neighbor is a void margin, then no surface should be generated, as any surface\n    // that would be rendered is the responsibility of the adjacent chunk.\n    if ((self_mat == 0 && info.voxel[info.axis] == dimension) || (neighbor_mat == 0 && neighbor[info.axis] == -1)) return false;\n    return (neighbor_mat == 0) != (self_mat == 0);\n}\n\n// Compute the occlusion state based on the three voxels surrounding an exposed vertex:\n//\n// a b\n// c .\n//\n// There are four occlusion states:\n// 0 - fully enclosed\n// 1 - two neighboring voxels\n// 2 - one neighboring voxel\n// 3 - fully exposed\nuint vertex_occlusion(bool a, bool b, 
bool c) {\n    return b && c ? 0 : (3 - uint(a) - uint(b) - uint(c));\n}\n\n// Compute the occlusion state for each vertex on a surface\nuvec4 surface_occlusion(ivec3 voxel, uint axis, bool inward) {\n    // U/V axes on this surface\n    const ivec3 uvs[3][2] = {\n        {{0, 1, 0}, {0, 0, 1}},\n        {{0, 0, 1}, {1, 0, 0}},\n        {{1, 0, 0}, {0, 1, 0}},\n    };\n\n    if (!inward) {\n        voxel += neighbor_offset(axis);\n    }\n\n    ivec3 u = uvs[axis][0];\n    ivec3 v = uvs[axis][1];\n    // 0 1 2\n    // 3 . 4\n    // 5 6 7\n    bool occluders[8] = {\n        get_voxel(voxel - u - v) != 0,\n        get_voxel(voxel     - v) != 0,\n        get_voxel(voxel + u - v) != 0,\n        get_voxel(voxel - u    ) != 0,\n        get_voxel(voxel + u    ) != 0,\n        get_voxel(voxel - u + v) != 0,\n        get_voxel(voxel     + v) != 0,\n        get_voxel(voxel + u + v) != 0,\n    };\n    return uvec4(\n        vertex_occlusion(occluders[0], occluders[1], occluders[3]),\n        vertex_occlusion(occluders[2], occluders[1], occluders[4]),\n        vertex_occlusion(occluders[5], occluders[6], occluders[3]),\n        vertex_occlusion(occluders[7], occluders[6], occluders[4])\n    );\n}\n\nvoid main() {\n    // Determine whether this thread generates a face\n    Face info;\n    bool has_face = find_face(info);\n\n    // Number of faces in the subgroup\n    uint subgroup_faces = subgroupAdd(uint(has_face));\n\n    // Compute the starting storage offset for this subgroup\n    uint subgroup_offset;\n    if (subgroupElect()) {\n        subgroup_offset = atomicAdd(face_count, subgroup_faces);\n        // Increment the vertex count while we're at it, accounting for two triangles per face.\n        atomicAdd(vertex_count, subgroup_faces * 6);\n    }\n    subgroup_offset = subgroupBroadcastFirst(subgroup_offset);\n\n    if (!has_face) return;\n\n    // Write the thread's face\n    uint thread_offset = subgroupExclusiveAdd(uint(has_face));\n    surfaces[subgroup_offset + 
thread_offset] = surface(\n        info.voxel,\n        info.axis,\n        info.inward ^^ reverse_winding,\n        info.material,\n        surface_occlusion(info.voxel, info.axis, info.inward)\n    );\n}\n"
  },
  {
    "path": "client/shaders/surface-extraction/surface.h",
    "content": "#ifndef SURFACE_EXTRACTION_SURFACE_H_\n#define SURFACE_EXTRACTION_SURFACE_H_\n\n// A face between a voxel and its neighbor in the -X, -Y, or -Z direction\nstruct Surface {\n    // From most to least significant byte, (axis, z, y, x)\n    uint pos_axis;\n    // From most to least significant byte, (occlusion, <padding>, mat, mat)\n    uint occlusion_mat;\n};\n\n// [0,2^8)^3\nuvec3 get_pos(Surface s) {\n    return uvec3(s.pos_axis & 0xFF, (s.pos_axis >> 8) & 0xFF, (s.pos_axis >> 16) & 0xFF);\n}\n\n// Identifies the order in which the vertices should be rendered. The vertex positions are the same,\n// but winding and diagonal position vary. A flipped diagonal is used to ensure barycentric\n// interpolation of ambient occlusion is isotropic, and does not affect texture coordinates\n//\n// [0,3) are -X/-Y/-Z\n// [3,6) are +X/+Y/+Z\n// [6,9) are -X/-Y/-Z flipped\n// [9,12) are +X/+Y/+Z flipped\nuint get_axis(Surface s) {\n    return s.pos_axis >> 24;\n}\n\nuint get_mat(Surface s) {\n    return s.occlusion_mat & 0xFFFF;\n}\n\nfloat get_occlusion(Surface s, uvec2 texcoords) {\n    return float((s.occlusion_mat >> (24 + 2 * (texcoords.x | texcoords.y << 1))) & 0x03) / 3.0 * 0.95 + 0.05;\n}\n\nSurface surface(uvec3 pos, uint axis, bool reverse, uint mat, uvec4 occlusion) {\n    Surface result;\n    // Flip the quad if necessary to prevent the triangle dividing line from being parallel to the\n    // gradient of ambient occlusion, ensuring isotropy.\n    axis += 3 * uint(reverse) + 6 * uint(occlusion.y + occlusion.z > occlusion.x + occlusion.w);\n    result.pos_axis = pos.x | pos.y << 8 | pos.z << 16 | axis << 24;\n    result.occlusion_mat = mat | occlusion.x << 24 | occlusion.y << 26 | occlusion.z << 28 | occlusion.w << 30;\n    return result;\n}\n\n#endif\n"
  },
  {
    "path": "client/shaders/voxels.frag",
    "content": "#version 450\n\nlayout(location = 0) in vec3 texcoords;\nlayout(location = 1) in float occlusion;\nlayout(location = 0) out vec4 color;\n\nlayout(set = 1, binding = 1) uniform sampler2DArray textures;\n\nvoid main() {\n    color = texture(textures, texcoords) * occlusion;\n}\n"
  },
  {
    "path": "client/shaders/voxels.vert",
    "content": "#version 460\n\n#include \"common.h\"\n#include \"surface-extraction/surface.h\"\n\n// Maps from cube space ([0..1]^3) to local node space\nlayout(location = 0) in mat4 transform;\n\nlayout(location = 0) out vec3 texcoords_out;\nlayout(location = 1) out float occlusion;\n\nlayout(set = 1, binding = 0) readonly restrict buffer Surfaces {\n    Surface surfaces[];\n};\n\nlayout(push_constant) uniform PushConstants {\n    uint dimension;\n};\n\n// Each set of 6 vertices makes a ring around the quad, with the middle and start/end vertices\n// duplicated. Note that the sign only indicates the winding of the face; all faces contain the\n// origin regardless.\nconst uvec3 vertices[12][6] = {\n    {{0, 0, 0}, {0, 0, 1}, {0, 1, 1}, {0, 1, 1}, {0, 1, 0}, {0, 0, 0}}, // -X\n    {{0, 0, 0}, {1, 0, 0}, {1, 0, 1}, {1, 0, 1}, {0, 0, 1}, {0, 0, 0}}, // -Y\n    {{0, 0, 0}, {0, 1, 0}, {1, 1, 0}, {1, 1, 0}, {1, 0, 0}, {0, 0, 0}}, // -Z\n\n    {{0, 0, 0}, {0, 1, 0}, {0, 1, 1}, {0, 1, 1}, {0, 0, 1}, {0, 0, 0}}, // +X\n    {{0, 0, 0}, {0, 0, 1}, {1, 0, 1}, {1, 0, 1}, {1, 0, 0}, {0, 0, 0}}, // +Y\n    {{0, 0, 0}, {1, 0, 0}, {1, 1, 0}, {1, 1, 0}, {0, 1, 0}, {0, 0, 0}}, // +Z\n\n    // Versions of the above rotated 90 degrees so the diagonal goes the other way, used to improve\n    // the consistency of barycentric interpolation of ambient occlusion\n    {{0, 0, 1}, {0, 1, 1}, {0, 1, 0}, {0, 1, 0}, {0, 0, 0}, {0, 0, 1}}, // -X\n    {{1, 0, 0}, {1, 0, 1}, {0, 0, 1}, {0, 0, 1}, {0, 0, 0}, {1, 0, 0}}, // -Y\n    {{0, 1, 0}, {1, 1, 0}, {1, 0, 0}, {1, 0, 0}, {0, 0, 0}, {0, 1, 0}}, // -Z\n\n    {{0, 0, 1}, {0, 0, 0}, {0, 1, 0}, {0, 1, 0}, {0, 1, 1}, {0, 0, 1}}, // +X\n    {{1, 0, 0}, {0, 0, 0}, {0, 0, 1}, {0, 0, 1}, {1, 0, 1}, {1, 0, 0}}, // +Y\n    {{0, 1, 0}, {0, 0, 0}, {1, 0, 0}, {1, 0, 0}, {1, 1, 0}, {0, 1, 0}}  // +Z\n};\n\nconst uvec2 texcoords[4][6] = {\n    {{0, 0}, {0, 1}, {1, 1}, {1, 1}, {1, 0}, {0, 0}},\n    {{0, 0}, {1, 0}, {1, 1}, {1, 1}, {0, 1}, {0, 0}},\n    // 
Rotated versions\n    {{0, 1}, {1, 1}, {1, 0}, {1, 0}, {0, 0}, {0, 1}},\n    {{0, 1}, {0, 0}, {1, 0}, {1, 0}, {1, 1}, {0, 1}},\n};\n\nvoid main()  {\n    uint index = gl_VertexIndex / 6;\n    uint vertex = gl_VertexIndex % 6;\n    Surface s = surfaces[index];\n    uvec3 pos = get_pos(s);\n    uint axis = get_axis(s);\n    uvec2 uv = texcoords[axis / 3][vertex];\n    texcoords_out = vec3(uv, get_mat(s) - 1);\n    occlusion = get_occlusion(s, uv);\n    vec3 relative_coords = vertices[axis][vertex] + pos;\n    gl_Position = view_projection * transform * vec4(relative_coords / dimension, 1);\n}\n"
  },
  {
    "path": "client/src/config.rs",
    "content": "use std::{\n    env, fs, io,\n    net::SocketAddr,\n    path::{Path, PathBuf},\n    sync::Arc,\n};\n\nuse serde::Deserialize;\nuse tracing::{debug, error, info};\n\nuse common::{Anonymize, SimConfig, SimConfigRaw};\n\npub struct Config {\n    pub name: Arc<str>,\n    pub data_dirs: Vec<PathBuf>,\n    pub save: PathBuf,\n    pub chunk_load_parallelism: u32,\n    pub server: Option<SocketAddr>,\n    pub local_simulation: SimConfig,\n}\n\nimpl Config {\n    pub fn load(dirs: &directories::ProjectDirs) -> Self {\n        // Future work: search $XDG_CONFIG_DIRS\n        let path = dirs.config_dir().join(\"client.toml\");\n        // Read and parse config file\n        let RawConfig {\n            name,\n            data_dir,\n            save,\n            local_simulation,\n            chunk_load_parallelism,\n            server,\n        } = match fs::read(&path) {\n            Ok(data) => {\n                info!(\"found config at {}\", path.anonymize().display());\n                match std::str::from_utf8(&data)\n                    .map_err(anyhow::Error::from)\n                    .and_then(|s| toml::from_str(s).map_err(anyhow::Error::from))\n                {\n                    Ok(x) => x,\n                    Err(e) => {\n                        error!(\"failed to parse config: {}\", e);\n                        RawConfig::default()\n                    }\n                }\n            }\n            Err(ref e) if e.kind() == io::ErrorKind::NotFound => {\n                info!(\"{} not found, using defaults\", path.anonymize().display());\n                RawConfig::default()\n            }\n            Err(e) => {\n                error!(\n                    \"failed to read config: {}: {}\",\n                    path.anonymize().display(),\n                    e\n                );\n                RawConfig::default()\n            }\n        };\n        let mut data_dirs = Vec::new();\n        if let Some(dir) = data_dir {\n            
data_dirs.push(dir);\n        }\n        data_dirs.push(dirs.data_dir().into());\n        if let Ok(path) = env::current_exe()\n            && let Some(dir) = path.parent()\n        {\n            data_dirs.push(dir.into());\n        }\n        #[cfg(feature = \"use-repo-assets\")]\n        {\n            data_dirs.push(\n                Path::new(env!(\"CARGO_MANIFEST_DIR\"))\n                    .parent()\n                    .unwrap()\n                    .join(\"assets\"),\n            );\n        }\n        // Massage into final form\n        Config {\n            name: name.unwrap_or(\"player\".into()),\n            data_dirs,\n            save: save.unwrap_or(\"default.save\".into()),\n            chunk_load_parallelism: chunk_load_parallelism.unwrap_or(256),\n            server,\n            local_simulation: SimConfig::from_raw(&local_simulation),\n        }\n    }\n\n    pub fn find_asset(&self, path: &Path) -> Option<PathBuf> {\n        for dir in &self.data_dirs {\n            let full_path = dir.join(path);\n            if full_path.exists() {\n                debug!(path = ?path.anonymize().display(), dir = ?dir.anonymize().display(), \"found asset\");\n                return Some(full_path);\n            }\n        }\n        None\n    }\n}\n\n/// Data as parsed directly out of the config file\n#[derive(Deserialize, Default)]\n#[serde(deny_unknown_fields)]\nstruct RawConfig {\n    name: Option<Arc<str>>,\n    data_dir: Option<PathBuf>,\n    save: Option<PathBuf>,\n    chunk_load_parallelism: Option<u32>,\n    server: Option<SocketAddr>,\n    #[serde(default)]\n    local_simulation: SimConfigRaw,\n}\n"
  },
  {
    "path": "client/src/graphics/base.rs",
    "content": "//! Common state shared throughout the graphics system\n\nuse ash::ext::debug_utils;\nuse common::Anonymize;\nuse std::ffi::{CStr, c_char};\nuse std::path::PathBuf;\nuse std::sync::Arc;\nuse std::{fs, io};\nuse tracing::{error, info, trace, warn};\n\nuse ash::{Device, vk};\n\nuse super::Core;\n\n/// Vulkan resources shared between many parts of the renderer\npub struct Base {\n    pub core: Arc<Core>,\n    /// The physical device (i.e. GPU) we're rendering with\n    pub physical: vk::PhysicalDevice,\n    /// The logical device, containing functions used for rendering\n    pub device: Arc<Device>,\n    /// The queue family we're rendering in\n    pub queue_family: u32,\n    /// The queue used for graphics and presentation\n    pub queue: vk::Queue,\n    /// Information about the types of device-visible memory that can be allocated\n    pub memory_properties: vk::PhysicalDeviceMemoryProperties,\n    /// Cache used to speed up graphics pipeline construction\n    pub pipeline_cache: vk::PipelineCache,\n    /// Context in which the main rendering work occurs\n    pub render_pass: vk::RenderPass,\n    /// A reasonable general-purpose texture sampler\n    pub linear_sampler: vk::Sampler,\n    /// Layout of common shader resources, such as the common uniform buffer\n    pub common_layout: vk::DescriptorSetLayout,\n    pub limits: vk::PhysicalDeviceLimits,\n    pub timestamp_bits: u32,\n    pipeline_cache_path: Option<PathBuf>,\n    debug_utils: Option<debug_utils::Device>,\n}\n\nunsafe impl Send for Base {}\nunsafe impl Sync for Base {}\n\nimpl Drop for Base {\n    fn drop(&mut self) {\n        unsafe {\n            self.device\n                .destroy_pipeline_cache(self.pipeline_cache, None);\n            self.device.destroy_render_pass(self.render_pass, None);\n            self.device.destroy_sampler(self.linear_sampler, None);\n            self.device\n                .destroy_descriptor_set_layout(self.common_layout, None);\n            
self.device.destroy_device(None);\n        }\n    }\n}\n\nimpl Base {\n    pub fn new(\n        core: Arc<Core>,\n        pipeline_cache_path: Option<PathBuf>,\n        device_exts: &[&CStr],\n        mut device_filter: impl FnMut(vk::PhysicalDevice, u32) -> bool,\n    ) -> Option<Self> {\n        let pipeline_cache_data = if let Some(ref path) = pipeline_cache_path {\n            match fs::read(path) {\n                Ok(x) => x,\n                Err(e) => {\n                    if e.kind() == io::ErrorKind::NotFound {\n                        info!(\"creating fresh pipeline cache\");\n                    } else {\n                        warn!(path=%path.anonymize().display(), \"failed to load pipeline cache: {}\", e);\n                    }\n                    Vec::new()\n                }\n            }\n        } else {\n            Vec::new()\n        };\n        unsafe {\n            let instance = &core.instance;\n            // Select a physical device and queue family to use for rendering\n            let (physical, queue_family_index, queue_family_properties) = instance\n                .enumerate_physical_devices()\n                .unwrap()\n                .into_iter()\n                .find_map(|physical| {\n                    instance\n                        .get_physical_device_queue_family_properties(physical)\n                        .into_iter()\n                        .enumerate()\n                        .filter_map(|(queue_family_index, info)| {\n                            let supports_graphic_and_surface =\n                                info.queue_flags.contains(vk::QueueFlags::GRAPHICS)\n                                    && device_filter(physical, queue_family_index as u32);\n                            if supports_graphic_and_surface {\n                                Some((physical, queue_family_index as u32, info))\n                            } else {\n                                None\n                            }\n       
                 })\n                        .next()\n                })?;\n            let mut subgroup_properties = vk::PhysicalDeviceSubgroupProperties::default();\n            let mut physical_properties = vk::PhysicalDeviceProperties2 {\n                p_next: &mut subgroup_properties as *mut _ as *mut _,\n                ..Default::default()\n            };\n            instance.get_physical_device_properties2(physical, &mut physical_properties);\n            let name = std::str::from_utf8(\n                &*(&physical_properties.properties.device_name[..physical_properties\n                    .properties\n                    .device_name\n                    .iter()\n                    .position(|&x| x == 0)\n                    .unwrap()] as *const [c_char] as *const [u8]),\n            )\n            .unwrap();\n            info!(name, \"selected device\");\n\n            if !subgroup_properties\n                .supported_operations\n                .contains(vk::SubgroupFeatureFlags::BALLOT & vk::SubgroupFeatureFlags::ARITHMETIC)\n            {\n                error!(\n                    \"required subgroup operations are unsupported (supported: {:?})\",\n                    subgroup_properties.supported_operations\n                );\n                return None;\n            }\n\n            // Create the logical device and common resources descended from it\n            let device_exts = device_exts.iter().map(|x| x.as_ptr()).collect::<Vec<_>>();\n            let device = Arc::new(\n                instance\n                    .create_device(\n                        physical,\n                        &vk::DeviceCreateInfo::default()\n                            .queue_create_infos(&[vk::DeviceQueueCreateInfo::default()\n                                .queue_family_index(queue_family_index)\n                                .queue_priorities(&[1.0])])\n                            .enabled_extension_names(&device_exts)\n                          
  .push_next(\n                                &mut vk::PhysicalDeviceVulkan12Features::default()\n                                    .descriptor_binding_partially_bound(true)\n                                    .descriptor_binding_sampled_image_update_after_bind(true),\n                            ),\n                        None,\n                    )\n                    .unwrap(),\n            );\n            let queue = device.get_device_queue(queue_family_index, 0);\n            let memory_properties = instance.get_physical_device_memory_properties(physical);\n            let pipeline_cache = device\n                .create_pipeline_cache(\n                    &vk::PipelineCacheCreateInfo::default().initial_data(&pipeline_cache_data),\n                    None,\n                )\n                .unwrap();\n\n            let render_pass = device\n                .create_render_pass(\n                    &vk::RenderPassCreateInfo::default()\n                        .attachments(&[\n                            vk::AttachmentDescription {\n                                format: COLOR_FORMAT,\n                                samples: vk::SampleCountFlags::TYPE_1,\n                                load_op: vk::AttachmentLoadOp::CLEAR,\n                                store_op: vk::AttachmentStoreOp::STORE,\n                                initial_layout: vk::ImageLayout::UNDEFINED,\n                                final_layout: vk::ImageLayout::PRESENT_SRC_KHR,\n                                ..Default::default()\n                            },\n                            vk::AttachmentDescription {\n                                format: vk::Format::D32_SFLOAT,\n                                samples: vk::SampleCountFlags::TYPE_1,\n                                load_op: vk::AttachmentLoadOp::CLEAR,\n                                store_op: vk::AttachmentStoreOp::DONT_CARE,\n                                initial_layout: vk::ImageLayout::UNDEFINED,\n   
                             final_layout: vk::ImageLayout::DEPTH_STENCIL_ATTACHMENT_OPTIMAL,\n                                ..Default::default()\n                            },\n                        ])\n                        .subpasses(&[\n                            vk::SubpassDescription::default()\n                                .color_attachments(&[vk::AttachmentReference {\n                                    attachment: 0,\n                                    layout: vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL,\n                                }])\n                                .depth_stencil_attachment(&vk::AttachmentReference {\n                                    attachment: 1,\n                                    layout: vk::ImageLayout::DEPTH_STENCIL_ATTACHMENT_OPTIMAL,\n                                })\n                                .pipeline_bind_point(vk::PipelineBindPoint::GRAPHICS),\n                            vk::SubpassDescription::default()\n                                .color_attachments(&[vk::AttachmentReference {\n                                    attachment: 0,\n                                    layout: vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL,\n                                }])\n                                .input_attachments(&[vk::AttachmentReference {\n                                    attachment: 1,\n                                    layout: vk::ImageLayout::DEPTH_STENCIL_READ_ONLY_OPTIMAL,\n                                }])\n                                .pipeline_bind_point(vk::PipelineBindPoint::GRAPHICS),\n                        ])\n                        .dependencies(&[\n                            vk::SubpassDependency {\n                                src_subpass: vk::SUBPASS_EXTERNAL,\n                                dst_subpass: 0,\n                                src_stage_mask: vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT\n                                    | 
vk::PipelineStageFlags::LATE_FRAGMENT_TESTS,\n                                dst_stage_mask: vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT\n                                    | vk::PipelineStageFlags::EARLY_FRAGMENT_TESTS,\n                                dst_access_mask: vk::AccessFlags::COLOR_ATTACHMENT_READ\n                                    | vk::AccessFlags::COLOR_ATTACHMENT_WRITE\n                                    | vk::AccessFlags::DEPTH_STENCIL_ATTACHMENT_READ\n                                    | vk::AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE,\n                                ..Default::default()\n                            },\n                            vk::SubpassDependency {\n                                src_subpass: 0,\n                                dst_subpass: 1,\n                                src_stage_mask: vk::PipelineStageFlags::EARLY_FRAGMENT_TESTS\n                                    | vk::PipelineStageFlags::LATE_FRAGMENT_TESTS, // depth write\n                                dst_stage_mask: vk::PipelineStageFlags::FRAGMENT_SHADER, // subpass input\n\n                                src_access_mask: vk::AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE,\n                                dst_access_mask: vk::AccessFlags::INPUT_ATTACHMENT_READ,\n                                dependency_flags: vk::DependencyFlags::BY_REGION,\n                            },\n                        ]),\n                    None,\n                )\n                .unwrap();\n\n            let linear_sampler = device\n                .create_sampler(\n                    &vk::SamplerCreateInfo::default()\n                        .min_filter(vk::Filter::LINEAR)\n                        .mag_filter(vk::Filter::LINEAR)\n                        .mipmap_mode(vk::SamplerMipmapMode::NEAREST)\n                        .address_mode_u(vk::SamplerAddressMode::CLAMP_TO_EDGE)\n                        .address_mode_v(vk::SamplerAddressMode::CLAMP_TO_EDGE)\n        
                .address_mode_w(vk::SamplerAddressMode::CLAMP_TO_EDGE),\n                    None,\n                )\n                .unwrap();\n\n            let common_layout = device\n                .create_descriptor_set_layout(\n                    &vk::DescriptorSetLayoutCreateInfo::default().bindings(&[\n                        // Uniforms\n                        vk::DescriptorSetLayoutBinding {\n                            binding: 0,\n                            descriptor_type: vk::DescriptorType::UNIFORM_BUFFER,\n                            descriptor_count: 1,\n                            stage_flags: vk::ShaderStageFlags::VERTEX\n                                | vk::ShaderStageFlags::FRAGMENT,\n                            ..Default::default()\n                        },\n                        // Depth buffer\n                        vk::DescriptorSetLayoutBinding {\n                            binding: 1,\n                            descriptor_type: vk::DescriptorType::INPUT_ATTACHMENT,\n                            descriptor_count: 1,\n                            stage_flags: vk::ShaderStageFlags::FRAGMENT,\n                            ..Default::default()\n                        },\n                    ]),\n                    None,\n                )\n                .unwrap();\n            let debug_utils = core\n                .debug_utils\n                .as_ref()\n                .map(|_| debug_utils::Device::new(&core.instance, &device));\n\n            Some(Self {\n                core,\n                physical,\n                device,\n                queue_family: queue_family_index,\n                queue,\n                memory_properties,\n                pipeline_cache,\n                render_pass,\n                linear_sampler,\n                common_layout,\n                pipeline_cache_path,\n                limits: physical_properties.properties.limits,\n                timestamp_bits: 
queue_family_properties.timestamp_valid_bits,\n                debug_utils,\n            })\n        }\n    }\n\n    pub fn save_pipeline_cache(&self) {\n        let path = match self.pipeline_cache_path {\n            Some(ref x) => x,\n            None => return,\n        };\n        let data = unsafe {\n            self.device\n                .get_pipeline_cache_data(self.pipeline_cache)\n                .unwrap()\n        };\n        match fs::create_dir_all(path.parent().unwrap()).and_then(|()| fs::write(path, &data)) {\n            Ok(()) => {\n                trace!(len = data.len(), \"wrote pipeline cache\");\n            }\n            Err(e) => {\n                warn!(path=%path.anonymize().display(), \"failed to save pipeline cache: {}\", e);\n            }\n        }\n    }\n\n    /// Set an object's name for use in diagnostics\n    pub unsafe fn set_name<T: vk::Handle>(&self, object: T, name: &CStr) {\n        unsafe {\n            let Some(ref ex) = self.debug_utils else {\n                return;\n            };\n            ex.set_debug_utils_object_name(\n                &vk::DebugUtilsObjectNameInfoEXT::default()\n                    .object_handle(object)\n                    .object_name(name),\n            )\n            .unwrap();\n        }\n    }\n\n    /// Convenience constructor for tests and benchmarks\n    pub fn headless() -> Self {\n        let core = Core::new(&[]);\n        Self::new(Arc::new(core), None, &[], |_, _| true).unwrap()\n    }\n}\n\n/// The pixel format we render in\npub const COLOR_FORMAT: vk::Format = vk::Format::B8G8R8A8_SRGB;\n"
  },
  {
    "path": "client/src/graphics/core.rs",
    "content": "use std::ffi::CStr;\nuse std::os::raw::c_char;\nuse std::os::raw::c_void;\nuse std::ptr;\nuse std::slice;\n\nuse ash::ext::debug_utils;\nuse ash::{Entry, Instance, vk};\nuse tracing::{debug, error, info, trace, warn};\n\nuse common::defer;\n\n/// The most fundamental components of a Vulkan setup\npub struct Core {\n    /// Handle to the Vulkan dynamic library itself, used to bootstrap\n    pub entry: Entry,\n    /// The Vulkan instance, containing fundamental device-independent functions\n    pub instance: Instance,\n\n    /// Diagnostic infrastructure, configured if the environment supports them. Typically present\n    /// when the Vulkan validation layers are enabled or a graphics debugger is in use and absent\n    /// otherwise.\n    pub debug_utils: Option<debug_utils::Instance>,\n    messenger: vk::DebugUtilsMessengerEXT,\n}\n\nimpl Drop for Core {\n    fn drop(&mut self) {\n        unsafe {\n            if let Some(ref utils) = self.debug_utils {\n                utils.destroy_debug_utils_messenger(self.messenger, None);\n            }\n            self.instance.destroy_instance(None);\n        }\n    }\n}\n\nimpl Core {\n    pub fn new(exts: &[*const c_char]) -> Self {\n        unsafe {\n            let entry = Entry::load().unwrap();\n\n            let supported_exts = entry.enumerate_instance_extension_properties(None).unwrap();\n            let has_debug = supported_exts\n                .iter()\n                .any(|x| CStr::from_ptr(x.extension_name.as_ptr()) == debug_utils::NAME);\n\n            let mut exts = exts.to_vec();\n            if has_debug {\n                exts.push(debug_utils::NAME.as_ptr());\n            } else {\n                info!(\"vulkan debugging unavailable\");\n            }\n\n            let instance_layers = entry.enumerate_instance_layer_properties().unwrap();\n            tracing::info!(\n                \"Vulkan instance layers: {:?}\",\n                instance_layers\n                    .iter()\n      
              .map(|layer| CStr::from_ptr(layer.layer_name.as_ptr()).to_str().unwrap())\n                    .collect::<Vec<_>>()\n            );\n\n            let name = cstr!(\"hypermine\");\n\n            let app_info = vk::ApplicationInfo::default()\n                .application_name(name)\n                .application_version(0)\n                .engine_name(name)\n                .engine_version(0)\n                .api_version(vk::make_api_version(0, 1, 2, 0));\n            let mut instance_info = vk::InstanceCreateInfo::default()\n                .application_info(&app_info)\n                .enabled_extension_names(&exts);\n\n            let mut debug_utils_messenger_info = vk::DebugUtilsMessengerCreateInfoEXT::default()\n                .message_severity(\n                    vk::DebugUtilsMessageSeverityFlagsEXT::ERROR\n                        | vk::DebugUtilsMessageSeverityFlagsEXT::WARNING\n                        | vk::DebugUtilsMessageSeverityFlagsEXT::INFO\n                        | vk::DebugUtilsMessageSeverityFlagsEXT::VERBOSE,\n                )\n                .message_type(\n                    vk::DebugUtilsMessageTypeFlagsEXT::GENERAL\n                        | vk::DebugUtilsMessageTypeFlagsEXT::VALIDATION\n                        | vk::DebugUtilsMessageTypeFlagsEXT::PERFORMANCE,\n                )\n                .pfn_user_callback(Some(messenger_callback))\n                .user_data(ptr::null_mut());\n            if has_debug {\n                instance_info = instance_info.push_next(&mut debug_utils_messenger_info);\n            }\n\n            let instance = entry.create_instance(&instance_info, None).unwrap();\n            // Guards ensure we clean up gracefully if something panics\n            let instance_guard = defer(|| instance.destroy_instance(None));\n            let debug_utils;\n            let messenger;\n            if has_debug {\n                // Configure Vulkan diagnostic message logging\n                let utils = 
debug_utils::Instance::new(&entry, &instance);\n                messenger = utils\n                    .create_debug_utils_messenger(&debug_utils_messenger_info, None)\n                    .unwrap();\n                debug_utils = Some(utils);\n            } else {\n                debug_utils = None;\n                messenger = vk::DebugUtilsMessengerEXT::null();\n            }\n\n            // Setup successful, don't destroy things.\n            instance_guard.cancel();\n            Self {\n                entry,\n                instance,\n                debug_utils,\n                messenger,\n            }\n        }\n    }\n}\n\n/// Callback invoked by Vulkan for diagnostic messages\n///\n/// We forward these to our `tracing` logging infrastructure.\nunsafe extern \"system\" fn messenger_callback(\n    message_severity: vk::DebugUtilsMessageSeverityFlagsEXT,\n    _message_types: vk::DebugUtilsMessageTypeFlagsEXT,\n    p_data: *const vk::DebugUtilsMessengerCallbackDataEXT,\n    _p_user_data: *mut c_void,\n) -> vk::Bool32 {\n    unsafe {\n        unsafe fn fmt_labels(ptr: *const vk::DebugUtilsLabelEXT, count: u32) -> String {\n            unsafe {\n                if count == 0 {\n                    // We need to handle a count of 0 separately because ptr may be\n                    // null, resulting in undefined behavior if used with\n                    // slice::from_raw_parts.\n                    return String::new();\n                }\n                slice::from_raw_parts(ptr, count as usize)\n                    .iter()\n                    .map(|label| {\n                        CStr::from_ptr(label.p_label_name)\n                            .to_string_lossy()\n                            .into_owned()\n                    })\n                    .collect::<Vec<_>>()\n                    .join(\", \")\n            }\n        }\n\n        let data = &*p_data;\n        let msg_id = if data.p_message_id_name.is_null() {\n            \"\".into()\n   
     } else {\n            CStr::from_ptr(data.p_message_id_name).to_string_lossy()\n        };\n        let msg = CStr::from_ptr(data.p_message).to_string_lossy();\n        let queue_labels = fmt_labels(data.p_queue_labels, data.queue_label_count);\n        let cmd_labels = fmt_labels(data.p_cmd_buf_labels, data.cmd_buf_label_count);\n        let objects = slice::from_raw_parts(data.p_objects, data.object_count as usize)\n            .iter()\n            .map(|obj| {\n                if obj.p_object_name.is_null() {\n                    format!(\"{:?} {:x}\", obj.object_type, obj.object_handle)\n                } else {\n                    format!(\n                        \"{:?} {:x} {}\",\n                        obj.object_type,\n                        obj.object_handle,\n                        CStr::from_ptr(obj.p_object_name).to_string_lossy()\n                    )\n                }\n            })\n            .collect::<Vec<_>>()\n            .join(\", \");\n        if message_severity >= vk::DebugUtilsMessageSeverityFlagsEXT::ERROR {\n            error!(target: \"vulkan\", id = %msg_id, number = data.message_id_number, queue_labels = %queue_labels, cmd_labels = %cmd_labels, objects = %objects, \"{}\", msg);\n        } else if message_severity >= vk::DebugUtilsMessageSeverityFlagsEXT::WARNING {\n            warn!(target: \"vulkan\", id = %msg_id, number = data.message_id_number, queue_labels = %queue_labels, cmd_labels = %cmd_labels, objects = %objects, \"{}\", msg);\n        } else if message_severity >= vk::DebugUtilsMessageSeverityFlagsEXT::INFO {\n            debug!(target: \"vulkan\", id = %msg_id, number = data.message_id_number, queue_labels = %queue_labels, cmd_labels = %cmd_labels, objects = %objects, \"{}\", msg);\n        } else {\n            trace!(target: \"vulkan\", id = %msg_id, number = data.message_id_number, queue_labels = %queue_labels, cmd_labels = %cmd_labels, objects = %objects, \"{}\", msg);\n        }\n        vk::FALSE\n    }\n}\n"
  },
  {
    "path": "client/src/graphics/draw.rs",
    "content": "use std::sync::Arc;\nuse std::time::Instant;\n\nuse ash::vk;\nuse common::traversal;\nuse lahar::Staged;\nuse metrics::histogram;\n\nuse super::{Base, Fog, Frustum, GltfScene, Meshes, Voxels, fog, voxels};\nuse crate::{Asset, Config, Loader, Sim};\nuse common::SimConfig;\nuse common::proto::{Character, Position};\n\n/// Manages rendering, independent of what is being rendered to\npub struct Draw {\n    gfx: Arc<Base>,\n    cfg: Arc<Config>,\n    /// Used to allocate the command buffers we render with\n    cmd_pool: vk::CommandPool,\n    /// Allows accurate frame timing information to be recorded\n    timestamp_pool: vk::QueryPool,\n    /// State that varies per frame in flight\n    states: Vec<State>,\n    /// The index of the next element of `states` to use\n    next_state: usize,\n    /// A reference time\n    epoch: Instant,\n    /// The lowest common denominator between the interfaces of our graphics pipelines\n    ///\n    /// Represents e.g. the binding for common uniforms\n    common_pipeline_layout: vk::PipelineLayout,\n    /// Descriptor pool from which descriptor sets shared between many pipelines are allocated\n    common_descriptor_pool: vk::DescriptorPool,\n\n    /// Drives async asset loading\n    loader: Loader,\n\n    //\n    // Rendering pipelines\n    //\n    /// Populated after connect, once the voxel configuration is known\n    voxels: Option<Voxels>,\n    meshes: Meshes,\n    fog: Fog,\n\n    /// Reusable storage for barriers that prevent races between image upload and read\n    image_barriers: Vec<vk::ImageMemoryBarrier<'static>>,\n    /// Reusable storage for barriers that prevent races between buffer upload and read\n    buffer_barriers: Vec<vk::BufferMemoryBarrier<'static>>,\n\n    /// Yakui Vulkan context\n    yakui_vulkan: yakui_vulkan::YakuiVulkan,\n\n    /// Miscellany\n    character_model: Asset<GltfScene>,\n}\n\n/// Maximum number of simultaneous frames in flight\nconst PIPELINE_DEPTH: u32 = 2;\nconst 
TIMESTAMPS_PER_FRAME: u32 = 3;\n\nimpl Draw {\n    pub fn new(gfx: Arc<Base>, cfg: Arc<Config>) -> Self {\n        let device = &*gfx.device;\n        unsafe {\n            // Allocate a command buffer for each frame state\n            let cmd_pool = device\n                .create_command_pool(\n                    &vk::CommandPoolCreateInfo::default()\n                        .queue_family_index(gfx.queue_family)\n                        .flags(\n                            vk::CommandPoolCreateFlags::RESET_COMMAND_BUFFER\n                                | vk::CommandPoolCreateFlags::TRANSIENT,\n                        ),\n                    None,\n                )\n                .unwrap();\n            let cmds = device\n                .allocate_command_buffers(\n                    &vk::CommandBufferAllocateInfo::default()\n                        .command_pool(cmd_pool)\n                        .command_buffer_count(2 * PIPELINE_DEPTH),\n                )\n                .unwrap();\n\n            let timestamp_pool = device\n                .create_query_pool(\n                    &vk::QueryPoolCreateInfo::default()\n                        .query_type(vk::QueryType::TIMESTAMP)\n                        .query_count(TIMESTAMPS_PER_FRAME * PIPELINE_DEPTH),\n                    None,\n                )\n                .unwrap();\n            gfx.set_name(timestamp_pool, cstr!(\"timestamp pool\"));\n\n            let common_pipeline_layout = device\n                .create_pipeline_layout(\n                    &vk::PipelineLayoutCreateInfo::default().set_layouts(&[gfx.common_layout]),\n                    None,\n                )\n                .unwrap();\n\n            // Allocate descriptor sets for data used by all graphics pipelines (e.g. 
common\n            // uniforms)\n            let common_descriptor_pool = device\n                .create_descriptor_pool(\n                    &vk::DescriptorPoolCreateInfo::default()\n                        .max_sets(PIPELINE_DEPTH)\n                        .pool_sizes(&[\n                            vk::DescriptorPoolSize {\n                                ty: vk::DescriptorType::UNIFORM_BUFFER,\n                                descriptor_count: PIPELINE_DEPTH,\n                            },\n                            vk::DescriptorPoolSize {\n                                ty: vk::DescriptorType::INPUT_ATTACHMENT,\n                                descriptor_count: PIPELINE_DEPTH,\n                            },\n                        ]),\n                    None,\n                )\n                .unwrap();\n            let common_ds = device\n                .allocate_descriptor_sets(\n                    &vk::DescriptorSetAllocateInfo::default()\n                        .descriptor_pool(common_descriptor_pool)\n                        .set_layouts(&vec![gfx.common_layout; PIPELINE_DEPTH as usize]),\n                )\n                .unwrap();\n\n            let mut loader = Loader::new(cfg.clone(), gfx.clone());\n\n            // Construct the per-frame states\n            let states = cmds\n                .chunks(2)\n                .zip(common_ds)\n                .map(|(cmds, common_ds)| {\n                    let uniforms = Staged::new(\n                        device,\n                        &gfx.memory_properties,\n                        vk::BufferUsageFlags::UNIFORM_BUFFER,\n                    );\n                    device.update_descriptor_sets(\n                        &[vk::WriteDescriptorSet::default()\n                            .dst_set(common_ds)\n                            .dst_binding(0)\n                            .descriptor_type(vk::DescriptorType::UNIFORM_BUFFER)\n                            
.buffer_info(&[vk::DescriptorBufferInfo {\n                                buffer: uniforms.buffer(),\n                                offset: 0,\n                                range: vk::WHOLE_SIZE,\n                            }])],\n                        &[],\n                    );\n                    let x = State {\n                        cmd: cmds[0],\n                        post_cmd: cmds[1],\n                        common_ds,\n                        image_acquired: device.create_semaphore(&Default::default(), None).unwrap(),\n                        fence: device\n                            .create_fence(\n                                &vk::FenceCreateInfo::default()\n                                    .flags(vk::FenceCreateFlags::SIGNALED),\n                                None,\n                            )\n                            .unwrap(),\n                        uniforms,\n                        used: false,\n                        in_flight: false,\n\n                        voxels: None,\n                    };\n                    gfx.set_name(x.cmd, cstr!(\"frame\"));\n                    gfx.set_name(x.post_cmd, cstr!(\"post-frame\"));\n                    gfx.set_name(x.image_acquired, cstr!(\"image acquired\"));\n                    gfx.set_name(x.fence, cstr!(\"render complete\"));\n                    gfx.set_name(x.uniforms.buffer(), cstr!(\"uniforms\"));\n                    x\n                })\n                .collect();\n\n            let meshes = Meshes::new(&gfx, loader.ctx().mesh_ds_layout);\n\n            let fog = Fog::new(&gfx);\n\n            gfx.save_pipeline_cache();\n\n            let mut yakui_vulkan_options = yakui_vulkan::Options::default();\n            yakui_vulkan_options.render_pass = gfx.render_pass;\n            yakui_vulkan_options.subpass = 1;\n            let mut yakui_vulkan = yakui_vulkan::YakuiVulkan::new(\n                &yakui_vulkan::VulkanContext::new(device, gfx.queue, 
gfx.memory_properties),\n                yakui_vulkan_options,\n            );\n            for _ in 0..PIPELINE_DEPTH {\n                yakui_vulkan.transfers_submitted();\n            }\n\n            let character_model = loader.load(\n                \"character model\",\n                super::GlbFile {\n                    path: \"character.glb\".into(),\n                },\n            );\n\n            Self {\n                gfx,\n                cfg,\n                cmd_pool,\n                timestamp_pool,\n                states,\n                next_state: 0,\n                epoch: Instant::now(),\n                common_pipeline_layout,\n                common_descriptor_pool,\n\n                loader,\n\n                voxels: None,\n                meshes,\n                fog,\n\n                buffer_barriers: Vec::new(),\n                image_barriers: Vec::new(),\n\n                yakui_vulkan,\n\n                character_model,\n            }\n        }\n    }\n\n    /// Called with server-defined world parameters once they're known\n    pub fn configure(&mut self, cfg: &SimConfig) {\n        let voxels = Voxels::new(\n            &self.gfx,\n            self.cfg.clone(),\n            &mut self.loader,\n            u32::from(cfg.chunk_size),\n            PIPELINE_DEPTH,\n        );\n        for state in &mut self.states {\n            state.voxels = Some(voxels::Frame::new(&self.gfx, &voxels));\n        }\n        self.voxels = Some(voxels);\n    }\n\n    /// Waits for a frame's worth of resources to become available for use in rendering a new frame\n    ///\n    /// Call before signaling the image_acquired semaphore or invoking `draw`.\n    pub unsafe fn wait(&mut self) {\n        unsafe {\n            let device = &*self.gfx.device;\n            let state = &mut self.states[self.next_state];\n            device.wait_for_fences(&[state.fence], true, !0).unwrap();\n            self.yakui_vulkan\n                
.transfers_finished(&yakui_vulkan::VulkanContext::new(\n                    device,\n                    self.gfx.queue,\n                    self.gfx.memory_properties,\n                ));\n            state.in_flight = false;\n        }\n    }\n\n    /// Semaphore that must be signaled when an output framebuffer can be rendered to\n    ///\n    /// Don't signal until after `wait`ing; call before `draw`\n    pub fn image_acquired(&self) -> vk::Semaphore {\n        self.states[self.next_state].image_acquired\n    }\n\n    /// Submit commands to the GPU to draw a frame\n    ///\n    /// `framebuffer` must have a color and depth buffer attached and have the dimensions specified\n    /// in `extent`. The `present` semaphore is signaled when rendering is complete and the color\n    /// image can be presented.\n    ///\n    /// Submits commands that wait on `image_acquired` before writing to `framebuffer`'s color\n    /// attachment.\n    #[allow(clippy::too_many_arguments)] // Every argument is of a different type, making this less of a problem.\n    pub unsafe fn draw(\n        &mut self,\n        mut sim: Option<&mut Sim>,\n        yakui_paint_dom: &yakui::paint::PaintDom,\n        framebuffer: vk::Framebuffer,\n        depth_view: vk::ImageView,\n        extent: vk::Extent2D,\n        present: vk::Semaphore,\n        frustum: &Frustum,\n    ) {\n        unsafe {\n            let draw_started = Instant::now();\n            let view = sim.as_ref().map_or_else(Position::origin, |sim| sim.view());\n            let projection = frustum.projection(1.0e-4);\n            let view_projection = projection.matrix() * na::Matrix4::from(view.local.inverse());\n            self.loader.drive();\n\n            let device = &*self.gfx.device;\n            let state_index = self.next_state;\n            let state = &mut self.states[self.next_state];\n            let cmd = state.cmd;\n\n            let yakui_vulkan_context = yakui_vulkan::VulkanContext::new(\n                
device,\n                self.gfx.queue,\n                self.gfx.memory_properties,\n            );\n\n            // We're using this state again, so put the fence back in the unsignaled state and compute\n            // the next frame to use\n            device.reset_fences(&[state.fence]).unwrap();\n            self.next_state = (self.next_state + 1) % PIPELINE_DEPTH as usize;\n\n            // Set up framebuffer attachments\n            device.update_descriptor_sets(\n                &[vk::WriteDescriptorSet::default()\n                    .dst_set(state.common_ds)\n                    .dst_binding(1)\n                    .descriptor_type(vk::DescriptorType::INPUT_ATTACHMENT)\n                    .image_info(&[vk::DescriptorImageInfo {\n                        sampler: vk::Sampler::null(),\n                        image_view: depth_view,\n                        image_layout: vk::ImageLayout::DEPTH_STENCIL_READ_ONLY_OPTIMAL,\n                    }])],\n                &[],\n            );\n\n            // Handle completed queries\n            let first_query = state_index as u32 * TIMESTAMPS_PER_FRAME;\n            if state.used {\n                // Collect timestamps from the last time we drew this frame\n                let mut queries = [0u64; TIMESTAMPS_PER_FRAME as usize];\n                // `WAIT` is guaranteed not to block here because `Self::draw` is only called after\n                // `Self::wait` ensures that the prior instance of this frame is complete.\n                device\n                    .get_query_pool_results(\n                        self.timestamp_pool,\n                        first_query,\n                        &mut queries,\n                        vk::QueryResultFlags::TYPE_64 | vk::QueryResultFlags::WAIT,\n                    )\n                    .unwrap();\n                let draw_seconds = self.gfx.limits.timestamp_period as f64\n                    * 1e-9\n                    * (queries[1] - queries[0]) as f64;\n     
           let after_seconds = self.gfx.limits.timestamp_period as f64\n                    * 1e-9\n                    * (queries[2] - queries[1]) as f64;\n                histogram!(\"frame.gpu.draw\").record(draw_seconds);\n                histogram!(\"frame.gpu.after_draw\").record(after_seconds);\n            }\n\n            device\n                .begin_command_buffer(\n                    cmd,\n                    &vk::CommandBufferBeginInfo::default()\n                        .flags(vk::CommandBufferUsageFlags::ONE_TIME_SUBMIT),\n                )\n                .unwrap();\n            device\n                .begin_command_buffer(\n                    state.post_cmd,\n                    &vk::CommandBufferBeginInfo::default()\n                        .flags(vk::CommandBufferUsageFlags::ONE_TIME_SUBMIT),\n                )\n                .unwrap();\n\n            device.cmd_reset_query_pool(\n                cmd,\n                self.timestamp_pool,\n                first_query,\n                TIMESTAMPS_PER_FRAME,\n            );\n            let mut timestamp_index = first_query;\n            device.cmd_write_timestamp(\n                cmd,\n                vk::PipelineStageFlags::BOTTOM_OF_PIPE,\n                self.timestamp_pool,\n                timestamp_index,\n            );\n            timestamp_index += 1;\n\n            self.yakui_vulkan\n                .transfer(yakui_paint_dom, &yakui_vulkan_context, cmd);\n\n            // Schedule transfer of uniform data. 
Note that we defer actually preparing the data to just\n            // before submitting the command buffer so time-sensitive values can be set with minimum\n            // latency.\n            state.uniforms.record_transfer(device, cmd);\n            self.buffer_barriers.push(\n                vk::BufferMemoryBarrier::default()\n                    .src_access_mask(vk::AccessFlags::TRANSFER_WRITE)\n                    .dst_access_mask(vk::AccessFlags::UNIFORM_READ)\n                    .buffer(state.uniforms.buffer())\n                    .size(vk::WHOLE_SIZE),\n            );\n\n            let nearby_nodes_started = Instant::now();\n            let nearby_nodes = if let Some(sim) = sim.as_deref() {\n                traversal::nearby_nodes(&sim.graph, &view, self.cfg.local_simulation.view_distance)\n            } else {\n                vec![]\n            };\n            histogram!(\"frame.cpu.nearby_nodes\").record(nearby_nodes_started.elapsed());\n\n            if let (Some(voxels), Some(sim)) = (self.voxels.as_mut(), sim.as_mut()) {\n                voxels.prepare(\n                    device,\n                    state.voxels.as_mut().unwrap(),\n                    sim,\n                    &nearby_nodes,\n                    state.post_cmd,\n                    frustum,\n                );\n            }\n\n            // Ensure reads of just-transferred memory wait until it's ready\n            device.cmd_pipeline_barrier(\n                cmd,\n                vk::PipelineStageFlags::TRANSFER,\n                vk::PipelineStageFlags::VERTEX_SHADER | vk::PipelineStageFlags::FRAGMENT_SHADER,\n                vk::DependencyFlags::default(),\n                &[],\n                &self.buffer_barriers,\n                &self.image_barriers,\n            );\n            self.buffer_barriers.clear();\n            self.image_barriers.clear();\n\n            device.cmd_begin_render_pass(\n                cmd,\n                
&vk::RenderPassBeginInfo::default()\n                    .render_pass(self.gfx.render_pass)\n                    .framebuffer(framebuffer)\n                    .render_area(vk::Rect2D {\n                        offset: vk::Offset2D::default(),\n                        extent,\n                    })\n                    .clear_values(&[\n                        vk::ClearValue {\n                            color: vk::ClearColorValue {\n                                float32: [0.0, 0.0, 0.0, 0.0],\n                            },\n                        },\n                        vk::ClearValue {\n                            depth_stencil: vk::ClearDepthStencilValue {\n                                depth: 0.0,\n                                stencil: 0,\n                            },\n                        },\n                    ]),\n                vk::SubpassContents::INLINE,\n            );\n\n            // Set up common dynamic state\n            let viewports = [vk::Viewport {\n                x: 0.0,\n                y: 0.0,\n                width: extent.width as f32,\n                height: extent.height as f32,\n                min_depth: 0.0,\n                max_depth: 1.0,\n            }];\n            let scissors = [vk::Rect2D {\n                offset: vk::Offset2D { x: 0, y: 0 },\n                extent: vk::Extent2D {\n                    width: extent.width,\n                    height: extent.height,\n                },\n            }];\n            device.cmd_set_viewport(cmd, 0, &viewports);\n            device.cmd_set_scissor(cmd, 0, &scissors);\n\n            // Record the actual rendering commands\n            if let Some(ref mut voxels) = self.voxels {\n                voxels.draw(\n                    device,\n                    &self.loader,\n                    state.common_ds,\n                    state.voxels.as_ref().unwrap(),\n                    cmd,\n                );\n            }\n\n            if let Some(sim) = 
sim.as_deref() {\n                for (node, transform) in nearby_nodes {\n                    for &entity in sim.graph_entities.get(node) {\n                        if sim.local_character == Some(entity) {\n                            // Don't draw ourself\n                            continue;\n                        }\n                        let pos = sim\n                            .world\n                            .get::<&Position>(entity)\n                            .expect(\"positionless entity in graph\");\n                        if let Some(character_model) = self.loader.get(self.character_model)\n                            && let Ok(ch) = sim.world.get::<&Character>(entity)\n                        {\n                            let transform = na::Matrix4::from(transform * pos.local)\n                                * na::Matrix4::new_scaling(sim.cfg().meters_to_absolute)\n                                * ch.state.orientation.to_homogeneous();\n                            for mesh in &character_model.0 {\n                                self.meshes\n                                    .draw(device, state.common_ds, cmd, mesh, &transform);\n                            }\n                        }\n                    }\n                }\n            }\n\n            device.cmd_next_subpass(cmd, vk::SubpassContents::INLINE);\n\n            self.fog.draw(device, state.common_ds, cmd);\n\n            self.yakui_vulkan\n                .paint(yakui_paint_dom, &yakui_vulkan_context, cmd, extent);\n\n            // Finish up\n            device.cmd_end_render_pass(cmd);\n            device.cmd_write_timestamp(\n                cmd,\n                vk::PipelineStageFlags::BOTTOM_OF_PIPE,\n                self.timestamp_pool,\n                timestamp_index,\n            );\n            timestamp_index += 1;\n            device.end_command_buffer(cmd).unwrap();\n\n            device.cmd_write_timestamp(\n                state.post_cmd,\n               
 vk::PipelineStageFlags::BOTTOM_OF_PIPE,\n                self.timestamp_pool,\n                timestamp_index,\n            );\n            device.end_command_buffer(state.post_cmd).unwrap();\n\n            // Specify the uniform data before actually submitting the command to transfer it\n            state.uniforms.write(Uniforms {\n                view_projection,\n                inverse_projection: *projection.inverse().matrix(),\n                fog_density: fog::density(self.cfg.local_simulation.fog_distance, 1e-3, 5.0),\n                time: self.epoch.elapsed().as_secs_f32().fract(),\n            });\n\n            // Submit the commands to the GPU\n            device\n                .queue_submit(\n                    self.gfx.queue,\n                    &[\n                        vk::SubmitInfo::default()\n                            .command_buffers(&[cmd])\n                            .wait_semaphores(&[state.image_acquired])\n                            .wait_dst_stage_mask(&[vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT])\n                            .signal_semaphores(&[present]),\n                        vk::SubmitInfo::default().command_buffers(&[state.post_cmd]),\n                    ],\n                    state.fence,\n                )\n                .unwrap();\n            self.yakui_vulkan.transfers_submitted();\n            state.used = true;\n            state.in_flight = true;\n            histogram!(\"frame.cpu\").record(draw_started.elapsed());\n        }\n    }\n\n    /// Wait for all drawing to complete\n    ///\n    /// Useful to e.g. 
ensure it's safe to deallocate an image that's being rendered to\n    pub fn wait_idle(&self) {\n        let device = &*self.gfx.device;\n        for state in &self.states {\n            unsafe {\n                device.wait_for_fences(&[state.fence], true, !0).unwrap();\n            }\n        }\n    }\n}\n\nimpl Drop for Draw {\n    fn drop(&mut self) {\n        let device = &*self.gfx.device;\n        unsafe {\n            for state in &mut self.states {\n                if state.in_flight {\n                    device.wait_for_fences(&[state.fence], true, !0).unwrap();\n                    state.in_flight = false;\n                }\n                device.destroy_semaphore(state.image_acquired, None);\n                device.destroy_fence(state.fence, None);\n                state.uniforms.destroy(device);\n                if let Some(mut voxels) = state.voxels.take() {\n                    voxels.destroy(device);\n                }\n            }\n            self.yakui_vulkan.cleanup(&self.gfx.device);\n            device.destroy_command_pool(self.cmd_pool, None);\n            device.destroy_query_pool(self.timestamp_pool, None);\n            device.destroy_descriptor_pool(self.common_descriptor_pool, None);\n            device.destroy_pipeline_layout(self.common_pipeline_layout, None);\n            self.fog.destroy(device);\n            self.meshes.destroy(device);\n            if let Some(mut voxels) = self.voxels.take() {\n                voxels.destroy(device);\n            }\n        }\n    }\n}\n\nstruct State {\n    /// Semaphore signaled by someone else to indicate that output to the framebuffer can begin\n    image_acquired: vk::Semaphore,\n    /// Fence signaled when this state is no longer in use\n    fence: vk::Fence,\n    /// Command buffer we record the frame's rendering onto\n    cmd: vk::CommandBuffer,\n    /// Work performed after rendering, overlapping with the next frame's CPU work\n    post_cmd: vk::CommandBuffer,\n    /// Descriptor set 
for graphics-pipeline-independent data\n    common_ds: vk::DescriptorSet,\n    /// The common uniform buffer\n    uniforms: Staged<Uniforms>,\n    /// Whether this state has been previously used\n    ///\n    /// Indicates that e.g. valid timestamps are associated with this query\n    used: bool,\n    /// Whether this state is currently being accessed by the GPU\n    ///\n    /// True for the period between `cmd` being submitted and `fence` being waited.\n    in_flight: bool,\n\n    // Per-pipeline states\n    voxels: Option<voxels::Frame>,\n}\n\n/// Data stored in the common uniform buffer\n///\n/// Alignment and padding must be manually managed to match the std140 ABI as expected by the\n/// shaders.\n#[repr(C)]\n#[derive(Copy, Clone)]\nstruct Uniforms {\n    /// Camera projection matrix\n    view_projection: na::Matrix4<f32>,\n    inverse_projection: na::Matrix4<f32>,\n    fog_density: f32,\n    /// Cycles through [0,1) once per second for simple animation effects\n    time: f32,\n}\n"
  },
  {
    "path": "client/src/graphics/fog.rs",
    "content": "use ash::{Device, vk};\nuse vk_shader_macros::include_glsl;\n\nuse super::Base;\nuse common::defer;\n\nconst VERT: &[u32] = include_glsl!(\"shaders/fullscreen.vert\");\nconst FRAG: &[u32] = include_glsl!(\"shaders/fog.frag\");\n\npub struct Fog {\n    pipeline_layout: vk::PipelineLayout,\n    pipeline: vk::Pipeline,\n}\n\nimpl Fog {\n    pub fn new(gfx: &Base) -> Self {\n        let device = &*gfx.device;\n        unsafe {\n            // Construct the shader modules\n            let vert = device\n                .create_shader_module(&vk::ShaderModuleCreateInfo::default().code(VERT), None)\n                .unwrap();\n            // Note that these only need to live until the pipeline itself is constructed\n            let v_guard = defer(|| device.destroy_shader_module(vert, None));\n\n            let frag = device\n                .create_shader_module(&vk::ShaderModuleCreateInfo::default().code(FRAG), None)\n                .unwrap();\n            let f_guard = defer(|| device.destroy_shader_module(frag, None));\n\n            // Define the outward-facing interface of the shaders, incl. 
uniforms, samplers, etc.\n            let pipeline_layout = device\n                .create_pipeline_layout(\n                    &vk::PipelineLayoutCreateInfo::default().set_layouts(&[gfx.common_layout]),\n                    None,\n                )\n                .unwrap();\n\n            let entry_point = cstr!(\"main\").as_ptr();\n            let mut pipelines = device\n                .create_graphics_pipelines(\n                    gfx.pipeline_cache,\n                    &[vk::GraphicsPipelineCreateInfo::default()\n                        .stages(&[\n                            vk::PipelineShaderStageCreateInfo {\n                                stage: vk::ShaderStageFlags::VERTEX,\n                                module: vert,\n                                p_name: entry_point,\n                                ..Default::default()\n                            },\n                            vk::PipelineShaderStageCreateInfo {\n                                stage: vk::ShaderStageFlags::FRAGMENT,\n                                module: frag,\n                                p_name: entry_point,\n                                ..Default::default()\n                            },\n                        ])\n                        .vertex_input_state(&vk::PipelineVertexInputStateCreateInfo::default())\n                        .input_assembly_state(\n                            &vk::PipelineInputAssemblyStateCreateInfo::default()\n                                .topology(vk::PrimitiveTopology::TRIANGLE_LIST),\n                        )\n                        .viewport_state(\n                            &vk::PipelineViewportStateCreateInfo::default()\n                                .scissor_count(1)\n                                .viewport_count(1),\n                        )\n                        .rasterization_state(\n                            &vk::PipelineRasterizationStateCreateInfo::default()\n                                
.cull_mode(vk::CullModeFlags::NONE)\n                                .polygon_mode(vk::PolygonMode::FILL)\n                                .line_width(1.0),\n                        )\n                        .multisample_state(\n                            &vk::PipelineMultisampleStateCreateInfo::default()\n                                .rasterization_samples(vk::SampleCountFlags::TYPE_1),\n                        )\n                        .depth_stencil_state(\n                            &vk::PipelineDepthStencilStateCreateInfo::default()\n                                .depth_test_enable(false)\n                                .depth_write_enable(false),\n                        )\n                        .color_blend_state(\n                            &vk::PipelineColorBlendStateCreateInfo::default().attachments(&[\n                                vk::PipelineColorBlendAttachmentState {\n                                    blend_enable: vk::TRUE,\n                                    src_color_blend_factor: vk::BlendFactor::ONE_MINUS_SRC_ALPHA,\n                                    dst_color_blend_factor: vk::BlendFactor::SRC_ALPHA,\n                                    color_blend_op: vk::BlendOp::ADD,\n                                    color_write_mask: vk::ColorComponentFlags::R\n                                        | vk::ColorComponentFlags::G\n                                        | vk::ColorComponentFlags::B,\n                                    ..Default::default()\n                                },\n                            ]),\n                        )\n                        .dynamic_state(\n                            &vk::PipelineDynamicStateCreateInfo::default().dynamic_states(&[\n                                vk::DynamicState::VIEWPORT,\n                                vk::DynamicState::SCISSOR,\n                            ]),\n                        )\n                        .layout(pipeline_layout)\n                        
.render_pass(gfx.render_pass)\n                        .subpass(1)],\n                    None,\n                )\n                .unwrap()\n                .into_iter();\n\n            let pipeline = pipelines.next().unwrap();\n            gfx.set_name(pipeline, cstr!(\"fog\"));\n\n            // Clean up the shaders explicitly, so the defer guards don't hold onto references we're\n            // moving into `Self` to be returned\n            v_guard.invoke();\n            f_guard.invoke();\n\n            Self {\n                pipeline_layout,\n                pipeline,\n            }\n        }\n    }\n\n    pub unsafe fn draw(\n        &mut self,\n        device: &Device,\n        common_ds: vk::DescriptorSet,\n        cmd: vk::CommandBuffer,\n    ) {\n        unsafe {\n            device.cmd_bind_pipeline(cmd, vk::PipelineBindPoint::GRAPHICS, self.pipeline);\n            device.cmd_bind_descriptor_sets(\n                cmd,\n                vk::PipelineBindPoint::GRAPHICS,\n                self.pipeline_layout,\n                0,\n                &[common_ds],\n                &[],\n            );\n            device.cmd_draw(cmd, 3, 1, 0, 0);\n        }\n    }\n\n    pub unsafe fn destroy(&mut self, device: &Device) {\n        unsafe {\n            device.destroy_pipeline(self.pipeline, None);\n            device.destroy_pipeline_layout(self.pipeline_layout, None);\n        }\n    }\n}\n\n/// Compute the density value that will lead to a certain transmission from points at a certain\n/// distance, for a certain fog exponent\npub fn density(distance: f32, transmission: f32, exponent: f32) -> f32 {\n    transmission.recip().ln().powf(exponent.recip()) / distance\n}\n"
  },
  {
    "path": "client/src/graphics/frustum.rs",
    "content": "use common::math::{MDirection, MPoint};\n\n#[derive(Debug, Copy, Clone)]\npub struct Frustum {\n    pub left: f32,\n    pub right: f32,\n    pub down: f32,\n    pub up: f32,\n}\n\nimpl Frustum {\n    /// Construct a symmetric frustum from a vertical FoV and an aspect ratio (width / height)\n    pub fn from_vfov(vfov: f32, aspect_ratio: f32) -> Self {\n        let hfov = (aspect_ratio * vfov.tan()).atan();\n        Self {\n            left: -hfov,\n            right: hfov,\n            down: -vfov,\n            up: vfov,\n        }\n    }\n\n    #[rustfmt::skip]\n    /// Compute right-handed y-up inverse Z perspective projection matrix with far plane at 1.0\n    ///\n    /// This projection is applied to Beltrami-Klein vertices, which fall within a ball of radius 1\n    /// around the viewpoint, so a far plane of 1.0 gives us ideal distribution of depth precision.\n    pub fn projection(&self, znear: f32) -> na::Projective3<f32> {\n        // Based on http://dev.theomader.com/depth-precision/ (broken link) + OpenVR docs\n        // Additional context at https://developer.nvidia.com/content/depth-precision-visualized\n        let zfar = 1.0;\n        let left = self.left.tan();\n        let right = self.right.tan();\n        let down = self.down.tan();\n        let up = self.up.tan();\n        let idx = 1.0 / (right - left);\n        let idy = 1.0 / (down - up);\n        let sx = right + left;\n        let sy = down + up;\n        // For an infinite far plane instead, za = 0 and zb = znear\n        let za = -znear / (znear - zfar);\n        let zb = -(znear * zfar) / (znear - zfar);\n        na::Projective3::from_matrix_unchecked(\n            na::Matrix4::new(\n                2.0 * idx,       0.0,  sx * idx,       0.0,\n                      0.0, 2.0 * idy,  sy * idy,       0.0,\n                      0.0,       0.0,        za,        zb,\n                      0.0,       0.0,      -1.0,       0.0))\n    }\n\n    pub fn planes(&self) -> 
FrustumPlanes {\n        FrustumPlanes {\n            left: MDirection::from(\n                na::UnitQuaternion::from_axis_angle(&na::Vector3::y_axis(), self.left)\n                    * -na::Vector3::x_axis(),\n            ),\n            right: MDirection::from(\n                na::UnitQuaternion::from_axis_angle(&na::Vector3::y_axis(), self.right)\n                    * na::Vector3::x_axis(),\n            ),\n            down: MDirection::from(\n                na::UnitQuaternion::from_axis_angle(&na::Vector3::x_axis(), self.down)\n                    * na::Vector3::y_axis(),\n            ),\n            up: MDirection::from(\n                na::UnitQuaternion::from_axis_angle(&na::Vector3::x_axis(), self.up)\n                    * -na::Vector3::y_axis(),\n            ),\n        }\n    }\n}\n\n#[derive(Debug, Copy, Clone)]\npub struct FrustumPlanes {\n    left: MDirection<f32>,\n    right: MDirection<f32>,\n    down: MDirection<f32>,\n    up: MDirection<f32>,\n}\n\nimpl FrustumPlanes {\n    pub fn contain(&self, point: &MPoint<f32>, radius: f32) -> bool {\n        for &plane in &[&self.left, &self.right, &self.down, &self.up] {\n            if plane.mip(point).asinh() < -radius {\n                return false;\n            }\n        }\n        true\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use common::math::MIsometry;\n    use std::f32;\n\n    #[test]\n    fn planes_sanity() {\n        // 90 degree square\n        let planes = Frustum::from_vfov(f32::consts::FRAC_PI_4, 1.0).planes();\n        assert!(planes.contain(&MPoint::origin(), 0.1));\n        assert!(planes.contain(\n            &(MIsometry::translation_along(&-na::Vector3::z()) * MPoint::origin()),\n            0.0\n        ));\n        assert!(!planes.contain(\n            &(MIsometry::translation_along(&na::Vector3::z()) * MPoint::origin()),\n            0.0\n        ));\n        assert!(!planes.contain(\n            &(MIsometry::translation_along(&na::Vector3::x()) * 
MPoint::origin()),\n            0.0\n        ));\n        assert!(!planes.contain(\n            &(MIsometry::translation_along(&na::Vector3::y()) * MPoint::origin()),\n            0.0\n        ));\n        assert!(!planes.contain(\n            &(MIsometry::translation_along(&-na::Vector3::x()) * MPoint::origin()),\n            0.0\n        ));\n        assert!(!planes.contain(\n            &(MIsometry::translation_along(&-na::Vector3::y()) * MPoint::origin()),\n            0.0\n        ));\n    }\n}\n"
  },
  {
    "path": "client/src/graphics/gltf_mesh.rs",
    "content": "use std::{\n    borrow::Cow,\n    fs::{self, File},\n    io::Cursor,\n    mem,\n    path::{Path, PathBuf},\n    ptr,\n};\n\nuse anyhow::{Context, Result, anyhow, bail};\nuse ash::vk;\nuse common::Anonymize;\nuse futures_util::future::{BoxFuture, FutureExt, try_join_all};\nuse lahar::{BufferRegionAlloc, DedicatedImage};\nuse tracing::{error, trace};\n\nuse super::{Base, Mesh, meshes::Vertex};\nuse crate::loader::{Cleanup, LoadCtx, LoadFuture, Loadable};\n\npub struct GlbFile {\n    pub path: PathBuf,\n}\n\nimpl Loadable for GlbFile {\n    type Output = GltfScene;\n\n    fn load(self, ctx: &LoadCtx) -> LoadFuture<'_, Self::Output> {\n        Box::pin(self.load(ctx))\n    }\n}\n\nimpl GlbFile {\n    async fn load(self, ctx: &LoadCtx) -> Result<GltfScene> {\n        let path = ctx\n            .cfg\n            .find_asset(&self.path)\n            .ok_or_else(|| anyhow!(\"{} not found\", self.path.anonymize().display()))?;\n\n        let glb = gltf::Glb::from_reader(\n            File::open(&path).with_context(|| format!(\"opening {}\", path.anonymize().display()))?,\n        )\n        .with_context(|| format!(\"reading {}\", path.anonymize().display()))?;\n        let gltf = gltf::Document::from_json(\n            gltf::json::deserialize::from_slice(&glb.json).context(\"JSON parsing\")?,\n        )\n        .context(\"GLTF parsing\")?;\n        let buffer = glb\n            .bin\n            .as_ref()\n            .ok_or_else(|| anyhow!(\"missing binary payload\"))?;\n\n        let scene = gltf\n            .default_scene()\n            .ok_or_else(|| anyhow!(\"no default scene\"))?;\n        let identity = na::Matrix4::identity();\n        let meshes = try_join_all(\n            scene\n                .nodes()\n                .map(|node| load_node(ctx, buffer, &identity, node)),\n        )\n        .await?\n        .into_iter()\n        .flatten()\n        .collect();\n        Ok(GltfScene(meshes))\n    }\n}\n\npub struct GltfScene(pub 
Vec<Mesh>);\n\nimpl Cleanup for GltfScene {\n    unsafe fn cleanup(self, gfx: &Base) {\n        unsafe {\n            for mesh in self.0 {\n                mesh.cleanup(gfx);\n            }\n        }\n    }\n}\n\nfn load_node<'a>(\n    ctx: &'a LoadCtx,\n    buffer: &'a [u8],\n    transform: &'a na::Matrix4<f32>,\n    node: gltf::Node<'a>,\n) -> BoxFuture<'a, Result<Vec<Mesh>>> {\n    async move {\n        let transform = transform * na::Matrix4::from(node.transform().matrix());\n        let (mut local, children) = tokio::try_join!(\n            async {\n                if let Some(mesh) = node.mesh() {\n                    Ok(load_mesh(ctx, buffer, &transform, &mesh).await?)\n                } else {\n                    Ok(Vec::new())\n                }\n            },\n            try_join_all(\n                node.children()\n                    .map(|child| load_node(ctx, buffer, &transform, child))\n            )\n        )?;\n\n        local.extend(children.into_iter().flatten());\n\n        Ok(local)\n    }\n    .boxed()\n}\n\nasync fn load_mesh(\n    ctx: &LoadCtx,\n    buffer: &[u8],\n    transform: &na::Matrix4<f32>,\n    mesh: &gltf::Mesh<'_>,\n) -> Result<Vec<Mesh>> {\n    try_join_all(\n        mesh.primitives()\n            .map(|x| load_primitive(ctx, buffer, transform, x)),\n    )\n    .await\n}\n\nasync fn load_primitive(\n    ctx: &LoadCtx,\n    buffer: &[u8],\n    transform: &na::Matrix4<f32>,\n    prim: gltf::Primitive<'_>,\n) -> Result<Mesh> {\n    let device = &*ctx.gfx.device;\n    let texcoord_index = prim\n        .material()\n        .pbr_metallic_roughness()\n        .base_color_texture()\n        .map(|x| x.tex_coord());\n\n    // Concurrent upload\n    // TODO: Don't leak resources on error\n    let (geom, color) = tokio::join!(\n        load_geom(ctx, buffer, &prim, transform, texcoord_index),\n        load_material(ctx, buffer, &prim)\n    );\n    let geom = geom?;\n    let color = color?;\n\n    unsafe {\n        let color_view = 
device\n            .create_image_view(\n                &vk::ImageViewCreateInfo::default()\n                    .image(color.handle)\n                    .view_type(vk::ImageViewType::TYPE_2D)\n                    .format(vk::Format::R8G8B8A8_SRGB)\n                    .subresource_range(vk::ImageSubresourceRange {\n                        aspect_mask: vk::ImageAspectFlags::COLOR,\n                        base_mip_level: 0,\n                        level_count: 1,\n                        base_array_layer: 0,\n                        layer_count: 1,\n                    }),\n                None,\n            )\n            .unwrap();\n        let pool = device\n            .create_descriptor_pool(\n                &vk::DescriptorPoolCreateInfo::default()\n                    .max_sets(1)\n                    .pool_sizes(&[vk::DescriptorPoolSize {\n                        ty: vk::DescriptorType::COMBINED_IMAGE_SAMPLER,\n                        descriptor_count: 1,\n                    }]),\n                None,\n            )\n            .unwrap();\n        let ds = device\n            .allocate_descriptor_sets(\n                &vk::DescriptorSetAllocateInfo::default()\n                    .descriptor_pool(pool)\n                    .set_layouts(&[ctx.mesh_ds_layout]),\n            )\n            .unwrap()[0];\n        device.update_descriptor_sets(\n            &[vk::WriteDescriptorSet::default()\n                .dst_set(ds)\n                .dst_binding(0)\n                .descriptor_type(vk::DescriptorType::COMBINED_IMAGE_SAMPLER)\n                .image_info(&[vk::DescriptorImageInfo {\n                    sampler: vk::Sampler::null(),\n                    image_view: color_view,\n                    image_layout: vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL,\n                }])],\n            &[],\n        );\n\n        Ok(Mesh {\n            vertices: geom.vertices,\n            indices: geom.indices,\n            index_count: geom.index_count,\n         
   pool,\n            ds,\n            color,\n            color_view,\n        })\n    }\n}\n\nstruct Geometry {\n    vertices: BufferRegionAlloc,\n    indices: BufferRegionAlloc,\n    index_count: u32,\n}\n\nasync fn load_geom(\n    ctx: &LoadCtx,\n    buffer: &[u8],\n    prim: &gltf::Primitive<'_>,\n    transform: &na::Matrix4<f32>,\n    texcoord_index: Option<u32>,\n) -> Result<Geometry> {\n    let normal_transform = match transform.try_inverse() {\n        None => {\n            error!(\"non-invertible transform\");\n            na::Matrix4::identity()\n        }\n        Some(x) => x.transpose(),\n    };\n\n    let prim = prim.reader(|x| {\n        if let gltf::buffer::Source::Bin = x.source() {\n            Some(buffer)\n        } else {\n            None\n        }\n    });\n    let positions = prim\n        .read_positions()\n        .ok_or_else(|| anyhow!(\"vertex positions missing\"))?;\n    let mut texcoords = texcoord_index\n        .map(|i| -> Result<_> {\n            Ok(prim\n                .read_tex_coords(i)\n                .ok_or_else(|| anyhow!(\"texcoords missing\"))?\n                .into_f32())\n        })\n        .transpose()?;\n    let normals = prim\n        .read_normals()\n        .ok_or_else(|| anyhow!(\"normals missing\"))?;\n    let vertex_count = positions.len();\n    if vertex_count != normals.len() || texcoords.as_ref().is_some_and(|x| vertex_count != x.len())\n    {\n        bail!(\"inconsistent vertex attribute counts\");\n    }\n    let byte_size = vertex_count * mem::size_of::<Vertex>();\n\n    let mut v_staging = ctx\n        .staging\n        .alloc(byte_size)\n        .await\n        .ok_or_else(|| anyhow!(\"too large\"))?;\n    for ((pos, norm), storage) in positions\n        .zip(normals)\n        .zip(v_staging.chunks_exact_mut(mem::size_of::<Vertex>()))\n    {\n        let v = Vertex {\n            position: na::Point3::from_homogeneous(\n                transform * (na::Point3::from(pos)).to_homogeneous(),\n          
  )\n            .unwrap_or_else(na::Point3::origin),\n            texcoords: texcoords\n                .as_mut()\n                .map_or_else(na::zero, |x| x.next().unwrap().into()),\n            normal: na::Unit::new_normalize(\n                (normal_transform * na::Vector3::from(norm).to_homogeneous()).xyz(),\n            ),\n        };\n        // write_unaligned accepts misaligned pointers\n        #[allow(clippy::cast_ptr_alignment)]\n        unsafe {\n            ptr::write_unaligned(storage.as_ptr() as *mut Vertex, v);\n        }\n    }\n\n    let indices = prim\n        .read_indices()\n        .ok_or_else(|| anyhow!(\"indices missing\"))?\n        .into_u32();\n    let index_count = indices.len();\n    let mut i_staging = ctx\n        .staging\n        .alloc(index_count * 4)\n        .await\n        .ok_or_else(|| anyhow!(\"too large\"))?;\n    for (idx, storage) in indices.zip(i_staging.chunks_exact_mut(4)) {\n        storage.copy_from_slice(&idx.to_ne_bytes());\n    }\n\n    let vert_alloc =\n        ctx.vertex_alloc\n            .lock()\n            .unwrap()\n            .alloc(&ctx.gfx.device, byte_size as vk::DeviceSize, 4);\n    let staging_buffer = ctx.staging.buffer();\n    let vert_buffer = vert_alloc.buffer;\n    let vert_src_offset = v_staging.offset();\n    let vert_dst_offset = vert_alloc.offset;\n    let vertex_upload = unsafe {\n        ctx.transfer.run(move |xf, cmd| {\n            xf.device.cmd_copy_buffer(\n                cmd,\n                staging_buffer,\n                vert_buffer,\n                &[vk::BufferCopy {\n                    src_offset: vert_src_offset,\n                    dst_offset: vert_dst_offset,\n                    size: byte_size as vk::DeviceSize,\n                }],\n            );\n            xf.stages |= vk::PipelineStageFlags::VERTEX_INPUT;\n            xf.buffer_barriers.push(\n                vk::BufferMemoryBarrier::default()\n                    
.src_access_mask(vk::AccessFlags::TRANSFER_WRITE)\n                    .dst_access_mask(vk::AccessFlags::VERTEX_ATTRIBUTE_READ)\n                    .src_queue_family_index(xf.queue_family)\n                    .dst_queue_family_index(xf.dst_queue_family)\n                    .buffer(vert_buffer)\n                    .offset(vert_dst_offset)\n                    .size(byte_size as vk::DeviceSize),\n            );\n        })\n    };\n\n    let idx_alloc = ctx.index_alloc.lock().unwrap().alloc(\n        &ctx.gfx.device,\n        index_count as vk::DeviceSize * 4,\n        4,\n    );\n    let idx_buffer = idx_alloc.buffer;\n    let idx_src_offset = i_staging.offset();\n    let idx_dst_offset = idx_alloc.offset;\n    let index_upload = unsafe {\n        ctx.transfer.run(move |xf, cmd| {\n            xf.device.cmd_copy_buffer(\n                cmd,\n                staging_buffer,\n                idx_buffer,\n                &[vk::BufferCopy {\n                    src_offset: idx_src_offset,\n                    dst_offset: idx_dst_offset,\n                    size: index_count as vk::DeviceSize * 4,\n                }],\n            );\n            xf.stages |= vk::PipelineStageFlags::VERTEX_INPUT;\n            xf.buffer_barriers.push(\n                vk::BufferMemoryBarrier::default()\n                    .src_access_mask(vk::AccessFlags::TRANSFER_WRITE)\n                    .dst_access_mask(vk::AccessFlags::INDEX_READ)\n                    .src_queue_family_index(xf.queue_family)\n                    .dst_queue_family_index(xf.dst_queue_family)\n                    .buffer(idx_buffer)\n                    .offset(idx_dst_offset)\n                    .size(index_count as vk::DeviceSize * 4),\n            );\n        })\n    };\n    // Upload concurrently\n    let (r1, r2) = tokio::join!(vertex_upload, index_upload);\n    r1?;\n    r2?;\n    Ok(Geometry {\n        vertices: vert_alloc,\n        indices: idx_alloc,\n        index_count: index_count as u32,\n    
})\n}\n\nasync fn load_material(\n    ctx: &LoadCtx,\n    buffer: &[u8],\n    prim: &gltf::Primitive<'_>,\n) -> Result<DedicatedImage> {\n    let device = &*ctx.gfx.device;\n    let color = match prim\n        .material()\n        .pbr_metallic_roughness()\n        .base_color_texture()\n    {\n        None => {\n            return load_solid_color(\n                ctx,\n                prim.material().pbr_metallic_roughness().base_color_factor(),\n            )\n            .await;\n        }\n        Some(x) => x,\n    };\n    let color_data = match color.texture().source().source() {\n        gltf::image::Source::Uri { uri, .. } => {\n            let path = ctx\n                .cfg\n                .find_asset(Path::new(uri))\n                .ok_or_else(|| anyhow!(\"texture {} not found\", uri))?;\n            trace!(path = %path.anonymize().display(), \"reading texture\");\n            Cow::Owned(fs::read(&path).context(\"reading texture\")?)\n        }\n        gltf::image::Source::View { view, .. 
} => {\n            match view.buffer().source() {\n                gltf::buffer::Source::Bin => {}\n                gltf::buffer::Source::Uri(_) => {\n                    bail!(\"external buffers unsupported\");\n                }\n            }\n            Cow::Borrowed(&buffer[view.offset()..view.offset() + view.length()])\n        }\n    };\n    let mut color_data = &color_data[..];\n    let mut color_reader = png::Decoder::new(Cursor::new(&mut color_data))\n        .read_info()\n        .with_context(|| \"decoding PNG header\")?;\n    let (width, height) = {\n        let info = color_reader.info();\n        (info.width, info.height)\n    };\n    let mut color_staging = ctx\n        .staging\n        .alloc(width as usize * height as usize * 4)\n        .await\n        .ok_or_else(|| anyhow!(\"texture too large\"))?;\n    color_reader\n        .next_frame(&mut color_staging)\n        .with_context(|| \"decoding PNG data\")?;\n    let color = unsafe {\n        DedicatedImage::new(\n            device,\n            &ctx.gfx.memory_properties,\n            &vk::ImageCreateInfo::default()\n                .image_type(vk::ImageType::TYPE_2D)\n                .format(vk::Format::R8G8B8A8_SRGB)\n                .extent(vk::Extent3D {\n                    width,\n                    height,\n                    depth: 1,\n                })\n                .mip_levels(1)\n                .array_layers(1)\n                .samples(vk::SampleCountFlags::TYPE_1)\n                .usage(vk::ImageUsageFlags::SAMPLED | vk::ImageUsageFlags::TRANSFER_DST),\n        )\n    };\n    let staging_buffer = ctx.staging.buffer();\n    let color_handle = color.handle;\n    let color_offset = color_staging.offset();\n    unsafe {\n        ctx.transfer\n            .run(move |xf, cmd| {\n                let range = vk::ImageSubresourceRange {\n                    aspect_mask: vk::ImageAspectFlags::COLOR,\n                    base_mip_level: 0,\n                    level_count: 1,\n     
               base_array_layer: 0,\n                    layer_count: 1,\n                };\n                xf.device.cmd_pipeline_barrier(\n                    cmd,\n                    vk::PipelineStageFlags::TOP_OF_PIPE,\n                    vk::PipelineStageFlags::TRANSFER,\n                    vk::DependencyFlags::default(),\n                    &[],\n                    &[],\n                    &[vk::ImageMemoryBarrier::default()\n                        .dst_access_mask(vk::AccessFlags::TRANSFER_WRITE)\n                        .src_queue_family_index(vk::QUEUE_FAMILY_IGNORED)\n                        .dst_queue_family_index(vk::QUEUE_FAMILY_IGNORED)\n                        .old_layout(vk::ImageLayout::UNDEFINED)\n                        .new_layout(vk::ImageLayout::TRANSFER_DST_OPTIMAL)\n                        .image(color_handle)\n                        .subresource_range(range)],\n                );\n                xf.device.cmd_copy_buffer_to_image(\n                    cmd,\n                    staging_buffer,\n                    color_handle,\n                    vk::ImageLayout::TRANSFER_DST_OPTIMAL,\n                    &[vk::BufferImageCopy {\n                        buffer_offset: color_offset,\n                        image_subresource: vk::ImageSubresourceLayers {\n                            aspect_mask: vk::ImageAspectFlags::COLOR,\n                            mip_level: 0,\n                            base_array_layer: 0,\n                            layer_count: 1,\n                        },\n                        image_extent: vk::Extent3D {\n                            width,\n                            height,\n                            depth: 1,\n                        },\n                        ..Default::default()\n                    }],\n                );\n                xf.stages |= vk::PipelineStageFlags::FRAGMENT_SHADER;\n                xf.image_barriers.push(\n                    
vk::ImageMemoryBarrier::default()\n                        .src_access_mask(vk::AccessFlags::TRANSFER_WRITE)\n                        .dst_access_mask(vk::AccessFlags::SHADER_READ)\n                        .src_queue_family_index(xf.queue_family)\n                        .dst_queue_family_index(xf.dst_queue_family)\n                        .old_layout(vk::ImageLayout::TRANSFER_DST_OPTIMAL)\n                        .new_layout(vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL)\n                        .image(color_handle)\n                        .subresource_range(range),\n                );\n            })\n            .await?;\n    }\n    Ok(color)\n}\n\nasync fn load_solid_color(ctx: &LoadCtx, rgba: [f32; 4]) -> Result<DedicatedImage> {\n    unsafe {\n        let image = DedicatedImage::new(\n            &ctx.gfx.device,\n            &ctx.gfx.memory_properties,\n            &vk::ImageCreateInfo::default()\n                .image_type(vk::ImageType::TYPE_2D)\n                .format(vk::Format::R8G8B8A8_SRGB)\n                .extent(vk::Extent3D {\n                    width: 1,\n                    height: 1,\n                    depth: 1,\n                })\n                .mip_levels(1)\n                .array_layers(1)\n                .samples(vk::SampleCountFlags::TYPE_1)\n                .usage(vk::ImageUsageFlags::SAMPLED | vk::ImageUsageFlags::TRANSFER_DST),\n        );\n        let handle = image.handle;\n        ctx.transfer\n            .run(move |xf, cmd| {\n                let range = vk::ImageSubresourceRange {\n                    aspect_mask: vk::ImageAspectFlags::COLOR,\n                    base_mip_level: 0,\n                    level_count: 1,\n                    base_array_layer: 0,\n                    layer_count: 1,\n                };\n                xf.device.cmd_pipeline_barrier(\n                    cmd,\n                    vk::PipelineStageFlags::TOP_OF_PIPE,\n                    vk::PipelineStageFlags::TRANSFER,\n                    
vk::DependencyFlags::default(),\n                    &[],\n                    &[],\n                    &[vk::ImageMemoryBarrier::default()\n                        .dst_access_mask(vk::AccessFlags::TRANSFER_WRITE)\n                        .src_queue_family_index(vk::QUEUE_FAMILY_IGNORED)\n                        .dst_queue_family_index(vk::QUEUE_FAMILY_IGNORED)\n                        .old_layout(vk::ImageLayout::UNDEFINED)\n                        .new_layout(vk::ImageLayout::TRANSFER_DST_OPTIMAL)\n                        .image(handle)\n                        .subresource_range(range)],\n                );\n                xf.device.cmd_clear_color_image(\n                    cmd,\n                    handle,\n                    vk::ImageLayout::TRANSFER_DST_OPTIMAL,\n                    &vk::ClearColorValue { float32: rgba },\n                    &[range],\n                );\n                xf.stages |= vk::PipelineStageFlags::FRAGMENT_SHADER;\n                xf.image_barriers.push(\n                    vk::ImageMemoryBarrier::default()\n                        .src_access_mask(vk::AccessFlags::TRANSFER_WRITE)\n                        .dst_access_mask(vk::AccessFlags::SHADER_READ)\n                        .src_queue_family_index(xf.queue_family)\n                        .dst_queue_family_index(xf.dst_queue_family)\n                        .old_layout(vk::ImageLayout::TRANSFER_DST_OPTIMAL)\n                        .new_layout(vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL)\n                        .image(handle)\n                        .subresource_range(range),\n                );\n            })\n            .await?;\n        Ok(image)\n    }\n}\n"
  },
  {
    "path": "client/src/graphics/gui.rs",
    "content": "use yakui::{\n    Alignment, Color, align, colored_box, colored_box_container, label, pad, widgets::Pad,\n};\n\nuse crate::Sim;\n\npub struct GuiState {\n    show_gui: bool,\n}\n\nimpl GuiState {\n    pub fn new() -> Self {\n        GuiState { show_gui: true }\n    }\n\n    /// Toggles whether the GUI is shown\n    pub fn toggle_gui(&mut self) {\n        self.show_gui = !self.show_gui;\n    }\n\n    /// Prepare the GUI for rendering. This should be called between\n    /// Yakui::start and Yakui::finish.\n    pub fn run(&self, sim: &Sim) {\n        if !self.show_gui {\n            return;\n        }\n\n        align(Alignment::CENTER, || {\n            colored_box(Color::BLACK.with_alpha(0.9), [5.0, 5.0]);\n        });\n\n        align(Alignment::TOP_LEFT, || {\n            pad(Pad::all(8.0), || {\n                colored_box_container(Color::BLACK.with_alpha(0.7), || {\n                    let material_count_string = if sim.cfg.gameplay_enabled {\n                        sim.count_inventory_entities_matching_material(sim.selected_material())\n                            .to_string()\n                    } else {\n                        \"∞\".to_string()\n                    };\n                    label(format!(\n                        \"Selected material: {:?} (×{})\",\n                        sim.selected_material(),\n                        material_count_string\n                    ));\n                });\n            });\n        });\n    }\n}\n"
  },
  {
    "path": "client/src/graphics/meshes.rs",
    "content": "use std::mem;\n\nuse ash::{Device, vk};\nuse lahar::{BufferRegionAlloc, DedicatedImage};\nuse memoffset::offset_of;\nuse vk_shader_macros::include_glsl;\n\nuse super::Base;\nuse common::defer;\n\nconst VERT: &[u32] = include_glsl!(\"shaders/mesh.vert\");\nconst FRAG: &[u32] = include_glsl!(\"shaders/mesh.frag\");\n\npub struct Meshes {\n    pipeline_layout: vk::PipelineLayout,\n    pipeline: vk::Pipeline,\n}\n\nimpl Meshes {\n    #[allow(clippy::unneeded_field_pattern)] // Silence offset_of warnings nonsense\n    pub fn new(gfx: &Base, ds_layout: vk::DescriptorSetLayout) -> Self {\n        let device = &*gfx.device;\n        unsafe {\n            // Construct the shader modules\n            let vert = device\n                .create_shader_module(&vk::ShaderModuleCreateInfo::default().code(VERT), None)\n                .unwrap();\n            // Note that these only need to live until the pipeline itself is constructed\n            let v_guard = defer(|| device.destroy_shader_module(vert, None));\n\n            let frag = device\n                .create_shader_module(&vk::ShaderModuleCreateInfo::default().code(FRAG), None)\n                .unwrap();\n            let f_guard = defer(|| device.destroy_shader_module(frag, None));\n\n            // Define the outward-facing interface of the shaders, incl. 
uniforms, samplers, etc.\n            let pipeline_layout = device\n                .create_pipeline_layout(\n                    &vk::PipelineLayoutCreateInfo::default()\n                        .set_layouts(&[gfx.common_layout, ds_layout])\n                        .push_constant_ranges(&[vk::PushConstantRange {\n                            stage_flags: vk::ShaderStageFlags::VERTEX,\n                            offset: 0,\n                            size: 64,\n                        }]),\n                    None,\n                )\n                .unwrap();\n\n            let entry_point = cstr!(\"main\").as_ptr();\n            let mut pipelines = device\n                .create_graphics_pipelines(\n                    gfx.pipeline_cache,\n                    &[vk::GraphicsPipelineCreateInfo::default()\n                        .stages(&[\n                            vk::PipelineShaderStageCreateInfo {\n                                stage: vk::ShaderStageFlags::VERTEX,\n                                module: vert,\n                                p_name: entry_point,\n                                ..Default::default()\n                            },\n                            vk::PipelineShaderStageCreateInfo {\n                                stage: vk::ShaderStageFlags::FRAGMENT,\n                                module: frag,\n                                p_name: entry_point,\n                                ..Default::default()\n                            },\n                        ])\n                        .vertex_input_state(\n                            &vk::PipelineVertexInputStateCreateInfo::default()\n                                .vertex_binding_descriptions(&[vk::VertexInputBindingDescription {\n                                    binding: 0,\n                                    stride: mem::size_of::<Vertex>() as u32,\n                                    input_rate: vk::VertexInputRate::VERTEX,\n                                }])\n 
                               .vertex_attribute_descriptions(&[\n                                    vk::VertexInputAttributeDescription {\n                                        location: 0,\n                                        binding: 0,\n                                        format: vk::Format::R32G32B32_SFLOAT,\n                                        offset: offset_of!(Vertex, position) as u32,\n                                    },\n                                    vk::VertexInputAttributeDescription {\n                                        location: 1,\n                                        binding: 0,\n                                        format: vk::Format::R32G32_SFLOAT,\n                                        offset: offset_of!(Vertex, texcoords) as u32,\n                                    },\n                                    vk::VertexInputAttributeDescription {\n                                        location: 2,\n                                        binding: 0,\n                                        format: vk::Format::R32G32B32_SFLOAT,\n                                        offset: offset_of!(Vertex, normal) as u32,\n                                    },\n                                ]),\n                        )\n                        .input_assembly_state(\n                            &vk::PipelineInputAssemblyStateCreateInfo::default()\n                                .topology(vk::PrimitiveTopology::TRIANGLE_LIST),\n                        )\n                        .viewport_state(\n                            &vk::PipelineViewportStateCreateInfo::default()\n                                .scissor_count(1)\n                                .viewport_count(1),\n                        )\n                        .rasterization_state(\n                            &vk::PipelineRasterizationStateCreateInfo::default()\n                                .cull_mode(vk::CullModeFlags::BACK)\n                          
      .front_face(vk::FrontFace::COUNTER_CLOCKWISE)\n                                .polygon_mode(vk::PolygonMode::FILL)\n                                .line_width(1.0),\n                        )\n                        .multisample_state(\n                            &vk::PipelineMultisampleStateCreateInfo::default()\n                                .rasterization_samples(vk::SampleCountFlags::TYPE_1),\n                        )\n                        .depth_stencil_state(\n                            &vk::PipelineDepthStencilStateCreateInfo::default()\n                                .depth_test_enable(true)\n                                .depth_write_enable(true)\n                                .depth_compare_op(vk::CompareOp::GREATER),\n                        )\n                        .color_blend_state(\n                            &vk::PipelineColorBlendStateCreateInfo::default().attachments(&[\n                                vk::PipelineColorBlendAttachmentState {\n                                    blend_enable: vk::TRUE,\n                                    src_color_blend_factor: vk::BlendFactor::ONE,\n                                    dst_color_blend_factor: vk::BlendFactor::ZERO,\n                                    color_blend_op: vk::BlendOp::ADD,\n                                    color_write_mask: vk::ColorComponentFlags::R\n                                        | vk::ColorComponentFlags::G\n                                        | vk::ColorComponentFlags::B,\n                                    ..Default::default()\n                                },\n                            ]),\n                        )\n                        .dynamic_state(\n                            &vk::PipelineDynamicStateCreateInfo::default().dynamic_states(&[\n                                vk::DynamicState::VIEWPORT,\n                                vk::DynamicState::SCISSOR,\n                            ]),\n                        )\n        
                .layout(pipeline_layout)\n                        .render_pass(gfx.render_pass)\n                        .subpass(0)],\n                    None,\n                )\n                .unwrap()\n                .into_iter();\n\n            let pipeline = pipelines.next().unwrap();\n            gfx.set_name(pipeline, cstr!(\"sky\"));\n\n            // Clean up the shaders explicitly, so the defer guards don't hold onto references we're\n            // moving into `Self` to be returned\n            v_guard.invoke();\n            f_guard.invoke();\n\n            Self {\n                pipeline_layout,\n                pipeline,\n            }\n        }\n    }\n\n    pub unsafe fn draw(\n        &mut self,\n        device: &Device,\n        common_ds: vk::DescriptorSet,\n        cmd: vk::CommandBuffer,\n        mesh: &Mesh,\n        transform: &na::Matrix4<f32>,\n    ) {\n        unsafe {\n            device.cmd_bind_pipeline(cmd, vk::PipelineBindPoint::GRAPHICS, self.pipeline);\n            device.cmd_bind_descriptor_sets(\n                cmd,\n                vk::PipelineBindPoint::GRAPHICS,\n                self.pipeline_layout,\n                0,\n                &[common_ds, mesh.ds],\n                &[],\n            );\n            device.cmd_push_constants(\n                cmd,\n                self.pipeline_layout,\n                vk::ShaderStageFlags::VERTEX,\n                0,\n                &mem::transmute::<na::Matrix4<f32>, [u8; 64]>(*transform),\n            );\n            device.cmd_bind_vertex_buffers(\n                cmd,\n                0,\n                &[mesh.vertices.buffer],\n                &[mesh.vertices.offset],\n            );\n            device.cmd_bind_index_buffer(\n                cmd,\n                mesh.indices.buffer,\n                mesh.indices.offset,\n                vk::IndexType::UINT32,\n            );\n            device.cmd_draw_indexed(cmd, mesh.index_count, 1, 0, 0, 0);\n        }\n    }\n\n 
   pub unsafe fn destroy(&mut self, device: &Device) {\n        unsafe {\n            device.destroy_pipeline(self.pipeline, None);\n            device.destroy_pipeline_layout(self.pipeline_layout, None);\n        }\n    }\n}\n\n#[repr(C)]\npub struct Vertex {\n    pub position: na::Point3<f32>,\n    pub texcoords: na::Vector2<f32>,\n    pub normal: na::Unit<na::Vector3<f32>>,\n}\n\n#[derive(Copy, Clone)]\npub struct Mesh {\n    pub vertices: BufferRegionAlloc,\n    pub indices: BufferRegionAlloc,\n    pub index_count: u32,\n    pub pool: vk::DescriptorPool,\n    pub ds: vk::DescriptorSet,\n    // TODO: Make shareable\n    pub color: DedicatedImage,\n    pub color_view: vk::ImageView,\n}\n\nimpl crate::loader::Cleanup for Mesh {\n    unsafe fn cleanup(mut self, gfx: &Base) {\n        unsafe {\n            let device = &*gfx.device;\n            device.destroy_descriptor_pool(self.pool, None);\n            device.destroy_image_view(self.color_view, None);\n            self.color.destroy(device);\n        }\n    }\n}\n"
  },
  {
    "path": "client/src/graphics/mod.rs",
    "content": "#![allow(clippy::missing_safety_doc)] // Vulkan wrangling is categorically unsafe\n\nmod base;\nmod core;\nmod draw;\nmod fog;\nmod frustum;\nmod gltf_mesh;\nmod gui;\nmod meshes;\nmod png_array;\npub mod voxels;\nmod window;\n\n#[cfg(test)]\nmod tests;\n\npub use self::{\n    base::Base,\n    core::Core,\n    draw::Draw,\n    fog::Fog,\n    frustum::Frustum,\n    gltf_mesh::{GlbFile, GltfScene},\n    meshes::{Mesh, Meshes},\n    png_array::PngArray,\n    voxels::Voxels,\n    window::{EarlyWindow, Window},\n};\n\nunsafe fn as_bytes<T: Copy>(x: &T) -> &[u8] {\n    unsafe { std::slice::from_raw_parts(x as *const T as *const u8, std::mem::size_of::<T>()) }\n}\n\n#[repr(C)]\n#[derive(Debug, Eq, PartialEq, Copy, Clone)]\npub struct VkDrawIndirectCommand {\n    pub vertex_count: u32,\n    pub instance_count: u32,\n    pub first_vertex: u32,\n    pub first_instance: u32,\n}\n"
  },
  {
    "path": "client/src/graphics/png_array.rs",
    "content": "use std::{\n    fs::{self, File},\n    io::BufReader,\n    path::PathBuf,\n};\n\nuse anyhow::{Context, anyhow, bail};\nuse ash::vk;\nuse common::Anonymize;\nuse lahar::DedicatedImage;\nuse tracing::trace;\n\nuse crate::loader::{LoadCtx, LoadFuture, Loadable};\n\npub struct PngArray {\n    pub path: PathBuf,\n    pub size: usize,\n}\n\nimpl Loadable for PngArray {\n    type Output = DedicatedImage;\n\n    fn load(self, handle: &LoadCtx) -> LoadFuture<'_, Self::Output> {\n        Box::pin(async move {\n            let full_path = handle\n                .cfg\n                .find_asset(&self.path)\n                .ok_or_else(|| anyhow!(\"{} not found\", self.path.anonymize().display()))?;\n            let mut paths = fs::read_dir(&full_path)\n                .with_context(|| format!(\"reading {}\", full_path.anonymize().display()))?\n                .map(|x| x.map(|x| x.path()))\n                .collect::<Result<Vec<_>, _>>()\n                .with_context(|| format!(\"reading {}\", full_path.anonymize().display()))?;\n            if paths.is_empty() {\n                bail!(\"{} is empty\", full_path.anonymize().display());\n            }\n            if paths.len() < self.size {\n                bail!(\n                    \"{}: expected {} textures, found {}\",\n                    full_path.anonymize().display(),\n                    self.size,\n                    paths.len()\n                );\n            }\n            paths.sort();\n            paths.truncate(self.size);\n            let mut dims: Option<(u32, u32)> = None;\n            let mut mem = None;\n            for (i, path) in paths.iter().enumerate() {\n                trace!(layer=i, path=%path.anonymize().display(), \"loading\");\n                let file = File::open(path)\n                    .with_context(|| format!(\"reading {}\", path.anonymize().display()))?;\n                let decoder = png::Decoder::new(BufReader::new(file));\n                let mut reader = 
decoder\n                    .read_info()\n                    .with_context(|| format!(\"decoding {}\", path.anonymize().display()))?;\n                let info = reader.info();\n                if let Some(dims) = dims {\n                    if dims != (info.width, info.height) {\n                        bail!(\n                            \"inconsistent dimensions: expected {}x{}, got {}x{}\",\n                            dims.0,\n                            dims.1,\n                            info.width,\n                            info.height\n                        );\n                    }\n                } else {\n                    dims = Some((info.width, info.height));\n                    mem = Some(\n                        handle\n                            .staging\n                            .alloc(info.width as usize * info.height as usize * 4 * self.size)\n                            .await\n                            .ok_or_else(|| {\n                                anyhow!(\n                                    \"{}: image array too large\",\n                                    full_path.anonymize().display()\n                                )\n                            })?,\n                    );\n                }\n                let mem = mem.as_mut().unwrap();\n                let step_size = info.width as usize * info.height as usize * 4;\n                reader\n                    .next_frame(&mut mem[i * step_size..(i + 1) * step_size])\n                    .with_context(|| format!(\"decoding {}\", path.anonymize().display()))?;\n            }\n            let (width, height) = dims.unwrap();\n            let mem = mem.unwrap();\n            unsafe {\n                let image = DedicatedImage::new(\n                    &handle.gfx.device,\n                    &handle.gfx.memory_properties,\n                    &vk::ImageCreateInfo::default()\n                        .image_type(vk::ImageType::TYPE_2D)\n                        
.format(vk::Format::R8G8B8A8_SRGB)\n                        .extent(vk::Extent3D {\n                            width,\n                            height,\n                            depth: 1,\n                        })\n                        .mip_levels(1)\n                        .array_layers(self.size as u32)\n                        .samples(vk::SampleCountFlags::TYPE_1)\n                        .usage(vk::ImageUsageFlags::SAMPLED | vk::ImageUsageFlags::TRANSFER_DST),\n                );\n\n                let range = vk::ImageSubresourceRange {\n                    aspect_mask: vk::ImageAspectFlags::COLOR,\n                    base_mip_level: 0,\n                    level_count: 1,\n                    base_array_layer: 0,\n                    layer_count: self.size as u32,\n                };\n                let src = handle.staging.buffer();\n                let buffer_offset = mem.offset();\n                let dst = image.handle;\n\n                handle\n                    .transfer\n                    .run(move |xf, cmd| {\n                        xf.device.cmd_pipeline_barrier(\n                            cmd,\n                            vk::PipelineStageFlags::TOP_OF_PIPE,\n                            vk::PipelineStageFlags::TRANSFER,\n                            vk::DependencyFlags::default(),\n                            &[],\n                            &[],\n                            &[vk::ImageMemoryBarrier::default()\n                                .dst_access_mask(vk::AccessFlags::TRANSFER_WRITE)\n                                .src_queue_family_index(vk::QUEUE_FAMILY_IGNORED)\n                                .dst_queue_family_index(vk::QUEUE_FAMILY_IGNORED)\n                                .old_layout(vk::ImageLayout::UNDEFINED)\n                                .new_layout(vk::ImageLayout::TRANSFER_DST_OPTIMAL)\n                                .image(dst)\n                                .subresource_range(range)],\n             
           );\n                        xf.device.cmd_copy_buffer_to_image(\n                            cmd,\n                            src,\n                            dst,\n                            vk::ImageLayout::TRANSFER_DST_OPTIMAL,\n                            &[vk::BufferImageCopy {\n                                buffer_offset,\n                                image_subresource: vk::ImageSubresourceLayers {\n                                    aspect_mask: vk::ImageAspectFlags::COLOR,\n                                    mip_level: 0,\n                                    base_array_layer: 0,\n                                    layer_count: range.layer_count,\n                                },\n                                image_extent: vk::Extent3D {\n                                    width,\n                                    height,\n                                    depth: 1,\n                                },\n                                ..Default::default()\n                            }],\n                        );\n                        xf.stages |= vk::PipelineStageFlags::FRAGMENT_SHADER;\n                        xf.image_barriers.push(\n                            vk::ImageMemoryBarrier::default()\n                                .src_access_mask(vk::AccessFlags::TRANSFER_WRITE)\n                                .dst_access_mask(vk::AccessFlags::SHADER_READ)\n                                .src_queue_family_index(xf.queue_family)\n                                .dst_queue_family_index(xf.dst_queue_family)\n                                .old_layout(vk::ImageLayout::TRANSFER_DST_OPTIMAL)\n                                .new_layout(vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL)\n                                .image(dst)\n                                .subresource_range(range),\n                        );\n                    })\n                    .await?;\n\n                trace!(\n                    width = width,\n  
                  height = height,\n                    path = %full_path.anonymize().display(),\n                    \"loaded array\"\n                );\n                Ok(image)\n            }\n        })\n    }\n}\n"
  },
  {
    "path": "client/src/graphics/tests.rs",
    "content": "use super::Base;\n\n#[test]\n#[ignore]\nfn init_base() {\n    let _guard = common::tracing_guard();\n    Base::headless();\n}\n"
  },
  {
    "path": "client/src/graphics/voxels/mod.rs",
    "content": "mod surface;\npub mod surface_extraction;\n\n#[cfg(test)]\nmod tests;\n\nuse std::{sync::Arc, time::Instant};\n\nuse ash::{Device, vk};\nuse lru_slab::LruSlab;\nuse metrics::histogram;\nuse tracing::warn;\n\nuse crate::{\n    Config, Loader, Sim,\n    graphics::{Base, Frustum},\n};\nuse common::{\n    dodeca::{self, Vertex},\n    graph::NodeId,\n    math::{MIsometry, MPoint},\n    node::{Chunk, ChunkId, VoxelData},\n};\n\nuse surface::Surface;\nuse surface_extraction::{DrawBuffer, ExtractTask, ScratchBuffer, SurfaceExtraction};\n\npub struct Voxels {\n    config: Arc<Config>,\n    surface_extraction: SurfaceExtraction,\n    extraction_scratch: ScratchBuffer,\n    surfaces: DrawBuffer,\n    states: LruSlab<SurfaceState>,\n    draw: Surface,\n    max_chunks: u32,\n}\n\nimpl Voxels {\n    pub fn new(\n        gfx: &Base,\n        config: Arc<Config>,\n        loader: &mut Loader,\n        dimension: u32,\n        frames: u32,\n    ) -> Self {\n        let max_faces = 3 * (dimension.pow(3) + dimension.pow(2));\n        let max_supported_chunks = gfx.limits.max_storage_buffer_range / (8 * max_faces);\n        let max_chunks = if MAX_CHUNKS > max_supported_chunks {\n            warn!(\n                \"clamping max chunks to {} due to SSBO size limit\",\n                max_supported_chunks\n            );\n            max_supported_chunks\n        } else {\n            MAX_CHUNKS\n        };\n        let surfaces = DrawBuffer::new(gfx, max_chunks, dimension);\n        let draw = Surface::new(gfx, loader, &surfaces);\n        let surface_extraction = SurfaceExtraction::new(gfx);\n        let extraction_scratch = surface_extraction::ScratchBuffer::new(\n            gfx,\n            &surface_extraction,\n            config.chunk_load_parallelism * frames,\n            dimension,\n        );\n        Self {\n            config,\n            surface_extraction,\n            extraction_scratch,\n            surfaces,\n            states: 
LruSlab::with_capacity(max_chunks),\n            draw,\n            max_chunks,\n        }\n    }\n\n    /// Determine what to render and stage chunk transforms\n    ///\n    /// Surface extraction commands are written to `cmd`, and will be presumed complete for the next\n    /// (not current) frame.\n    pub unsafe fn prepare(\n        &mut self,\n        device: &Device,\n        frame: &mut Frame,\n        sim: &mut Sim,\n        nearby_nodes: &[(NodeId, MIsometry<f32>)],\n        cmd: vk::CommandBuffer,\n        frustum: &Frustum,\n    ) {\n        // Clean up after previous frame\n        for i in frame.extracted.drain(..) {\n            self.extraction_scratch.free(i);\n        }\n        for chunk in frame.drawn.drain(..) {\n            self.states.peek_mut(chunk).refcount -= 1;\n        }\n\n        // Determine what to load/render\n        let view = sim.view();\n        if !sim.graph.contains(view.node) {\n            // Graph is temporarily out of sync with the server; we don't know where we are, so\n            // there's no point trying to draw.\n            return;\n        }\n        let node_scan_started = Instant::now();\n        let frustum_planes = frustum.planes();\n        let local_to_view = view.local.inverse();\n        let mut extractions = Vec::new();\n        for &(node, ref node_transform) in nearby_nodes {\n            let node_to_view = local_to_view * node_transform;\n            let origin = node_to_view * MPoint::origin();\n            if !frustum_planes.contain(&origin, dodeca::BOUNDING_SPHERE_RADIUS) {\n                // Don't bother generating or drawing chunks from nodes that are wholly outside the\n                // frustum.\n                continue;\n            }\n\n            use Chunk::*;\n            for vertex in Vertex::iter() {\n                let chunk = ChunkId::new(node, vertex);\n\n                // Fetch existing chunk, or extract surface of new chunk\n                let &mut Populated {\n                    
ref mut surface,\n                    ref mut old_surface,\n                    ref voxels,\n                } = &mut sim.graph[chunk]\n                else {\n                    continue;\n                };\n\n                if let Some(slot) = surface.or(*old_surface) {\n                    // Render an already-extracted surface\n                    self.states.get_mut(slot).refcount += 1;\n                    frame.drawn.push(slot);\n                    // Transfer transform\n                    frame.surface.transforms_mut()[slot as usize] =\n                        na::Matrix4::from(*node_transform) * vertex.chunk_to_node();\n                }\n                if let (None, &VoxelData::Dense(ref data)) = (&surface, voxels) {\n                    // Extract a surface so it can be drawn in future frames\n                    if frame.extracted.len() == self.config.chunk_load_parallelism as usize {\n                        continue;\n                    }\n                    let removed = if self.states.len() == self.max_chunks {\n                        let slot = self.states.lru().expect(\"full LRU table is nonempty\");\n                        if self.states.peek(slot).refcount != 0 {\n                            warn!(\"MAX_CHUNKS is too small\");\n                            break;\n                        }\n                        Some((slot, self.states.remove(slot)))\n                    } else {\n                        None\n                    };\n                    let scratch_slot = self.extraction_scratch.alloc().expect(\n                        \"there are at least chunks_loaded_per_frame scratch slots per frame\",\n                    );\n                    frame.extracted.push(scratch_slot);\n                    let slot = self.states.insert(SurfaceState {\n                        node,\n                        chunk: vertex,\n                        refcount: 0,\n                    });\n                    *surface = Some(slot);\n         
           let storage = self.extraction_scratch.storage(scratch_slot);\n                    storage.copy_from_slice(&data[..]);\n                    if let Some((lru_slot, lru)) = removed\n                        && let Populated {\n                            ref mut surface,\n                            ref mut old_surface,\n                            ..\n                        } = sim.graph[lru.node].chunks[lru.chunk]\n                    {\n                        // Remove references to released slot IDs\n                        if *surface == Some(lru_slot) {\n                            *surface = None;\n                        }\n                        if *old_surface == Some(lru_slot) {\n                            *old_surface = None;\n                        }\n                    }\n                    let node_is_odd = sim.graph.depth(node) & 1 != 0;\n                    extractions.push(ExtractTask {\n                        index: scratch_slot,\n                        indirect_offset: self.surfaces.indirect_offset(slot),\n                        face_offset: self.surfaces.face_offset(slot),\n                        draw_id: slot,\n                        reverse_winding: vertex.parity() ^ node_is_odd,\n                    });\n                }\n            }\n        }\n        unsafe {\n            self.extraction_scratch.extract(\n                device,\n                &self.surface_extraction,\n                self.surfaces.indirect_buffer(),\n                self.surfaces.face_buffer(),\n                cmd,\n                &extractions,\n            );\n        }\n        histogram!(\"frame.cpu.voxels.node_scan\").record(node_scan_started.elapsed());\n    }\n\n    pub unsafe fn draw(\n        &mut self,\n        device: &Device,\n        loader: &Loader,\n        common_ds: vk::DescriptorSet,\n        frame: &Frame,\n        cmd: vk::CommandBuffer,\n    ) {\n        unsafe {\n            let started = Instant::now();\n            if 
!self.draw.bind(\n                device,\n                loader,\n                self.surfaces.dimension(),\n                common_ds,\n                &frame.surface,\n                cmd,\n            ) {\n                return;\n            }\n            for &chunk in &frame.drawn {\n                self.draw.draw(device, cmd, &self.surfaces, chunk);\n            }\n            histogram!(\"frame.cpu.voxels.draw\").record(started.elapsed());\n        }\n    }\n\n    pub unsafe fn destroy(&mut self, device: &Device) {\n        unsafe {\n            self.surface_extraction.destroy(device);\n            self.extraction_scratch.destroy(device);\n            self.surfaces.destroy(device);\n            self.draw.destroy(device);\n        }\n    }\n}\n\npub struct Frame {\n    surface: surface::Frame,\n    /// Scratch slots completed in this frame\n    extracted: Vec<u32>,\n    drawn: Vec<u32>,\n}\n\nimpl Frame {\n    pub unsafe fn destroy(&mut self, device: &Device) {\n        unsafe {\n            self.surface.destroy(device);\n        }\n    }\n}\n\nimpl Frame {\n    pub fn new(gfx: &Base, ctx: &Voxels) -> Self {\n        Self {\n            surface: surface::Frame::new(gfx, ctx.states.capacity()),\n            extracted: Vec::new(),\n            drawn: Vec::new(),\n        }\n    }\n}\n\n/// Maximum number of concurrently drawn voxel chunks\nconst MAX_CHUNKS: u32 = 8192;\n\nstruct SurfaceState {\n    node: NodeId,\n    chunk: common::dodeca::Vertex,\n    refcount: u32,\n}\n"
  },
  {
    "path": "client/src/graphics/voxels/surface.rs",
    "content": "use ash::{Device, vk};\nuse lahar::{DedicatedImage, DedicatedMapping};\nuse vk_shader_macros::include_glsl;\n\nuse super::surface_extraction::DrawBuffer;\nuse crate::{Asset, Loader, graphics::Base};\nuse common::{defer, world::Material};\n\nconst VERT: &[u32] = include_glsl!(\"shaders/voxels.vert\");\nconst FRAG: &[u32] = include_glsl!(\"shaders/voxels.frag\");\n\npub struct Surface {\n    static_ds_layout: vk::DescriptorSetLayout,\n    pipeline_layout: vk::PipelineLayout,\n    pipeline: vk::Pipeline,\n    descriptor_pool: vk::DescriptorPool,\n    ds: vk::DescriptorSet,\n    colors: Asset<DedicatedImage>,\n    colors_view: vk::ImageView,\n}\n\nimpl Surface {\n    pub fn new(gfx: &Base, loader: &mut Loader, buffer: &DrawBuffer) -> Self {\n        let device = &*gfx.device;\n        unsafe {\n            // Construct the shader modules\n            let vert = device\n                .create_shader_module(&vk::ShaderModuleCreateInfo::default().code(VERT), None)\n                .unwrap();\n            // Note that these only need to live until the pipeline itself is constructed\n            let v_guard = defer(|| device.destroy_shader_module(vert, None));\n\n            let frag = device\n                .create_shader_module(&vk::ShaderModuleCreateInfo::default().code(FRAG), None)\n                .unwrap();\n            let f_guard = defer(|| device.destroy_shader_module(frag, None));\n\n            let static_ds_layout = device\n                .create_descriptor_set_layout(\n                    &vk::DescriptorSetLayoutCreateInfo::default().bindings(&[\n                        vk::DescriptorSetLayoutBinding {\n                            binding: 0,\n                            descriptor_type: vk::DescriptorType::STORAGE_BUFFER,\n                            descriptor_count: 1,\n                            stage_flags: vk::ShaderStageFlags::VERTEX,\n                            ..Default::default()\n                        },\n                       
 vk::DescriptorSetLayoutBinding {\n                            binding: 1,\n                            descriptor_type: vk::DescriptorType::COMBINED_IMAGE_SAMPLER,\n                            descriptor_count: 1,\n                            stage_flags: vk::ShaderStageFlags::FRAGMENT,\n                            p_immutable_samplers: &gfx.linear_sampler,\n                            ..Default::default()\n                        },\n                    ]),\n                    None,\n                )\n                .unwrap();\n\n            let descriptor_pool = device\n                .create_descriptor_pool(\n                    &vk::DescriptorPoolCreateInfo::default()\n                        .max_sets(1)\n                        .pool_sizes(&[\n                            vk::DescriptorPoolSize {\n                                ty: vk::DescriptorType::STORAGE_BUFFER,\n                                descriptor_count: 1,\n                            },\n                            vk::DescriptorPoolSize {\n                                ty: vk::DescriptorType::COMBINED_IMAGE_SAMPLER,\n                                descriptor_count: 1,\n                            },\n                        ]),\n                    None,\n                )\n                .unwrap();\n            let ds = device\n                .allocate_descriptor_sets(\n                    &vk::DescriptorSetAllocateInfo::default()\n                        .descriptor_pool(descriptor_pool)\n                        .set_layouts(&[static_ds_layout]),\n                )\n                .unwrap()[0];\n            device.update_descriptor_sets(\n                &[vk::WriteDescriptorSet::default()\n                    .dst_set(ds)\n                    .dst_binding(0)\n                    .descriptor_type(vk::DescriptorType::STORAGE_BUFFER)\n                    .buffer_info(&[vk::DescriptorBufferInfo {\n                        buffer: buffer.face_buffer(),\n                        offset: 
0,\n                        range: vk::WHOLE_SIZE,\n                    }])],\n                &[],\n            );\n\n            // Define the outward-facing interface of the shaders, incl. uniforms, samplers, etc.\n            let pipeline_layout = device\n                .create_pipeline_layout(\n                    &vk::PipelineLayoutCreateInfo::default()\n                        .set_layouts(&[gfx.common_layout, static_ds_layout])\n                        .push_constant_ranges(&[vk::PushConstantRange {\n                            stage_flags: vk::ShaderStageFlags::VERTEX,\n                            offset: 0,\n                            size: 4,\n                        }]),\n                    None,\n                )\n                .unwrap();\n\n            let entry_point = cstr!(\"main\").as_ptr();\n            let mut pipelines = device\n                .create_graphics_pipelines(\n                    gfx.pipeline_cache,\n                    &[vk::GraphicsPipelineCreateInfo::default()\n                        .stages(&[\n                            vk::PipelineShaderStageCreateInfo {\n                                stage: vk::ShaderStageFlags::VERTEX,\n                                module: vert,\n                                p_name: entry_point,\n                                ..Default::default()\n                            },\n                            vk::PipelineShaderStageCreateInfo {\n                                stage: vk::ShaderStageFlags::FRAGMENT,\n                                module: frag,\n                                p_name: entry_point,\n                                ..Default::default()\n                            },\n                        ])\n                        .vertex_input_state(\n                            &vk::PipelineVertexInputStateCreateInfo::default()\n                                .vertex_binding_descriptions(&[vk::VertexInputBindingDescription {\n                                    binding: 
0,\n                                    stride: TRANSFORM_SIZE as u32,\n                                    input_rate: vk::VertexInputRate::INSTANCE,\n                                }])\n                                .vertex_attribute_descriptions(&[\n                                    vk::VertexInputAttributeDescription {\n                                        location: 0,\n                                        binding: 0,\n                                        format: vk::Format::R32G32B32A32_SFLOAT,\n                                        offset: 0,\n                                    },\n                                    vk::VertexInputAttributeDescription {\n                                        location: 1,\n                                        binding: 0,\n                                        format: vk::Format::R32G32B32A32_SFLOAT,\n                                        offset: 16,\n                                    },\n                                    vk::VertexInputAttributeDescription {\n                                        location: 2,\n                                        binding: 0,\n                                        format: vk::Format::R32G32B32A32_SFLOAT,\n                                        offset: 32,\n                                    },\n                                    vk::VertexInputAttributeDescription {\n                                        location: 3,\n                                        binding: 0,\n                                        format: vk::Format::R32G32B32A32_SFLOAT,\n                                        offset: 48,\n                                    },\n                                ]),\n                        )\n                        .input_assembly_state(\n                            &vk::PipelineInputAssemblyStateCreateInfo::default()\n                                .topology(vk::PrimitiveTopology::TRIANGLE_LIST),\n                        )\n              
          .viewport_state(\n                            &vk::PipelineViewportStateCreateInfo::default()\n                                .scissor_count(1)\n                                .viewport_count(1),\n                        )\n                        .rasterization_state(\n                            &vk::PipelineRasterizationStateCreateInfo::default()\n                                .cull_mode(vk::CullModeFlags::BACK)\n                                .front_face(vk::FrontFace::COUNTER_CLOCKWISE)\n                                .polygon_mode(vk::PolygonMode::FILL)\n                                .line_width(1.0),\n                        )\n                        .multisample_state(\n                            &vk::PipelineMultisampleStateCreateInfo::default()\n                                .rasterization_samples(vk::SampleCountFlags::TYPE_1),\n                        )\n                        .depth_stencil_state(\n                            &vk::PipelineDepthStencilStateCreateInfo::default()\n                                .depth_test_enable(true)\n                                .depth_write_enable(true)\n                                .depth_compare_op(vk::CompareOp::GREATER),\n                        )\n                        .color_blend_state(\n                            &vk::PipelineColorBlendStateCreateInfo::default().attachments(&[\n                                vk::PipelineColorBlendAttachmentState {\n                                    blend_enable: vk::TRUE,\n                                    src_color_blend_factor: vk::BlendFactor::ONE,\n                                    dst_color_blend_factor: vk::BlendFactor::ZERO,\n                                    color_blend_op: vk::BlendOp::ADD,\n                                    color_write_mask: vk::ColorComponentFlags::R\n                                        | vk::ColorComponentFlags::G\n                                        | vk::ColorComponentFlags::B,\n                  
                  ..Default::default()\n                                },\n                            ]),\n                        )\n                        .dynamic_state(\n                            &vk::PipelineDynamicStateCreateInfo::default().dynamic_states(&[\n                                vk::DynamicState::VIEWPORT,\n                                vk::DynamicState::SCISSOR,\n                            ]),\n                        )\n                        .layout(pipeline_layout)\n                        .render_pass(gfx.render_pass)\n                        .subpass(0)],\n                    None,\n                )\n                .unwrap()\n                .into_iter();\n\n            let pipeline = pipelines.next().unwrap();\n            gfx.set_name(pipeline, cstr!(\"voxels\"));\n\n            // Clean up the shaders explicitly, so the defer guards don't hold onto references we're\n            // moving into `Self` to be returned\n            v_guard.invoke();\n            f_guard.invoke();\n\n            let colors = loader.load(\n                \"voxel materials\",\n                crate::graphics::PngArray {\n                    path: \"materials\".into(),\n                    size: common::world::Material::COUNT - 1,\n                },\n            );\n\n            Self {\n                static_ds_layout,\n                pipeline_layout,\n                pipeline,\n                descriptor_pool,\n                ds,\n                colors,\n                colors_view: vk::ImageView::null(),\n            }\n        }\n    }\n\n    pub unsafe fn bind(\n        &mut self,\n        device: &Device,\n        loader: &Loader,\n        dimension: u32,\n        common_ds: vk::DescriptorSet,\n        frame: &Frame,\n        cmd: vk::CommandBuffer,\n    ) -> bool {\n        unsafe {\n            if self.colors_view == vk::ImageView::null() {\n                if let Some(colors) = loader.get(self.colors) {\n                    
self.colors_view = device\n                        .create_image_view(\n                            &vk::ImageViewCreateInfo::default()\n                                .image(colors.handle)\n                                .view_type(vk::ImageViewType::TYPE_2D_ARRAY)\n                                .format(vk::Format::R8G8B8A8_SRGB)\n                                .subresource_range(vk::ImageSubresourceRange {\n                                    aspect_mask: vk::ImageAspectFlags::COLOR,\n                                    base_mip_level: 0,\n                                    level_count: 1,\n                                    base_array_layer: 0,\n                                    layer_count: (Material::COUNT - 1) as u32,\n                                }),\n                            None,\n                        )\n                        .unwrap();\n                    device.update_descriptor_sets(\n                        &[vk::WriteDescriptorSet::default()\n                            .dst_set(self.ds)\n                            .dst_binding(1)\n                            .descriptor_type(vk::DescriptorType::COMBINED_IMAGE_SAMPLER)\n                            .image_info(&[vk::DescriptorImageInfo {\n                                sampler: vk::Sampler::null(),\n                                image_view: self.colors_view,\n                                image_layout: vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL,\n                            }])],\n                        &[],\n                    );\n                } else {\n                    return false;\n                }\n            }\n\n            device.cmd_bind_pipeline(cmd, vk::PipelineBindPoint::GRAPHICS, self.pipeline);\n            device.cmd_bind_descriptor_sets(\n                cmd,\n                vk::PipelineBindPoint::GRAPHICS,\n                self.pipeline_layout,\n                0,\n                &[common_ds, self.ds],\n                &[],\n            );\n        
    device.cmd_bind_vertex_buffers(cmd, 0, &[frame.transforms.buffer()], &[0]);\n\n            device.cmd_push_constants(\n                cmd,\n                self.pipeline_layout,\n                vk::ShaderStageFlags::VERTEX,\n                0,\n                &dimension.to_ne_bytes(),\n            );\n\n            true\n        }\n    }\n\n    pub unsafe fn draw(\n        &self,\n        device: &Device,\n        cmd: vk::CommandBuffer,\n        buffer: &DrawBuffer,\n        chunk: u32,\n    ) {\n        unsafe {\n            device.cmd_draw_indirect(\n                cmd,\n                buffer.indirect_buffer(),\n                buffer.indirect_offset(chunk),\n                1,\n                16,\n            );\n        }\n    }\n\n    pub unsafe fn destroy(&mut self, device: &Device) {\n        unsafe {\n            device.destroy_pipeline(self.pipeline, None);\n            device.destroy_pipeline_layout(self.pipeline_layout, None);\n            device.destroy_descriptor_set_layout(self.static_ds_layout, None);\n            device.destroy_descriptor_pool(self.descriptor_pool, None);\n            if self.colors_view != vk::ImageView::null() {\n                device.destroy_image_view(self.colors_view, None);\n            }\n        }\n    }\n}\n\npub struct Frame {\n    transforms: DedicatedMapping<[na::Matrix4<f32>]>,\n}\n\nimpl Frame {\n    pub fn new(gfx: &Base, count: u32) -> Self {\n        unsafe {\n            let transforms = DedicatedMapping::zeroed_array(\n                &gfx.device,\n                &gfx.memory_properties,\n                vk::BufferUsageFlags::VERTEX_BUFFER | vk::BufferUsageFlags::TRANSFER_DST,\n                count as usize * TRANSFORM_SIZE as usize,\n            );\n            gfx.set_name(transforms.buffer(), cstr!(\"voxel transforms\"));\n            Self { transforms }\n        }\n    }\n\n    pub fn transforms_mut(&mut self) -> &mut [na::Matrix4<f32>] {\n        &mut self.transforms\n    }\n}\n\nimpl Frame {\n   
 pub unsafe fn destroy(&mut self, device: &Device) {\n        unsafe {\n            self.transforms.destroy(device);\n        }\n    }\n}\n\n// 4x4 f32 matrix\npub const TRANSFORM_SIZE: vk::DeviceSize = 64;\n"
  },
  {
    "path": "client/src/graphics/voxels/surface_extraction.rs",
    "content": "use std::ffi::c_char;\nuse std::mem;\n\nuse ash::{Device, vk};\nuse lahar::{DedicatedBuffer, DedicatedMapping};\nuse vk_shader_macros::include_glsl;\n\nuse crate::graphics::{Base, VkDrawIndirectCommand, as_bytes};\nuse common::{defer, world::Material};\n\nconst EXTRACT: &[u32] = include_glsl!(\"shaders/surface-extraction/extract.comp\", target: vulkan1_1);\n\n/// GPU-accelerated surface extraction from voxel chunks\npub struct SurfaceExtraction {\n    params_layout: vk::DescriptorSetLayout,\n    ds_layout: vk::DescriptorSetLayout,\n    pipeline_layout: vk::PipelineLayout,\n    extract: vk::Pipeline,\n}\n\nimpl SurfaceExtraction {\n    pub fn new(gfx: &Base) -> Self {\n        let device = &*gfx.device;\n        unsafe {\n            let params_layout = device\n                .create_descriptor_set_layout(\n                    &vk::DescriptorSetLayoutCreateInfo::default().bindings(&[\n                        vk::DescriptorSetLayoutBinding {\n                            binding: 0,\n                            descriptor_type: vk::DescriptorType::UNIFORM_BUFFER,\n                            descriptor_count: 1,\n                            stage_flags: vk::ShaderStageFlags::COMPUTE,\n                            ..Default::default()\n                        },\n                    ]),\n                    None,\n                )\n                .unwrap();\n            let ds_layout = device\n                .create_descriptor_set_layout(\n                    &vk::DescriptorSetLayoutCreateInfo::default().bindings(&[\n                        vk::DescriptorSetLayoutBinding {\n                            binding: 0,\n                            descriptor_type: vk::DescriptorType::STORAGE_BUFFER,\n                            descriptor_count: 1,\n                            stage_flags: vk::ShaderStageFlags::COMPUTE,\n                            ..Default::default()\n                        },\n                        vk::DescriptorSetLayoutBinding {\n  
                          binding: 1,\n                            descriptor_type: vk::DescriptorType::STORAGE_BUFFER,\n                            descriptor_count: 1,\n                            stage_flags: vk::ShaderStageFlags::COMPUTE,\n                            ..Default::default()\n                        },\n                        vk::DescriptorSetLayoutBinding {\n                            binding: 2,\n                            descriptor_type: vk::DescriptorType::STORAGE_BUFFER,\n                            descriptor_count: 1,\n                            stage_flags: vk::ShaderStageFlags::COMPUTE,\n                            ..Default::default()\n                        },\n                        vk::DescriptorSetLayoutBinding {\n                            binding: 3,\n                            descriptor_type: vk::DescriptorType::STORAGE_BUFFER,\n                            descriptor_count: 1,\n                            stage_flags: vk::ShaderStageFlags::COMPUTE,\n                            ..Default::default()\n                        },\n                    ]),\n                    None,\n                )\n                .unwrap();\n            let pipeline_layout = device\n                .create_pipeline_layout(\n                    &vk::PipelineLayoutCreateInfo::default()\n                        .set_layouts(&[params_layout, ds_layout])\n                        .push_constant_ranges(&[vk::PushConstantRange {\n                            stage_flags: vk::ShaderStageFlags::COMPUTE,\n                            offset: 0,\n                            size: 4,\n                        }]),\n                    None,\n                )\n                .unwrap();\n\n            let extract = device\n                .create_shader_module(&vk::ShaderModuleCreateInfo::default().code(EXTRACT), None)\n                .unwrap();\n            let extract_guard = defer(|| device.destroy_shader_module(extract, None));\n\n            let 
specialization_map_entries = [\n                vk::SpecializationMapEntry {\n                    constant_id: 0,\n                    offset: 0,\n                    size: 4,\n                },\n                vk::SpecializationMapEntry {\n                    constant_id: 1,\n                    offset: 4,\n                    size: 4,\n                },\n                vk::SpecializationMapEntry {\n                    constant_id: 2,\n                    offset: 8,\n                    size: 4,\n                },\n            ];\n            let specialization = vk::SpecializationInfo::default()\n                .map_entries(&specialization_map_entries)\n                .data(as_bytes(&WORKGROUP_SIZE));\n\n            let p_name = c\"main\".as_ptr() as *const c_char;\n            let mut pipelines = device\n                .create_compute_pipelines(\n                    gfx.pipeline_cache,\n                    &[vk::ComputePipelineCreateInfo {\n                        stage: vk::PipelineShaderStageCreateInfo {\n                            stage: vk::ShaderStageFlags::COMPUTE,\n                            module: extract,\n                            p_name,\n                            p_specialization_info: &specialization,\n                            ..Default::default()\n                        },\n                        layout: pipeline_layout,\n                        ..Default::default()\n                    }],\n                    None,\n                )\n                .unwrap()\n                .into_iter();\n\n            // Free shader modules now that the actual pipelines are built\n            extract_guard.invoke();\n\n            let extract = pipelines.next().unwrap();\n            gfx.set_name(extract, cstr!(\"extract\"));\n\n            Self {\n                params_layout,\n                ds_layout,\n                pipeline_layout,\n                extract,\n            }\n        }\n    }\n\n    pub unsafe fn destroy(&mut self, 
device: &Device) {\n        unsafe {\n            device.destroy_descriptor_set_layout(self.params_layout, None);\n            device.destroy_descriptor_set_layout(self.ds_layout, None);\n            device.destroy_pipeline_layout(self.pipeline_layout, None);\n            device.destroy_pipeline(self.extract, None);\n        }\n    }\n}\n\n/// Scratch space for actually performing the extraction\npub struct ScratchBuffer {\n    dimension: u32,\n    params: DedicatedBuffer,\n    /// Size of a single entry in the voxel buffer\n    voxel_buffer_unit: vk::DeviceSize,\n    /// Size of a single entry in the state buffer\n    state_buffer_unit: vk::DeviceSize,\n    voxels_staging: DedicatedMapping<[Material]>,\n    voxels: DedicatedBuffer,\n    state: DedicatedBuffer,\n    descriptor_pool: vk::DescriptorPool,\n    params_ds: vk::DescriptorSet,\n    descriptor_sets: Vec<vk::DescriptorSet>,\n    free_slots: Vec<u32>,\n    concurrency: u32,\n}\n\nimpl ScratchBuffer {\n    pub fn new(gfx: &Base, ctx: &SurfaceExtraction, concurrency: u32, dimension: u32) -> Self {\n        let device = &*gfx.device;\n        // Padded by 2 on each dimension so each voxel of interest has a full neighborhood\n        let voxel_buffer_unit = round_up(\n            mem::size_of::<Material>() as vk::DeviceSize * (dimension as vk::DeviceSize + 2).pow(3),\n            // Pad at least to multiples of 4 so the shaders can safely read in 32 bit units\n            gfx.limits.min_storage_buffer_offset_alignment.max(4),\n        );\n        let voxels_size = concurrency as vk::DeviceSize * voxel_buffer_unit;\n\n        let state_buffer_unit = round_up(4, gfx.limits.min_storage_buffer_offset_alignment);\n        unsafe {\n            let params = DedicatedBuffer::new(\n                device,\n                &gfx.memory_properties,\n                &vk::BufferCreateInfo::default()\n                    .size(mem::size_of::<Params>() as vk::DeviceSize)\n                    .usage(\n                        
vk::BufferUsageFlags::UNIFORM_BUFFER | vk::BufferUsageFlags::TRANSFER_DST,\n                    )\n                    .sharing_mode(vk::SharingMode::EXCLUSIVE),\n                vk::MemoryPropertyFlags::DEVICE_LOCAL,\n            );\n            gfx.set_name(params.handle, cstr!(\"surface extraction params\"));\n\n            let voxels_staging = DedicatedMapping::zeroed_array(\n                device,\n                &gfx.memory_properties,\n                vk::BufferUsageFlags::TRANSFER_SRC,\n                (voxels_size / mem::size_of::<Material>() as vk::DeviceSize) as usize,\n            );\n            gfx.set_name(voxels_staging.buffer(), cstr!(\"voxels staging\"));\n\n            let voxels = DedicatedBuffer::new(\n                device,\n                &gfx.memory_properties,\n                &vk::BufferCreateInfo::default()\n                    .size(voxels_size)\n                    .usage(\n                        vk::BufferUsageFlags::STORAGE_BUFFER | vk::BufferUsageFlags::TRANSFER_DST,\n                    )\n                    .sharing_mode(vk::SharingMode::EXCLUSIVE),\n                vk::MemoryPropertyFlags::DEVICE_LOCAL,\n            );\n            gfx.set_name(voxels.handle, cstr!(\"voxels\"));\n\n            let state = DedicatedBuffer::new(\n                device,\n                &gfx.memory_properties,\n                &vk::BufferCreateInfo::default()\n                    .size(state_buffer_unit * vk::DeviceSize::from(concurrency))\n                    .usage(\n                        vk::BufferUsageFlags::STORAGE_BUFFER | vk::BufferUsageFlags::TRANSFER_DST,\n                    )\n                    .sharing_mode(vk::SharingMode::EXCLUSIVE),\n                vk::MemoryPropertyFlags::DEVICE_LOCAL,\n            );\n            gfx.set_name(state.handle, cstr!(\"surface extraction state\"));\n\n            let descriptor_pool = device\n                .create_descriptor_pool(\n                    
&vk::DescriptorPoolCreateInfo::default()\n                        .max_sets(concurrency + 1)\n                        .pool_sizes(&[\n                            vk::DescriptorPoolSize {\n                                ty: vk::DescriptorType::UNIFORM_BUFFER,\n                                descriptor_count: 1,\n                            },\n                            vk::DescriptorPoolSize {\n                                ty: vk::DescriptorType::STORAGE_BUFFER,\n                                descriptor_count: 4 * concurrency,\n                            },\n                        ]),\n                    None,\n                )\n                .unwrap();\n            let mut layouts = Vec::with_capacity(concurrency as usize + 1);\n            layouts.resize(concurrency as usize, ctx.ds_layout);\n            layouts.push(ctx.params_layout);\n            let mut descriptor_sets = device\n                .allocate_descriptor_sets(\n                    &vk::DescriptorSetAllocateInfo::default()\n                        .descriptor_pool(descriptor_pool)\n                        .set_layouts(&layouts),\n                )\n                .unwrap();\n\n            let params_ds = descriptor_sets.pop().unwrap();\n            device.update_descriptor_sets(\n                &[vk::WriteDescriptorSet::default()\n                    .dst_set(params_ds)\n                    .dst_binding(0)\n                    .descriptor_type(vk::DescriptorType::UNIFORM_BUFFER)\n                    .buffer_info(&[vk::DescriptorBufferInfo {\n                        buffer: params.handle,\n                        offset: 0,\n                        range: vk::WHOLE_SIZE,\n                    }])],\n                &[],\n            );\n\n            Self {\n                dimension,\n                params,\n                voxel_buffer_unit,\n                state_buffer_unit,\n                voxels_staging,\n                voxels,\n                state,\n                
descriptor_pool,\n                params_ds,\n                descriptor_sets,\n                free_slots: (0..concurrency).collect(),\n                concurrency,\n            }\n        }\n    }\n\n    pub fn alloc(&mut self) -> Option<u32> {\n        self.free_slots.pop()\n    }\n\n    pub fn free(&mut self, index: u32) {\n        debug_assert!(\n            !self.free_slots.contains(&index),\n            \"double-free of surface extraction scratch slot\"\n        );\n        self.free_slots.push(index);\n    }\n\n    /// Includes a one-voxel margin around the entire volume\n    pub fn storage(&mut self, index: u32) -> &mut [Material] {\n        let start = index as usize * (self.voxel_buffer_unit as usize / mem::size_of::<Material>());\n        let length = (self.dimension + 2).pow(3) as usize;\n        &mut self.voxels_staging[start..start + length]\n    }\n\n    pub unsafe fn extract(\n        &mut self,\n        device: &Device,\n        ctx: &SurfaceExtraction,\n        indirect_buffer: vk::Buffer,\n        face_buffer: vk::Buffer,\n        cmd: vk::CommandBuffer,\n        tasks: &[ExtractTask],\n    ) {\n        unsafe {\n            // Prevent overlap with the last batch of work\n            device.cmd_pipeline_barrier(\n                cmd,\n                vk::PipelineStageFlags::COMPUTE_SHADER,\n                vk::PipelineStageFlags::TRANSFER,\n                Default::default(),\n                &[vk::MemoryBarrier {\n                    src_access_mask: vk::AccessFlags::SHADER_READ,\n                    dst_access_mask: vk::AccessFlags::TRANSFER_WRITE,\n                    ..Default::default()\n                }],\n                &[],\n                &[],\n            );\n            // HACKITY HACK: Queue submit synchronization validation thinks we're\n            // racing with the preceding chunk draws. 
Our logic to allocate unique\n            // ranges should be preventing this, so this may be a false positive.\n            // However, if that's true, why does the validation error only trigger a\n            // handful of times at startup? Perhaps we're freeing and reusing\n            // storage before the previous draw completes, and validation is somehow\n            // smart enough to notice?\n            device.cmd_pipeline_barrier(\n                cmd,\n                vk::PipelineStageFlags::VERTEX_SHADER,\n                vk::PipelineStageFlags::COMPUTE_SHADER,\n                Default::default(),\n                &[],\n                &[vk::BufferMemoryBarrier {\n                    buffer: face_buffer,\n                    src_access_mask: vk::AccessFlags::SHADER_READ,\n                    dst_access_mask: vk::AccessFlags::SHADER_WRITE,\n                    offset: 0,\n                    size: vk::WHOLE_SIZE,\n                    ..Default::default()\n                }],\n                &[],\n            );\n\n            // Prepare shared state\n            device.cmd_update_buffer(\n                cmd,\n                self.params.handle,\n                0,\n                as_bytes(&Params {\n                    dimension: self.dimension,\n                }),\n            );\n            device.cmd_fill_buffer(cmd, self.state.handle, 0, vk::WHOLE_SIZE, 0);\n\n            let voxel_count = (self.dimension + 2).pow(3) as usize;\n            let voxels_range =\n                voxel_count as vk::DeviceSize * mem::size_of::<Material>() as vk::DeviceSize;\n            let max_faces = 3 * (self.dimension.pow(3) + self.dimension.pow(2));\n            let dispatch = dispatch_sizes(self.dimension);\n            device.cmd_bind_descriptor_sets(\n                cmd,\n                vk::PipelineBindPoint::COMPUTE,\n                ctx.pipeline_layout,\n                0,\n                &[self.params_ds],\n                &[],\n            );\n\n        
    // Prepare each task\n            for task in tasks {\n                assert!(\n                    task.index < self.concurrency,\n                    \"index {} out of bounds for concurrency {}\",\n                    task.index,\n                    self.concurrency\n                );\n                let index = task.index as usize;\n\n                let voxels_offset = self.voxel_buffer_unit * index as vk::DeviceSize;\n\n                device.update_descriptor_sets(\n                    &[\n                        vk::WriteDescriptorSet::default()\n                            .dst_set(self.descriptor_sets[index])\n                            .dst_binding(0)\n                            .descriptor_type(vk::DescriptorType::STORAGE_BUFFER)\n                            .buffer_info(&[vk::DescriptorBufferInfo {\n                                buffer: self.voxels.handle,\n                                offset: voxels_offset,\n                                range: voxels_range,\n                            }]),\n                        vk::WriteDescriptorSet::default()\n                            .dst_set(self.descriptor_sets[index])\n                            .dst_binding(1)\n                            .descriptor_type(vk::DescriptorType::STORAGE_BUFFER)\n                            .buffer_info(&[vk::DescriptorBufferInfo {\n                                buffer: self.state.handle,\n                                offset: self.state_buffer_unit * vk::DeviceSize::from(task.index),\n                                range: 4,\n                            }]),\n                        vk::WriteDescriptorSet::default()\n                            .dst_set(self.descriptor_sets[index])\n                            .dst_binding(2)\n                            .descriptor_type(vk::DescriptorType::STORAGE_BUFFER)\n                            .buffer_info(&[vk::DescriptorBufferInfo {\n                                buffer: indirect_buffer,\n                   
             offset: task.indirect_offset,\n                                range: INDIRECT_SIZE,\n                            }]),\n                        vk::WriteDescriptorSet::default()\n                            .dst_set(self.descriptor_sets[index])\n                            .dst_binding(3)\n                            .descriptor_type(vk::DescriptorType::STORAGE_BUFFER)\n                            .buffer_info(&[vk::DescriptorBufferInfo {\n                                buffer: face_buffer,\n                                offset: task.face_offset,\n                                range: max_faces as vk::DeviceSize * FACE_SIZE,\n                            }]),\n                    ],\n                    &[],\n                );\n\n                device.cmd_copy_buffer(\n                    cmd,\n                    self.voxels_staging.buffer(),\n                    self.voxels.handle,\n                    &[vk::BufferCopy {\n                        src_offset: voxels_offset,\n                        dst_offset: voxels_offset,\n                        size: voxels_range,\n                    }],\n                );\n                device.cmd_update_buffer(\n                    cmd,\n                    indirect_buffer,\n                    task.indirect_offset,\n                    as_bytes(&VkDrawIndirectCommand {\n                        vertex_count: 0,\n                        instance_count: 1,\n                        first_vertex: (task.face_offset / FACE_SIZE) as u32 * 6,\n                        first_instance: task.draw_id,\n                    }),\n                )\n            }\n\n            device.cmd_pipeline_barrier(\n                cmd,\n                vk::PipelineStageFlags::TRANSFER,\n                vk::PipelineStageFlags::COMPUTE_SHADER,\n                Default::default(),\n                &[vk::MemoryBarrier {\n                    src_access_mask: vk::AccessFlags::TRANSFER_WRITE,\n                    dst_access_mask: 
vk::AccessFlags::SHADER_READ\n                        | vk::AccessFlags::SHADER_WRITE\n                        | vk::AccessFlags::UNIFORM_READ,\n                    ..Default::default()\n                }],\n                &[],\n                &[],\n            );\n\n            // Write faces to memory\n            device.cmd_bind_pipeline(cmd, vk::PipelineBindPoint::COMPUTE, ctx.extract);\n            for task in tasks {\n                device.cmd_push_constants(\n                    cmd,\n                    ctx.pipeline_layout,\n                    vk::ShaderStageFlags::COMPUTE,\n                    0,\n                    &u32::from(task.reverse_winding).to_ne_bytes(),\n                );\n                device.cmd_bind_descriptor_sets(\n                    cmd,\n                    vk::PipelineBindPoint::COMPUTE,\n                    ctx.pipeline_layout,\n                    1,\n                    &[self.descriptor_sets[task.index as usize]],\n                    &[],\n                );\n                device.cmd_dispatch(cmd, dispatch.x, dispatch.y, dispatch.z);\n            }\n\n            device.cmd_pipeline_barrier(\n                cmd,\n                vk::PipelineStageFlags::COMPUTE_SHADER,\n                vk::PipelineStageFlags::VERTEX_SHADER | vk::PipelineStageFlags::DRAW_INDIRECT,\n                Default::default(),\n                &[vk::MemoryBarrier {\n                    src_access_mask: vk::AccessFlags::SHADER_WRITE,\n                    dst_access_mask: vk::AccessFlags::SHADER_READ\n                        | vk::AccessFlags::INDIRECT_COMMAND_READ,\n                    ..Default::default()\n                }],\n                &[],\n                &[],\n            );\n        }\n    }\n\n    pub unsafe fn destroy(&mut self, device: &Device) {\n        unsafe {\n            device.destroy_descriptor_pool(self.descriptor_pool, None);\n            self.params.destroy(device);\n            self.voxels_staging.destroy(device);\n          
  self.voxels.destroy(device);\n            self.state.destroy(device);\n        }\n    }\n}\n\n/// Specifies a single chunk's worth of surface extraction work\n#[derive(Debug, Copy, Clone)]\npub struct ExtractTask {\n    pub indirect_offset: vk::DeviceSize,\n    pub face_offset: vk::DeviceSize,\n    pub index: u32,\n    pub draw_id: u32,\n    pub reverse_winding: bool,\n}\n\nfn dispatch_sizes(dimension: u32) -> na::Vector3<u32> {\n    fn divide_rounding_up(x: u32, y: u32) -> u32 {\n        debug_assert!(x > 0 && y > 0);\n        (x - 1) / y + 1\n    }\n\n    // We add 1 to each dimension because we only look at negative-facing faces of the target voxel\n    na::Vector3::new(\n        // Extending the X axis accounts for 3 possible faces per voxel\n        divide_rounding_up((dimension + 1) * 3, WORKGROUP_SIZE[0]),\n        divide_rounding_up(dimension + 1, WORKGROUP_SIZE[1]),\n        divide_rounding_up(dimension + 1, WORKGROUP_SIZE[2]),\n    )\n}\n\n#[repr(C)]\n#[derive(Copy, Clone)]\nstruct Params {\n    dimension: u32,\n}\n\n/// Manages storage for ready-to-render voxels\npub struct DrawBuffer {\n    indirect: DedicatedBuffer,\n    faces: DedicatedBuffer,\n    dimension: u32,\n    face_buffer_unit: vk::DeviceSize,\n    count: u32,\n}\n\nimpl DrawBuffer {\n    /// Allocate a buffer suitable for rendering at most `count` chunks having `dimension` voxels\n    /// along each edge\n    pub fn new(gfx: &Base, count: u32, dimension: u32) -> Self {\n        let device = &*gfx.device;\n\n        let max_faces = 3 * (dimension.pow(3) + dimension.pow(2));\n        let face_buffer_unit = round_up(\n            max_faces as vk::DeviceSize * FACE_SIZE,\n            gfx.limits.min_storage_buffer_offset_alignment,\n        );\n        let face_buffer_size = count as vk::DeviceSize * face_buffer_unit;\n\n        unsafe {\n            let indirect = DedicatedBuffer::new(\n                device,\n                &gfx.memory_properties,\n                
&vk::BufferCreateInfo::default()\n                    .size(count as vk::DeviceSize * INDIRECT_SIZE)\n                    .usage(\n                        vk::BufferUsageFlags::STORAGE_BUFFER\n                            | vk::BufferUsageFlags::INDIRECT_BUFFER\n                            | vk::BufferUsageFlags::TRANSFER_DST,\n                    )\n                    .sharing_mode(vk::SharingMode::EXCLUSIVE),\n                vk::MemoryPropertyFlags::DEVICE_LOCAL,\n            );\n            gfx.set_name(indirect.handle, cstr!(\"indirect\"));\n\n            let faces = DedicatedBuffer::new(\n                device,\n                &gfx.memory_properties,\n                &vk::BufferCreateInfo::default()\n                    .size(face_buffer_size)\n                    .usage(vk::BufferUsageFlags::STORAGE_BUFFER)\n                    .sharing_mode(vk::SharingMode::EXCLUSIVE),\n                vk::MemoryPropertyFlags::DEVICE_LOCAL,\n            );\n            gfx.set_name(faces.handle, cstr!(\"faces\"));\n\n            Self {\n                indirect,\n                faces,\n                dimension,\n                face_buffer_unit,\n                count,\n            }\n        }\n    }\n\n    /// Buffer containing face data\n    pub fn face_buffer(&self) -> vk::Buffer {\n        self.faces.handle\n    }\n\n    /// Buffer containing face counts for use with cmd_draw_indirect\n    pub fn indirect_buffer(&self) -> vk::Buffer {\n        self.indirect.handle\n    }\n\n    /// The offset into the face buffer at which a chunk's face data can be found\n    pub fn face_offset(&self, chunk: u32) -> vk::DeviceSize {\n        assert!(chunk < self.count);\n        vk::DeviceSize::from(chunk) * self.face_buffer_unit\n    }\n\n    /// The offset into the indirect buffer at which a chunk's face data can be found\n    pub fn indirect_offset(&self, chunk: u32) -> vk::DeviceSize {\n        assert!(chunk < self.count);\n        vk::DeviceSize::from(chunk) * INDIRECT_SIZE\n  
  }\n\n    /// Number of voxels along a chunk edge\n    pub fn dimension(&self) -> u32 {\n        self.dimension\n    }\n\n    pub unsafe fn destroy(&mut self, device: &Device) {\n        unsafe {\n            self.indirect.destroy(device);\n            self.faces.destroy(device);\n        }\n    }\n}\n\n// Size of the VkDrawIndirectCommand struct\nconst INDIRECT_SIZE: vk::DeviceSize = 16;\n\nconst FACE_SIZE: vk::DeviceSize = 8;\n\nconst WORKGROUP_SIZE: [u32; 3] = [4, 4, 4];\n\nfn round_up(value: vk::DeviceSize, alignment: vk::DeviceSize) -> vk::DeviceSize {\n    value.div_ceil(alignment) * alignment\n}\n"
  },
  {
    "path": "client/src/graphics/voxels/tests.rs",
    "content": "use std::{mem, sync::Arc};\n\nuse ash::vk;\nuse lahar::DedicatedMapping;\nuse renderdoc::{RenderDoc, V110};\n\nuse super::{SurfaceExtraction, surface_extraction};\nuse crate::graphics::{Base, VkDrawIndirectCommand};\nuse common::world::Material;\n\nstruct SurfaceExtractionTest {\n    gfx: Arc<Base>,\n    extract: SurfaceExtraction,\n    scratch: surface_extraction::ScratchBuffer,\n    indirect: DedicatedMapping<VkDrawIndirectCommand>,\n    surfaces: DedicatedMapping<[Surface]>,\n    cmd_pool: vk::CommandPool,\n    cmd: vk::CommandBuffer,\n    rd: Option<RenderDoc<V110>>,\n}\n\nimpl SurfaceExtractionTest {\n    pub fn new() -> Self {\n        let gfx = Arc::new(Base::headless());\n        let extract = SurfaceExtraction::new(&gfx);\n        let scratch = surface_extraction::ScratchBuffer::new(&gfx, &extract, 1, DIMENSION as u32);\n\n        let device = &*gfx.device;\n\n        unsafe {\n            let indirect = DedicatedMapping::<VkDrawIndirectCommand>::zeroed(\n                device,\n                &gfx.memory_properties,\n                vk::BufferUsageFlags::STORAGE_BUFFER | vk::BufferUsageFlags::TRANSFER_DST,\n            );\n\n            let surfaces = DedicatedMapping::<[Surface]>::zeroed_array(\n                device,\n                &gfx.memory_properties,\n                vk::BufferUsageFlags::STORAGE_BUFFER,\n                3 * (DIMENSION.pow(3) + DIMENSION.pow(2)),\n            );\n\n            let cmd_pool = device\n                .create_command_pool(\n                    &vk::CommandPoolCreateInfo::default()\n                        .queue_family_index(gfx.queue_family)\n                        .flags(vk::CommandPoolCreateFlags::RESET_COMMAND_BUFFER),\n                    None,\n                )\n                .unwrap();\n\n            let cmd = device\n                .allocate_command_buffers(\n                    &vk::CommandBufferAllocateInfo::default()\n                        .command_pool(cmd_pool)\n                
        .command_buffer_count(1),\n                )\n                .unwrap()[0];\n\n            Self {\n                gfx,\n                extract,\n                scratch,\n                indirect,\n                surfaces,\n                cmd_pool,\n                cmd,\n                rd: RenderDoc::new().ok(),\n            }\n        }\n    }\n\n    fn run(&mut self) {\n        let device = &*self.gfx.device;\n\n        if let Some(ref mut rd) = self.rd {\n            rd.start_frame_capture(std::ptr::null(), std::ptr::null());\n        }\n\n        unsafe {\n            device\n                .begin_command_buffer(\n                    self.cmd,\n                    &vk::CommandBufferBeginInfo::default()\n                        .flags(vk::CommandBufferUsageFlags::ONE_TIME_SUBMIT),\n                )\n                .unwrap();\n\n            self.scratch.extract(\n                device,\n                &self.extract,\n                self.indirect.buffer(),\n                self.surfaces.buffer(),\n                self.cmd,\n                &[surface_extraction::ExtractTask {\n                    indirect_offset: 0,\n                    face_offset: 0,\n                    index: 0,\n                    draw_id: 0,\n                    reverse_winding: false,\n                }],\n            );\n            device.end_command_buffer(self.cmd).unwrap();\n\n            device\n                .queue_submit(\n                    self.gfx.queue,\n                    &[vk::SubmitInfo::default().command_buffers(&[self.cmd])],\n                    vk::Fence::null(),\n                )\n                .unwrap();\n            device.device_wait_idle().unwrap();\n        }\n\n        if let Some(ref mut rd) = self.rd {\n            rd.end_frame_capture(std::ptr::null(), std::ptr::null());\n        }\n    }\n}\n\nimpl Drop for SurfaceExtractionTest {\n    fn drop(&mut self) {\n        let device = &*self.gfx.device;\n        unsafe {\n            
self.extract.destroy(device);\n            self.scratch.destroy(device);\n            self.indirect.destroy(device);\n            self.surfaces.destroy(device);\n            device.destroy_command_pool(self.cmd_pool, None);\n        }\n    }\n}\n\nconst DIMENSION: usize = 2;\n\n#[repr(C)]\n#[derive(Debug, Eq, PartialEq)]\nstruct Surface {\n    x: u8,\n    y: u8,\n    z: u8,\n    axis: u8,\n    mat: Material,\n    _padding: u8,\n    occlusion: u8,\n}\n\n#[test]\n#[ignore]\nfn surface_extraction() {\n    assert_eq!(mem::size_of::<Surface>(), 8);\n\n    let _guard = common::tracing_guard();\n    let mut test = SurfaceExtractionTest::new();\n\n    for x in test.scratch.storage(0) {\n        *x = Material::Void;\n    }\n\n    test.run();\n\n    assert_eq!(\n        test.indirect.vertex_count, 0,\n        \"empty chunks have no surfaces\"\n    );\n\n    for x in test.scratch.storage(0) {\n        *x = Material::Dirt;\n    }\n\n    test.run();\n\n    assert_eq!(\n        test.indirect.vertex_count, 0,\n        \"solid chunks have no surfaces\"\n    );\n\n    let storage = test.scratch.storage(0);\n    for x in &mut *storage {\n        *x = Material::Void;\n    }\n    for z in 0..((DIMENSION + 2) / 2) {\n        for y in 0..(DIMENSION + 2) {\n            for x in 0..(DIMENSION + 2) {\n                storage[x + y * (DIMENSION + 2) + z * (DIMENSION + 2).pow(2)] = Material::Dirt;\n            }\n        }\n    }\n\n    test.run();\n\n    assert_eq!(\n        test.indirect.vertex_count,\n        6 * DIMENSION.pow(2) as u32,\n        \"half-solid chunks have n^2 surfaces\"\n    );\n    let surfaces = &test.surfaces[..DIMENSION.pow(2)];\n    for expected in &[\n        Surface {\n            x: 0,\n            y: 0,\n            z: 1,\n            axis: 5,\n            mat: Material::Dirt,\n            _padding: 0,\n            occlusion: 0xFF,\n        },\n        Surface {\n            x: 1,\n            y: 0,\n            z: 1,\n            axis: 5,\n            mat: 
Material::Dirt,\n            _padding: 0,\n            occlusion: 0xFF,\n        },\n        Surface {\n            x: 0,\n            y: 1,\n            z: 1,\n            axis: 5,\n            mat: Material::Dirt,\n            _padding: 0,\n            occlusion: 0xFF,\n        },\n        Surface {\n            x: 1,\n            y: 1,\n            z: 1,\n            axis: 5,\n            mat: Material::Dirt,\n            _padding: 0,\n            occlusion: 0xFF,\n        },\n    ] {\n        assert!(surfaces.contains(expected));\n    }\n}\n"
  },
  {
    "path": "client/src/graphics/window.rs",
    "content": "use std::sync::Arc;\nuse std::time::Instant;\nuse std::{f32, os::raw::c_char};\n\nuse ash::{khr, vk};\nuse lahar::DedicatedImage;\nuse raw_window_handle::{HasDisplayHandle, HasWindowHandle};\nuse tracing::{error, info};\nuse winit::event::KeyEvent;\nuse winit::event_loop::ActiveEventLoop;\nuse winit::keyboard::{KeyCode, PhysicalKey};\nuse winit::{\n    dpi::PhysicalSize,\n    event::{DeviceEvent, ElementState, MouseButton, WindowEvent},\n    window::{CursorGrabMode, Window as WinitWindow},\n};\n\nuse super::gui::GuiState;\nuse super::{Base, Core, Draw, Frustum};\nuse crate::{Config, Sim};\n\n/// OS window\npub struct EarlyWindow {\n    window: WinitWindow,\n    required_extensions: &'static [*const c_char],\n}\n\nimpl EarlyWindow {\n    pub fn new(event_loop: &ActiveEventLoop) -> Self {\n        let mut attrs = WinitWindow::default_attributes();\n        attrs.title = \"hypermine\".into();\n        let window = event_loop.create_window(attrs).unwrap();\n        Self {\n            window,\n            required_extensions: ash_window::enumerate_required_extensions(\n                event_loop.display_handle().unwrap().as_raw(),\n            )\n            .expect(\"unsupported platform\"),\n        }\n    }\n\n    /// Identify the Vulkan extension needed to render to this window\n    pub fn required_extensions(&self) -> &'static [*const c_char] {\n        self.required_extensions\n    }\n}\n\n/// OS window + rendering handles\npub struct Window {\n    _core: Arc<Core>,\n    pub window: WinitWindow,\n    config: Arc<Config>,\n    surface_fn: khr::surface::Instance,\n    surface: vk::SurfaceKHR,\n    swapchain: Option<SwapchainMgr>,\n    swapchain_needs_update: bool,\n    draw: Option<Draw>,\n    sim: Option<Sim>,\n    gui_state: GuiState,\n    yak: yakui::Yakui,\n    net: server::Handle,\n    input: InputState,\n    last_frame: Option<Instant>,\n}\n\nimpl Window {\n    /// Finish constructing a window\n    pub fn new(\n        early: EarlyWindow,\n    
    core: Arc<Core>,\n        config: Arc<Config>,\n        net: server::Handle,\n    ) -> Self {\n        let surface = unsafe {\n            ash_window::create_surface(\n                &core.entry,\n                &core.instance,\n                early.window.display_handle().unwrap().as_raw(),\n                early.window.window_handle().unwrap().as_raw(),\n                None,\n            )\n            .unwrap()\n        };\n        let surface_fn = khr::surface::Instance::new(&core.entry, &core.instance);\n\n        Self {\n            _core: core,\n            window: early.window,\n            config,\n            surface,\n            surface_fn,\n            swapchain: None,\n            swapchain_needs_update: false,\n            draw: None,\n            sim: None,\n            gui_state: GuiState::new(),\n            yak: yakui::Yakui::new(),\n            net,\n            input: InputState::default(),\n            last_frame: None,\n        }\n    }\n\n    /// Determine whether this window can be rendered to from a particular device and queue family\n    pub fn supports(&self, physical: vk::PhysicalDevice, queue_family_index: u32) -> bool {\n        unsafe {\n            self.surface_fn\n                .get_physical_device_surface_support(physical, queue_family_index, self.surface)\n                .unwrap()\n        }\n    }\n\n    pub fn init_rendering(&mut self, gfx: Arc<Base>) {\n        // Allocate the presentable images we'll be rendering to\n        self.swapchain = Some(SwapchainMgr::new(\n            self,\n            gfx.clone(),\n            self.window.inner_size(),\n        ));\n        // Construct the core rendering object\n        self.draw = Some(Draw::new(gfx, self.config.clone()));\n    }\n\n    pub fn handle_device_event(&mut self, event: DeviceEvent) {\n        match event {\n            DeviceEvent::MouseMotion { delta } if self.input.mouse_captured => {\n                if let Some(sim) = self.sim.as_mut() {\n              
      const SENSITIVITY: f32 = 2e-3;\n                    sim.look(\n                        -delta.0 as f32 * SENSITIVITY,\n                        -delta.1 as f32 * SENSITIVITY,\n                        0.0,\n                    );\n                }\n            }\n            _ => {}\n        }\n    }\n\n    pub fn handle_event(&mut self, event: WindowEvent, event_loop: &ActiveEventLoop) {\n        match event {\n            WindowEvent::RedrawRequested => {\n                while let Ok(msg) = self.net.incoming.try_recv() {\n                    self.handle_net(msg);\n                }\n\n                if let Some(sim) = self.sim.as_mut() {\n                    let this_frame = Instant::now();\n                    let dt = this_frame - self.last_frame.unwrap_or(this_frame);\n                    sim.set_movement_input(self.input.movement());\n                    sim.set_jump_held(self.input.jump);\n\n                    sim.look(0.0, 0.0, 2.0 * self.input.roll() * dt.as_secs_f32());\n\n                    sim.step(dt, &mut self.net);\n                    self.last_frame = Some(this_frame);\n                }\n\n                self.draw();\n            }\n            WindowEvent::Resized(_) => {\n                // Some environments may not emit the vulkan signals that recommend or\n                // require surface reconstruction, so we need to check for messages from the\n                // windowing system too. 
We defer actually performing the update until\n                // drawing to avoid doing unnecessary work between frames.\n                self.swapchain_needs_update = true;\n            }\n            WindowEvent::CloseRequested => {\n                info!(\"exiting due to closed window\");\n                event_loop.exit();\n            }\n            WindowEvent::MouseInput {\n                button: MouseButton::Left,\n                state: ElementState::Pressed,\n                ..\n            } => {\n                if self.input.mouse_captured\n                    && let Some(sim) = self.sim.as_mut()\n                {\n                    sim.set_break_block_pressed_true();\n                }\n                let _ = self\n                    .window\n                    .set_cursor_grab(CursorGrabMode::Confined)\n                    .or_else(|_e| self.window.set_cursor_grab(CursorGrabMode::Locked));\n                self.window.set_cursor_visible(false);\n                self.input.mouse_captured = true;\n            }\n            WindowEvent::MouseInput {\n                button: MouseButton::Right,\n                state: ElementState::Pressed,\n                ..\n            } => {\n                if self.input.mouse_captured\n                    && let Some(sim) = self.sim.as_mut()\n                {\n                    sim.set_place_block_pressed_true();\n                }\n            }\n            WindowEvent::KeyboardInput {\n                event:\n                    KeyEvent {\n                        state,\n                        physical_key: PhysicalKey::Code(key),\n                        ..\n                    },\n                ..\n            } => match key {\n                KeyCode::KeyW => {\n                    self.input.forward = state == ElementState::Pressed;\n                }\n                KeyCode::KeyA => {\n                    self.input.left = state == ElementState::Pressed;\n                }\n                
KeyCode::KeyS => {\n                    self.input.back = state == ElementState::Pressed;\n                }\n                KeyCode::KeyD => {\n                    self.input.right = state == ElementState::Pressed;\n                }\n                KeyCode::KeyQ => {\n                    self.input.anticlockwise = state == ElementState::Pressed;\n                }\n                KeyCode::KeyE => {\n                    self.input.clockwise = state == ElementState::Pressed;\n                }\n                KeyCode::KeyR => {\n                    self.input.up = state == ElementState::Pressed;\n                }\n                KeyCode::KeyF => {\n                    self.input.down = state == ElementState::Pressed;\n                }\n                KeyCode::Space => {\n                    if let Some(sim) = self.sim.as_mut() {\n                        if !self.input.jump && state == ElementState::Pressed {\n                            sim.set_jump_pressed_true();\n                        }\n                        self.input.jump = state == ElementState::Pressed;\n                    }\n                }\n                KeyCode::KeyV if state == ElementState::Pressed => {\n                    if let Some(sim) = self.sim.as_mut() {\n                        sim.toggle_no_clip();\n                    }\n                }\n                KeyCode::F1 if state == ElementState::Pressed => {\n                    self.gui_state.toggle_gui();\n                }\n                KeyCode::Escape => {\n                    let _ = self.window.set_cursor_grab(CursorGrabMode::None);\n                    self.window.set_cursor_visible(true);\n                    self.input.mouse_captured = false;\n                }\n                KeyCode::Minus => {\n                    if state == ElementState::Pressed\n                        && let Some(sim) = self.sim.as_mut()\n                    {\n                        sim.prev_material();\n                    }\n             
   }\n                KeyCode::Equal => {\n                    if state == ElementState::Pressed\n                        && let Some(sim) = self.sim.as_mut()\n                    {\n                        sim.next_material();\n                    }\n                }\n                KeyCode::KeyG => {\n                    if let Some(sim) = self.sim.as_mut() {\n                        sim.pick_material();\n                    }\n                }\n                _ => {\n                    if let Some(material_idx) = number_key_to_index(key)\n                        && state == ElementState::Pressed\n                        && let Some(sim) = self.sim.as_mut()\n                    {\n                        sim.select_material(material_idx);\n                    }\n                }\n            },\n            WindowEvent::Focused(focused) => {\n                if !focused {\n                    let _ = self.window.set_cursor_grab(CursorGrabMode::None);\n                    self.window.set_cursor_visible(true);\n                    self.input.mouse_captured = false;\n                }\n            }\n            _ => {}\n        }\n    }\n\n    fn handle_net(&mut self, msg: server::Message) {\n        match msg {\n            server::Message::ConnectionLost(e) => {\n                error!(\"connection lost: {}\", e);\n            }\n            server::Message::Hello(msg) => {\n                let sim = Sim::new(\n                    msg.sim_config,\n                    self.config.chunk_load_parallelism as usize,\n                    msg.character,\n                );\n                if let Some(draw) = self.draw.as_mut() {\n                    draw.configure(sim.cfg());\n                }\n                self.sim = Some(sim);\n            }\n            msg => {\n                if let Some(sim) = self.sim.as_mut() {\n                    sim.handle_net(msg);\n                } else {\n                    error!(\"Received game data before ServerHello\");\n 
               }\n            }\n        }\n    }\n\n    /// Draw a new frame\n    fn draw(&mut self) {\n        let swapchain = self.swapchain.as_mut().unwrap();\n        let draw = self.draw.as_mut().unwrap();\n        unsafe {\n            // Wait for a frame's worth of rendering resources to become available\n            draw.wait();\n            // Get the index of the swapchain image we'll render to\n            let frame_id = loop {\n                // Check whether the window has been resized or similar\n                if self.swapchain_needs_update {\n                    // Wait for all in-flight frames to complete so we don't have a use-after-free\n                    draw.wait_idle();\n                    // Recreate the swapchain at a new size (or whatever)\n                    swapchain.update(&self.surface_fn, self.surface, self.window.inner_size());\n                    self.swapchain_needs_update = false;\n                }\n                match swapchain.acquire_next_image(draw.image_acquired()) {\n                    Ok((idx, suboptimal)) => {\n                        self.swapchain_needs_update = suboptimal;\n                        break idx;\n                    }\n                    Err(vk::Result::ERROR_OUT_OF_DATE_KHR) => {\n                        self.swapchain_needs_update = true;\n                    }\n                    Err(e) => {\n                        panic!(\"acquire_next_image: {e}\");\n                    }\n                }\n            };\n            let extent = swapchain.state.extent;\n            let aspect_ratio = extent.width as f32 / extent.height as f32;\n            let frame = &swapchain.state.frames[frame_id as usize];\n            let frustum = Frustum::from_vfov(f32::consts::FRAC_PI_4 * 1.2, aspect_ratio);\n            // Render the GUI\n            self.yak\n                .set_surface_size([extent.width as f32, extent.height as f32].into());\n            self.yak\n                
.set_unscaled_viewport(yakui::geometry::Rect::from_pos_size(\n                    Default::default(),\n                    [extent.width as f32, extent.height as f32].into(),\n                ));\n            self.yak.start();\n            if let Some(sim) = self.sim.as_ref() {\n                self.gui_state.run(sim);\n            }\n            self.yak.finish();\n            // Render the frame\n            draw.draw(\n                self.sim.as_mut(),\n                self.yak.paint(),\n                frame.buffer,\n                frame.depth_view,\n                extent,\n                frame.present,\n                &frustum,\n            );\n            // Submit the frame to be presented on the window\n            match swapchain.queue_present(frame_id) {\n                Ok(false) => {}\n                Ok(true) | Err(vk::Result::ERROR_OUT_OF_DATE_KHR) => {\n                    self.swapchain_needs_update = true;\n                }\n                Err(e) => panic!(\"queue_present: {e}\"),\n            };\n        }\n    }\n}\n\nfn number_key_to_index(key: KeyCode) -> Option<usize> {\n    match key {\n        KeyCode::Digit1 => Some(0),\n        KeyCode::Digit2 => Some(1),\n        KeyCode::Digit3 => Some(2),\n        KeyCode::Digit4 => Some(3),\n        KeyCode::Digit5 => Some(4),\n        KeyCode::Digit6 => Some(5),\n        KeyCode::Digit7 => Some(6),\n        KeyCode::Digit8 => Some(7),\n        KeyCode::Digit9 => Some(8),\n        KeyCode::Digit0 => Some(9),\n        _ => None,\n    }\n}\n\nimpl Drop for Window {\n    fn drop(&mut self) {\n        self.draw.take();\n        self.swapchain.take();\n        unsafe {\n            self.surface_fn.destroy_surface(self.surface, None);\n        }\n    }\n}\n\nstruct SwapchainMgr {\n    state: SwapchainState,\n    format: vk::SurfaceFormatKHR,\n}\n\nimpl SwapchainMgr {\n    /// Construct a swapchain manager for a certain window\n    fn new(window: &Window, gfx: Arc<Base>, fallback_size: 
PhysicalSize<u32>) -> Self {\n        let device = &*gfx.device;\n        let swapchain_fn = khr::swapchain::Device::new(&gfx.core.instance, device);\n        let surface_formats = unsafe {\n            window\n                .surface_fn\n                .get_physical_device_surface_formats(gfx.physical, window.surface)\n                .unwrap()\n        };\n        let desired_format = vk::SurfaceFormatKHR {\n            format: super::base::COLOR_FORMAT,\n            color_space: vk::ColorSpaceKHR::SRGB_NONLINEAR,\n        };\n\n        let desirable_format = |x: &vk::SurfaceFormatKHR| -> bool {\n            x.format == desired_format.format && x.color_space == desired_format.color_space\n        };\n\n        if (surface_formats.len() != 1\n            || (surface_formats[0].format != vk::Format::UNDEFINED\n                || surface_formats[0].color_space != desired_format.color_space))\n            && !surface_formats.iter().any(desirable_format)\n        {\n            panic!(\"no suitable surface format: {surface_formats:?}\");\n        }\n\n        Self {\n            state: unsafe {\n                SwapchainState::new(\n                    &window.surface_fn,\n                    swapchain_fn,\n                    gfx,\n                    window.surface,\n                    desired_format,\n                    vk::SwapchainKHR::null(),\n                    fallback_size,\n                )\n            },\n            format: desired_format,\n        }\n    }\n\n    /// Recreate the swapchain based on the window's current capabilities\n    ///\n    /// # Safety\n    /// - There must be no operations scheduled that access the current swapchain\n    unsafe fn update(\n        &mut self,\n        surface_fn: &khr::surface::Instance,\n        surface: vk::SurfaceKHR,\n        fallback_size: PhysicalSize<u32>,\n    ) {\n        unsafe {\n            self.state = SwapchainState::new(\n                surface_fn,\n                
self.state.swapchain_fn.clone(),\n                self.state.gfx.clone(),\n                surface,\n                self.format,\n                self.state.handle,\n                fallback_size,\n            );\n        }\n    }\n\n    /// Get the index of the next frame to use\n    unsafe fn acquire_next_image(&self, signal: vk::Semaphore) -> Result<(u32, bool), vk::Result> {\n        unsafe {\n            self.state.swapchain_fn.acquire_next_image(\n                self.state.handle,\n                u64::MAX,\n                signal,\n                vk::Fence::null(),\n            )\n        }\n    }\n\n    /// Present a frame on the window\n    unsafe fn queue_present(&self, index: u32) -> Result<bool, vk::Result> {\n        unsafe {\n            self.state.swapchain_fn.queue_present(\n                self.state.gfx.queue,\n                &vk::PresentInfoKHR::default()\n                    .wait_semaphores(&[self.state.frames[index as usize].present])\n                    .swapchains(&[self.state.handle])\n                    .image_indices(&[index]),\n            )\n        }\n    }\n}\n\n/// Data that's replaced when the swapchain is updated\nstruct SwapchainState {\n    gfx: Arc<Base>,\n    swapchain_fn: khr::swapchain::Device,\n    extent: vk::Extent2D,\n    handle: vk::SwapchainKHR,\n    frames: Vec<Frame>,\n}\n\nimpl SwapchainState {\n    unsafe fn new(\n        surface_fn: &khr::surface::Instance,\n        swapchain_fn: khr::swapchain::Device,\n        gfx: Arc<Base>,\n        surface: vk::SurfaceKHR,\n        format: vk::SurfaceFormatKHR,\n        old: vk::SwapchainKHR,\n        fallback_size: PhysicalSize<u32>,\n    ) -> Self {\n        unsafe {\n            let device = &*gfx.device;\n\n            let surface_capabilities = surface_fn\n                .get_physical_device_surface_capabilities(gfx.physical, surface)\n                .unwrap();\n            let extent = match surface_capabilities.current_extent.width {\n                // If 
Vulkan doesn't know, winit probably does. Known to apply at least to Wayland.\n                std::u32::MAX => vk::Extent2D {\n                    width: fallback_size.width,\n                    height: fallback_size.height,\n                },\n                _ => surface_capabilities.current_extent,\n            };\n            let pre_transform = if surface_capabilities\n                .supported_transforms\n                .contains(vk::SurfaceTransformFlagsKHR::IDENTITY)\n            {\n                vk::SurfaceTransformFlagsKHR::IDENTITY\n            } else {\n                surface_capabilities.current_transform\n            };\n            let present_modes = surface_fn\n                .get_physical_device_surface_present_modes(gfx.physical, surface)\n                .unwrap();\n            let present_mode = present_modes\n                .iter()\n                .cloned()\n                .find(|&mode| mode == vk::PresentModeKHR::MAILBOX)\n                .unwrap_or(vk::PresentModeKHR::FIFO);\n\n            let image_count = if surface_capabilities.max_image_count > 0 {\n                surface_capabilities\n                    .max_image_count\n                    .min(surface_capabilities.min_image_count + 1)\n            } else {\n                surface_capabilities.min_image_count + 1\n            };\n\n            let handle = swapchain_fn\n                .create_swapchain(\n                    &vk::SwapchainCreateInfoKHR::default()\n                        .surface(surface)\n                        .min_image_count(image_count)\n                        .image_color_space(format.color_space)\n                        .image_format(format.format)\n                        .image_extent(extent)\n                        .image_usage(vk::ImageUsageFlags::COLOR_ATTACHMENT)\n                        .image_sharing_mode(vk::SharingMode::EXCLUSIVE)\n                        .pre_transform(pre_transform)\n                        
.composite_alpha(vk::CompositeAlphaFlagsKHR::OPAQUE)\n                        .present_mode(present_mode)\n                        .clipped(true)\n                        .image_array_layers(1)\n                        .old_swapchain(old),\n                    None,\n                )\n                .unwrap();\n\n            let frames = swapchain_fn\n                .get_swapchain_images(handle)\n                .unwrap()\n                .into_iter()\n                .map(|image| {\n                    let view = device\n                        .create_image_view(\n                            &vk::ImageViewCreateInfo::default()\n                                .view_type(vk::ImageViewType::TYPE_2D)\n                                .format(format.format)\n                                .subresource_range(vk::ImageSubresourceRange {\n                                    aspect_mask: vk::ImageAspectFlags::COLOR,\n                                    base_mip_level: 0,\n                                    level_count: 1,\n                                    base_array_layer: 0,\n                                    layer_count: 1,\n                                })\n                                .image(image),\n                            None,\n                        )\n                        .unwrap();\n                    gfx.set_name(view, cstr!(\"swapchain\"));\n                    let depth = DedicatedImage::new(\n                        device,\n                        &gfx.memory_properties,\n                        &vk::ImageCreateInfo::default()\n                            .image_type(vk::ImageType::TYPE_2D)\n                            .format(vk::Format::D32_SFLOAT)\n                            .extent(vk::Extent3D {\n                                width: extent.width,\n                                height: extent.height,\n                                depth: 1,\n                            })\n                            .mip_levels(1)\n       
                     .array_layers(1)\n                            .samples(vk::SampleCountFlags::TYPE_1)\n                            .usage(\n                                vk::ImageUsageFlags::DEPTH_STENCIL_ATTACHMENT\n                                    | vk::ImageUsageFlags::INPUT_ATTACHMENT,\n                            ),\n                    );\n                    gfx.set_name(depth.handle, cstr!(\"depth\"));\n                    gfx.set_name(depth.memory, cstr!(\"depth\"));\n                    let depth_view = device\n                        .create_image_view(\n                            &vk::ImageViewCreateInfo::default()\n                                .image(depth.handle)\n                                .view_type(vk::ImageViewType::TYPE_2D)\n                                .format(vk::Format::D32_SFLOAT)\n                                .subresource_range(vk::ImageSubresourceRange {\n                                    aspect_mask: vk::ImageAspectFlags::DEPTH,\n                                    base_mip_level: 0,\n                                    level_count: 1,\n                                    base_array_layer: 0,\n                                    layer_count: 1,\n                                }),\n                            None,\n                        )\n                        .unwrap();\n                    gfx.set_name(depth_view, cstr!(\"depth\"));\n                    let present = device.create_semaphore(&Default::default(), None).unwrap();\n                    gfx.set_name(present, cstr!(\"present\"));\n                    Frame {\n                        view,\n                        depth,\n                        depth_view,\n                        buffer: device\n                            .create_framebuffer(\n                                &vk::FramebufferCreateInfo::default()\n                                    .render_pass(gfx.render_pass)\n                                    .attachments(&[view, 
depth_view])\n                                    .width(extent.width)\n                                    .height(extent.height)\n                                    .layers(1),\n                                None,\n                            )\n                            .unwrap(),\n                        present,\n                    }\n                })\n                .collect();\n\n            Self {\n                swapchain_fn,\n                gfx,\n                extent,\n                handle,\n                frames,\n            }\n        }\n    }\n}\n\nimpl Drop for SwapchainState {\n    fn drop(&mut self) {\n        let device = &*self.gfx.device;\n        unsafe {\n            for frame in &mut self.frames {\n                device.destroy_framebuffer(frame.buffer, None);\n                device.destroy_image_view(frame.depth_view, None);\n                device.destroy_image_view(frame.view, None);\n                frame.depth.destroy(device);\n                device.destroy_semaphore(frame.present, None);\n            }\n            self.swapchain_fn.destroy_swapchain(self.handle, None);\n        }\n    }\n}\n\nstruct Frame {\n    /// Image view for an entire swapchain image\n    view: vk::ImageView,\n    /// Depth buffer to use when rendering to this image\n    depth: DedicatedImage,\n    /// View thereof\n    depth_view: vk::ImageView,\n    /// Framebuffer referencing `view` and `depth_view`\n    buffer: vk::Framebuffer,\n    /// Semaphore used to ensure the frame isn't presented until rendering completes\n    present: vk::Semaphore,\n}\n\n#[derive(Default)]\nstruct InputState {\n    forward: bool,\n    back: bool,\n    left: bool,\n    right: bool,\n    up: bool,\n    down: bool,\n    jump: bool,\n    clockwise: bool,\n    anticlockwise: bool,\n    mouse_captured: bool,\n}\n\nimpl InputState {\n    fn movement(&self) -> na::Vector3<f32> {\n        na::Vector3::new(\n            self.right as u8 as f32 - self.left as u8 as f32,\n     
       self.up as u8 as f32 - self.down as u8 as f32,\n            self.back as u8 as f32 - self.forward as u8 as f32,\n        )\n    }\n\n    fn roll(&self) -> f32 {\n        self.anticlockwise as u8 as f32 - self.clockwise as u8 as f32\n    }\n}\n"
  },
  {
    "path": "client/src/lahar_deprecated/condition.rs",
    "content": "use std::task::{Context, Waker};\n\n/// Manages tasks waiting on a single condition\npub struct Condition {\n    wakers: Vec<Waker>,\n    generation: u64,\n}\n\nimpl Condition {\n    pub fn new() -> Self {\n        Self {\n            wakers: Vec::new(),\n            generation: 0,\n        }\n    }\n\n    /// Ensure the next `wake` call will wake the calling task\n    ///\n    /// Checks the task-associated generation counter stored in `state`. If it's present and\n    /// current, we already have this task's `Waker` and no action is necessary. Otherwise, record a\n    /// `Waker` and store the current generation in `state`.\n    pub fn register(&mut self, cx: &mut Context, state: &mut State) {\n        if state.0 == Some(self.generation) {\n            return;\n        }\n        state.0 = Some(self.generation);\n        self.wakers.push(cx.waker().clone());\n    }\n\n    /// Wake all known tasks\n    pub fn notify(&mut self) {\n        self.generation = self.generation.wrapping_add(1);\n        for waker in self.wakers.drain(..) {\n            waker.wake();\n        }\n    }\n}\n\n/// State maintained by each interested task\n///\n/// Stores the generation at which the task previously registered a `Waker`, if any.\n#[derive(Default)]\npub struct State(Option<u64>);\n"
  },
  {
    "path": "client/src/lahar_deprecated/mod.rs",
    "content": "//! This code is directly copied from https://github.com/Ralith/lahar/tree/fbc889a4538e2d3b6b519a6cb7a3538d7b3bfcdf\n//! with minor modifications for interoperability with the current versions of ash and lahar. It is intended to be temporary\n//! and will be replaced when the code is sufficiently refactored to support the newer lahar structures.\nmod condition;\nmod ring_alloc;\npub mod staging;\npub mod transfer;\n"
  },
  {
    "path": "client/src/lahar_deprecated/ring_alloc.rs",
    "content": "use std::collections::VecDeque;\n\n/// State tracker for a ring buffer of contiguous variable-sized allocations with random frees\npub struct RingAlloc {\n    /// List of starting offsets, and whether they've been freed\n    allocations: VecDeque<(usize, bool)>,\n    /// Offset at which the next allocation will start\n    head: usize,\n    /// Number of allocations which have been freed\n    ///\n    /// Tracking this supports random freeing by making it easy to keep track of a single element\n    /// inside `allocations` even as items are added/removed.\n    freed: u64,\n}\n\nimpl RingAlloc {\n    pub fn new() -> Self {\n        RingAlloc {\n            allocations: VecDeque::new(),\n            head: 0,\n            freed: 0,\n        }\n    }\n\n    /// Returns the starting offset of a contiguous run of `size` units, or `None` if none exists.\n    ///\n    /// `capacity` is the total capacity of the ring.\n    pub fn alloc(&mut self, capacity: usize, size: usize) -> Option<(usize, Id)> {\n        let tail = if let Some(&(tail, _)) = self.allocations.front() {\n            tail\n        } else {\n            if size > capacity {\n                return None;\n            }\n            // No allocations, reset to initial state\n            self.allocations.push_back((0, false));\n            self.head = size;\n            self.freed = 0;\n            return Some((0, Id(0)));\n        };\n        let id = Id(self.freed.wrapping_add(self.allocations.len() as u64));\n        if self.head > tail {\n            // There's a run from the head to the end of the buffer\n            let free = capacity - self.head;\n            if free >= size {\n                let start = self.head;\n                self.allocations.push_back((start, false));\n                self.head = (start + size) % capacity;\n                return Some((start, id));\n            }\n            // and from the start of the buffer to the tail\n            if tail >= size {\n         
       self.allocations.push_back((0, false));\n                self.head = size;\n                return Some((0, id));\n            }\n            return None;\n        }\n        // Only one run, from head to tail\n        let free = tail - self.head;\n        if free >= size {\n            let start = self.head;\n            self.allocations.push_back((start, false));\n            self.head = start + size;\n            return Some((start, id));\n        }\n        None\n    }\n\n    pub fn free(&mut self, id: Id) {\n        self.allocations[id.0.wrapping_sub(self.freed) as usize].1 = true;\n        while let Some(&(_, true)) = self.allocations.front() {\n            self.allocations.pop_front();\n            self.freed += 1;\n        }\n    }\n}\n\n#[derive(Debug, Copy, Clone)]\npub struct Id(u64);\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn sanity() {\n        let mut r = RingAlloc::new();\n        const CAP: usize = 4;\n        let a = r.alloc(CAP, 3).unwrap();\n        assert!(r.alloc(CAP, 2).is_none());\n        let b = r.alloc(CAP, 1).unwrap();\n        assert_eq!(b.0, 3);\n        assert!(r.alloc(CAP, 1).is_none());\n        r.free(a.1);\n        let c = r.alloc(CAP, 1).unwrap();\n        assert_eq!(c.0, 0);\n        let d = r.alloc(CAP, 2).unwrap();\n        assert_eq!(d.0, 1);\n        assert!(r.alloc(CAP, 1).is_none());\n        r.free(c.1);\n        r.free(b.1);\n        let e = r.alloc(CAP, 1).unwrap();\n        assert_eq!(e.0, 3);\n        let f = r.alloc(CAP, 1).unwrap();\n        assert_eq!(f.0, 0);\n    }\n}\n"
  },
  {
    "path": "client/src/lahar_deprecated/staging.rs",
    "content": "use std::future::Future;\nuse std::ops::{Deref, DerefMut};\nuse std::sync::{Arc, Mutex};\nuse std::task::Poll;\n\nuse ash::{Device, vk};\nuse futures_util::future;\n\nuse super::condition::{self, Condition};\nuse super::ring_alloc::{self, RingAlloc};\nuse lahar::DedicatedMapping;\n\n/// A host-visible circular buffer for short-lived allocations\n///\n/// Best for transient uses like streaming transfers. Retaining an allocation of any size will block\n/// future allocations once the buffer wraps back aground.\npub struct StagingBuffer {\n    device: Arc<Device>,\n    buffer: DedicatedMapping<[u8]>,\n    state: Mutex<State>,\n}\n\nstruct State {\n    alloc: RingAlloc,\n    free: Condition,\n}\n\nimpl StagingBuffer {\n    pub fn new(\n        device: Arc<Device>,\n        props: &vk::PhysicalDeviceMemoryProperties,\n        capacity: usize,\n    ) -> Self {\n        let buffer = unsafe {\n            DedicatedMapping::zeroed_array(\n                &device,\n                props,\n                vk::BufferUsageFlags::TRANSFER_SRC,\n                capacity,\n            )\n        };\n        Self {\n            device,\n            buffer,\n            state: Mutex::new(State {\n                alloc: RingAlloc::new(),\n                free: Condition::new(),\n            }),\n        }\n    }\n\n    pub fn buffer(&self) -> vk::Buffer {\n        self.buffer.buffer()\n    }\n\n    /// Largest possible allocation\n    pub fn capacity(&self) -> usize {\n        self.buffer.len()\n    }\n\n    /// Completes when sufficient space is available\n    ///\n    /// Yields `None` if `size > self.capacity()`. No fairness guarantees, i.e. 
small allocations\n    /// may starve large ones.\n    pub fn alloc(&self, size: usize) -> impl Future<Output = Option<Alloc<'_>>> {\n        let mut cond_state = condition::State::default();\n        future::poll_fn(move |cx| {\n            if size > self.capacity() {\n                return Poll::Ready(None);\n            }\n            let mut state = self.state.lock().unwrap();\n            match state.alloc.alloc(self.capacity(), size) {\n                None => {\n                    state.free.register(cx, &mut cond_state);\n                    Poll::Pending\n                }\n                Some((offset, id)) => Poll::Ready(Some(Alloc {\n                    buf: self,\n                    bytes: unsafe {\n                        std::slice::from_raw_parts_mut(\n                            (self.buffer.as_ptr() as *const u8).add(offset) as *mut u8,\n                            size,\n                        )\n                    },\n                    id,\n                })),\n            }\n        })\n    }\n\n    fn free(&self, id: ring_alloc::Id) {\n        let mut state = self.state.lock().unwrap();\n        state.alloc.free(id);\n        state.free.notify();\n    }\n}\n\nimpl Drop for StagingBuffer {\n    fn drop(&mut self) {\n        unsafe {\n            self.buffer.destroy(&self.device);\n        }\n    }\n}\n\n/// An allocation from a `StagingBuffer`\npub struct Alloc<'a> {\n    buf: &'a StagingBuffer,\n    bytes: &'a mut [u8],\n    id: ring_alloc::Id,\n}\n\nimpl Alloc<'_> {\n    pub fn offset(&self) -> vk::DeviceSize {\n        self.bytes.as_ptr() as vk::DeviceSize\n            - self.buf.buffer.as_ptr() as *const u8 as vk::DeviceSize\n    }\n\n    pub fn size(&self) -> vk::DeviceSize {\n        self.bytes.len() as _\n    }\n}\n\nimpl Deref for Alloc<'_> {\n    type Target = [u8];\n\n    fn deref(&self) -> &[u8] {\n        self.bytes\n    }\n}\n\nimpl DerefMut for Alloc<'_> {\n    fn deref_mut(&mut self) -> &mut [u8] {\n        self.bytes\n   
 }\n}\n\nimpl Drop for Alloc<'_> {\n    fn drop(&mut self) {\n        self.buf.free(self.id);\n    }\n}\n"
  },
  {
    "path": "client/src/lahar_deprecated/transfer.rs",
    "content": "use std::convert::TryFrom;\nuse std::fmt;\nuse std::future::Future;\nuse std::sync::Arc;\nuse std::thread;\nuse std::time::Duration;\n\nuse ash::vk;\nuse futures_util::FutureExt;\nuse tokio::sync::{\n    mpsc::{self, error::TryRecvError},\n    oneshot,\n};\n\n#[derive(Clone)]\npub struct TransferHandle {\n    send: mpsc::UnboundedSender<Message>,\n}\n\nimpl TransferHandle {\n    pub unsafe fn run(\n        &self,\n        f: impl FnOnce(&mut TransferContext, vk::CommandBuffer) + Send + 'static,\n    ) -> impl Future<Output = Result<(), ShutDown>> {\n        let (sender, recv) = oneshot::channel();\n        let _ = self.send.send(Message {\n            sender,\n            op: Box::new(f),\n        });\n        recv.map(|x| x.map_err(|_| ShutDown))\n    }\n}\n\npub struct TransferContext {\n    pub device: Arc<ash::Device>,\n    pub queue_family: u32,\n    /// May be equal to queue_family\n    pub dst_queue_family: u32,\n    pub stages: vk::PipelineStageFlags,\n    pub buffer_barriers: Vec<vk::BufferMemoryBarrier<'static>>,\n    pub image_barriers: Vec<vk::ImageMemoryBarrier<'static>>,\n}\n\n#[derive(Debug, Copy, Clone)]\npub struct ShutDown;\n\nimpl fmt::Display for ShutDown {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        f.pad(\"transfer reactor shut down\")\n    }\n}\n\nimpl std::error::Error for ShutDown {}\n\n#[allow(clippy::type_complexity)]\nstruct Message {\n    sender: oneshot::Sender<()>,\n    op: Box<dyn FnOnce(&mut TransferContext, vk::CommandBuffer) + Send>,\n}\n\npub struct Reactor {\n    queue: vk::Queue,\n    spare_fences: Vec<vk::Fence>,\n    spare_cmds: Vec<vk::CommandBuffer>,\n    in_flight: Vec<Batch>,\n    /// Fences for in-flight transfer operations; directly corresponds to in_flight entries\n    in_flight_fences: Vec<vk::Fence>,\n    cmd_pool: vk::CommandPool,\n    pending: Option<Batch>,\n    recv: mpsc::UnboundedReceiver<Message>,\n    ctx: TransferContext,\n}\n\nimpl Reactor {\n    /// Safety: 
valid use use of queue_family, queue\n    pub unsafe fn new(\n        device: Arc<ash::Device>,\n        queue_family: u32,\n        queue: vk::Queue,\n        dst_queue_family: Option<u32>,\n    ) -> (TransferHandle, Self) {\n        unsafe {\n            let (send, recv) = mpsc::unbounded_channel();\n            let cmd_pool = device\n                .create_command_pool(\n                    &vk::CommandPoolCreateInfo::default()\n                        .queue_family_index(queue_family)\n                        .flags(vk::CommandPoolCreateFlags::RESET_COMMAND_BUFFER),\n                    None,\n                )\n                .unwrap();\n            (\n                TransferHandle { send },\n                Self {\n                    queue,\n                    spare_fences: Vec::new(),\n                    spare_cmds: Vec::new(),\n                    in_flight: Vec::new(),\n                    in_flight_fences: Vec::new(),\n                    cmd_pool,\n                    pending: None,\n                    recv,\n                    ctx: TransferContext {\n                        device,\n                        queue_family,\n                        dst_queue_family: dst_queue_family.unwrap_or(queue_family),\n                        stages: vk::PipelineStageFlags::empty(),\n                        buffer_barriers: Vec::new(),\n                        image_barriers: Vec::new(),\n                    },\n                },\n            )\n        }\n    }\n\n    pub fn poll(&mut self) -> Result<(), Disconnected> {\n        self.run_for(Duration::from_secs(0))\n    }\n\n    pub fn run_for(&mut self, timeout: Duration) -> Result<(), Disconnected> {\n        self.queue()?;\n        self.flush();\n\n        if self.in_flight.is_empty() {\n            thread::sleep(timeout);\n            return Ok(());\n        }\n\n        // We could move this to a background thread and continue to submit new work while it's\n        // waiting, but we want to batch up 
operations a bit anyway.\n        let result = unsafe {\n            self.ctx.device.wait_for_fences(\n                &self.in_flight_fences,\n                false,\n                u64::try_from(timeout.as_nanos()).unwrap_or(u64::MAX),\n            )\n        };\n        match result {\n            Err(vk::Result::TIMEOUT) => return Ok(()),\n            Err(e) => panic!(\"{}\", e),\n            Ok(()) => {}\n        }\n        for i in (0..self.in_flight.len()).rev() {\n            unsafe {\n                if self\n                    .ctx\n                    .device\n                    .get_fence_status(self.in_flight_fences[i])\n                    .unwrap()\n                {\n                    let fence = self.in_flight_fences.swap_remove(i);\n                    self.ctx.device.reset_fences(&[fence]).unwrap();\n                    self.spare_fences.push(fence);\n                    let batch = self.in_flight.swap_remove(i);\n                    for sender in batch.senders {\n                        let _ = sender.send(());\n                    }\n                    self.spare_cmds.push(batch.cmd);\n                }\n            }\n        }\n        Ok(())\n    }\n\n    fn queue(&mut self) -> Result<(), Disconnected> {\n        loop {\n            match self.recv.try_recv() {\n                Ok(Message { sender, op }) => {\n                    let cmd = self.prepare(sender);\n                    op(&mut self.ctx, cmd);\n                }\n                Err(TryRecvError::Disconnected) => return Err(self::Disconnected),\n                Err(TryRecvError::Empty) => return Ok(()),\n            }\n        }\n    }\n\n    fn prepare(&mut self, send: oneshot::Sender<()>) -> vk::CommandBuffer {\n        if let Some(ref mut pending) = self.pending {\n            pending.senders.push(send);\n            return pending.cmd;\n        }\n        let cmd = if let Some(cmd) = self.spare_cmds.pop() {\n            cmd\n        } else {\n            unsafe {\n      
          self.ctx\n                    .device\n                    .allocate_command_buffers(\n                        &vk::CommandBufferAllocateInfo::default()\n                            .command_pool(self.cmd_pool)\n                            .command_buffer_count(1),\n                    )\n                    .unwrap()\n                    .into_iter()\n                    .next()\n                    .unwrap()\n            }\n        };\n        unsafe {\n            self.ctx\n                .device\n                .begin_command_buffer(\n                    cmd,\n                    &vk::CommandBufferBeginInfo::default()\n                        .flags(vk::CommandBufferUsageFlags::ONE_TIME_SUBMIT),\n                )\n                .unwrap();\n        }\n        self.pending = Some(Batch {\n            cmd,\n            senders: vec![send],\n        });\n        cmd\n    }\n\n    /// Submit queued operations\n    fn flush(&mut self) {\n        let pending = match self.pending.take() {\n            Some(x) => x,\n            None => return,\n        };\n        let device = &self.ctx.device;\n        let fence = if let Some(fence) = self.spare_fences.pop() {\n            fence\n        } else {\n            unsafe {\n                device\n                    .create_fence(&vk::FenceCreateInfo::default(), None)\n                    .unwrap()\n            }\n        };\n        unsafe {\n            device.cmd_pipeline_barrier(\n                pending.cmd,\n                vk::PipelineStageFlags::TRANSFER,\n                self.ctx.stages,\n                vk::DependencyFlags::default(),\n                &[],\n                &self.ctx.buffer_barriers,\n                &self.ctx.image_barriers,\n            );\n            device.end_command_buffer(pending.cmd).unwrap();\n            device\n                .queue_submit(\n                    self.queue,\n                    &[vk::SubmitInfo::default().command_buffers(&[pending.cmd])],\n              
      fence,\n                )\n                .unwrap();\n        }\n        self.ctx.stages = vk::PipelineStageFlags::empty();\n        self.ctx.buffer_barriers.clear();\n        self.ctx.image_barriers.clear();\n        self.in_flight.push(pending);\n        self.in_flight_fences.push(fence);\n    }\n}\n\nimpl Drop for Reactor {\n    fn drop(&mut self) {\n        let device = &self.ctx.device;\n        unsafe {\n            if !self.in_flight.is_empty() {\n                device\n                    .wait_for_fences(&self.in_flight_fences, true, u64::MAX)\n                    .unwrap();\n            }\n            device.destroy_command_pool(self.cmd_pool, None);\n            for fence in self.spare_fences.drain(..) {\n                device.destroy_fence(fence, None);\n            }\n            for fence in self.in_flight_fences.drain(..) {\n                device.destroy_fence(fence, None);\n            }\n        }\n    }\n}\n\nunsafe impl Send for Reactor {}\n\nstruct Batch {\n    cmd: vk::CommandBuffer,\n    // Future work: efficient broadcast future\n    senders: Vec<oneshot::Sender<()>>,\n}\n\n#[derive(Debug, Copy, Clone)]\npub struct Disconnected;\n"
  },
  {
    "path": "client/src/lib.rs",
    "content": "#![allow(clippy::new_without_default)]\n#![allow(clippy::needless_borrowed_reference)]\n\nmacro_rules! cstr {\n    ($x:literal) => {{\n        #[allow(unused_unsafe)]\n        unsafe {\n            std::ffi::CStr::from_bytes_with_nul_unchecked(concat!($x, \"\\0\").as_bytes())\n        }\n    }};\n}\n\nextern crate nalgebra as na;\nmod config;\npub mod graphics;\nmod lahar_deprecated;\nmod loader;\nmod local_character_controller;\npub mod metrics;\npub mod net;\nmod prediction;\npub mod sim;\nmod worldgen_driver;\n\npub use config::Config;\npub use sim::Sim;\n\nuse loader::{Asset, Loader};\n"
  },
  {
    "path": "client/src/loader.rs",
    "content": "use std::{\n    any::{Any, TypeId},\n    convert::TryFrom,\n    marker::PhantomData,\n    sync::{Arc, Mutex},\n};\n\nuse anyhow::Result;\nuse ash::vk;\nuse downcast_rs::{Downcast, impl_downcast};\nuse fxhash::FxHashMap;\nuse lahar::{BufferRegion, DedicatedImage};\nuse tokio::sync::mpsc;\nuse tracing::error;\n\nuse crate::{\n    Config,\n    graphics::Base,\n    lahar_deprecated::{\n        staging::StagingBuffer,\n        transfer::{self, TransferHandle},\n    },\n};\n\npub trait Cleanup {\n    unsafe fn cleanup(self, gfx: &Base);\n}\n\nimpl Cleanup for DedicatedImage {\n    unsafe fn cleanup(mut self, gfx: &Base) {\n        unsafe {\n            self.destroy(&gfx.device);\n        }\n    }\n}\n\npub trait Loadable: Send + 'static {\n    type Output: Send + 'static + Cleanup;\n    fn load(self, ctx: &LoadCtx) -> LoadFuture<'_, Self::Output>;\n}\n\npub type LoadFuture<'a, T> =\n    std::pin::Pin<Box<dyn std::future::Future<Output = Result<T>> + 'a + Send>>;\n\npub struct Loader {\n    runtime: tokio::runtime::Runtime,\n    recv: mpsc::UnboundedReceiver<Message>,\n    shared: Arc<Shared>,\n    reactor: transfer::Reactor,\n    tables_index: FxHashMap<TypeId, u32>,\n    tables: Vec<Box<dyn AnyTable>>,\n}\n\nimpl Loader {\n    pub fn new(cfg: Arc<Config>, gfx: Arc<Base>) -> Self {\n        let runtime = tokio::runtime::Builder::new_multi_thread().build().unwrap();\n        let (send, recv) = mpsc::unbounded_channel();\n        let staging =\n            StagingBuffer::new(gfx.device.clone(), &gfx.memory_properties, 32 * 1024 * 1024);\n        unsafe {\n            gfx.set_name(staging.buffer(), cstr!(\"staging\"));\n        }\n        let (transfer, reactor) = unsafe {\n            transfer::Reactor::new(gfx.device.clone(), gfx.queue_family, gfx.queue, None)\n        };\n        let vertex_alloc = unsafe {\n            BufferRegion::new(\n                &gfx.device,\n                &gfx.memory_properties,\n                16 * 1024 * 1024,\n            
    vk::BufferUsageFlags::TRANSFER_DST | vk::BufferUsageFlags::VERTEX_BUFFER,\n            )\n        };\n        let index_alloc = unsafe {\n            BufferRegion::new(\n                &gfx.device,\n                &gfx.memory_properties,\n                16 * 1024 * 1024,\n                vk::BufferUsageFlags::TRANSFER_DST | vk::BufferUsageFlags::INDEX_BUFFER,\n            )\n        };\n        let mesh_ds_layout = unsafe {\n            gfx.device\n                .create_descriptor_set_layout(\n                    &vk::DescriptorSetLayoutCreateInfo::default().bindings(&[\n                        vk::DescriptorSetLayoutBinding {\n                            binding: 0,\n                            descriptor_type: vk::DescriptorType::COMBINED_IMAGE_SAMPLER,\n                            descriptor_count: 1,\n                            stage_flags: vk::ShaderStageFlags::FRAGMENT,\n                            p_immutable_samplers: &gfx.linear_sampler,\n                            ..vk::DescriptorSetLayoutBinding::default()\n                        },\n                    ]),\n                    None,\n                )\n                .unwrap()\n        };\n        let shared = Arc::new(Shared {\n            send,\n            ctx: LoadCtx {\n                cfg,\n                gfx,\n                staging,\n                transfer,\n                vertex_alloc: Mutex::new(vertex_alloc),\n                index_alloc: Mutex::new(index_alloc),\n                mesh_ds_layout,\n            },\n        });\n        Self {\n            runtime,\n            recv,\n            shared,\n            reactor,\n            tables_index: FxHashMap::default(),\n            tables: Vec::new(),\n        }\n    }\n\n    pub fn load<L: Loadable>(&mut self, description: &'static str, x: L) -> Asset<L::Output> {\n        let tables = &mut self.tables;\n        let table = *self\n            .tables_index\n            .entry(TypeId::of::<L::Output>())\n            
.or_insert_with(|| {\n                let n = u32::try_from(tables.len()).unwrap();\n                tables.push(Box::new(Table::<L::Output>::new()));\n                n\n            });\n        let index = self.tables[table as usize]\n            .downcast_mut::<Table<L::Output>>()\n            .unwrap()\n            .alloc();\n        let shared = self.shared.clone();\n        self.runtime.spawn(async move {\n            match shared.ctx.load(x).await {\n                Ok(x) => {\n                    let _ = shared.send.send(Message {\n                        table,\n                        index,\n                        result: Box::new(x),\n                    });\n                }\n                Err(e) => {\n                    error!(\"{} load failed: {:#}\", description, e);\n                }\n            }\n        });\n        Asset {\n            table,\n            index,\n            _marker: PhantomData,\n        }\n    }\n\n    pub fn make_queue<T: Loadable>(&mut self, capacity: usize) -> WorkQueue<T> {\n        let (input_send, mut input_recv) = mpsc::channel::<T>(capacity);\n        let (output_send, output_recv) = mpsc::channel::<T::Output>(capacity);\n        let shared = self.shared.clone();\n        self.runtime.spawn(async move {\n            while let Some(x) = input_recv.recv().await {\n                let shared = shared.clone();\n                let out = output_send.clone();\n                tokio::spawn(async move {\n                    match shared.ctx.load(x).await {\n                        Ok(x) => {\n                            if let Err(e) = out.send(x).await {\n                                unsafe {\n                                    e.0.cleanup(&shared.ctx.gfx);\n                                }\n                            }\n                        }\n                        Err(e) => {\n                            error!(\n                                \"streaming {} load failed: {:#}\",\n                         
       std::any::type_name::<T>(),\n                                e\n                            );\n                        }\n                    }\n                });\n            }\n        });\n        WorkQueue {\n            shared: self.shared.clone(),\n            send: input_send,\n            recv: output_recv,\n            capacity,\n            fill: 0,\n        }\n    }\n\n    /// Invoke `finish` functions of spawned loading operations\n    pub fn drive(&mut self) {\n        self.reactor.poll().unwrap();\n        while let Ok(msg) = self.recv.try_recv() {\n            self.tables[msg.table as usize].finish(msg.index, msg.result);\n        }\n    }\n\n    pub fn get<T: 'static + Cleanup>(&self, handle: Asset<T>) -> Option<&T> {\n        self.tables[handle.table as usize]\n            .downcast_ref::<Table<T>>()\n            .unwrap()\n            .data[handle.index as usize]\n            .as_ref()\n    }\n\n    pub fn ctx(&self) -> &LoadCtx {\n        &self.shared.ctx\n    }\n}\n\nimpl Drop for Loader {\n    fn drop(&mut self) {\n        for table in self.tables.drain(..) 
{\n            table.cleanup(&self.shared.ctx.gfx);\n        }\n    }\n}\n\nstruct Shared {\n    send: mpsc::UnboundedSender<Message>,\n    ctx: LoadCtx,\n}\n\nstruct Message {\n    table: u32,\n    index: u32,\n    result: Box<dyn Any + Send>,\n}\n\npub struct LoadCtx {\n    pub cfg: Arc<Config>,\n    pub gfx: Arc<Base>,\n    pub staging: StagingBuffer,\n    pub transfer: TransferHandle,\n    pub vertex_alloc: Mutex<BufferRegion>,\n    pub index_alloc: Mutex<BufferRegion>,\n    pub mesh_ds_layout: vk::DescriptorSetLayout,\n}\n\nimpl LoadCtx {\n    async fn load<T: Loadable>(&self, x: T) -> Result<T::Output> {\n        x.load(self).await\n    }\n}\n\nimpl Drop for LoadCtx {\n    fn drop(&mut self) {\n        let device = &*self.gfx.device;\n        unsafe {\n            self.index_alloc.lock().unwrap().destroy(device);\n            self.vertex_alloc.lock().unwrap().destroy(device);\n            device.destroy_descriptor_set_layout(self.mesh_ds_layout, None);\n        }\n    }\n}\n\ntrait AnyTable: Downcast {\n    fn finish(&mut self, index: u32, value: Box<dyn Any + Send>);\n    fn cleanup(self: Box<Self>, gfx: &Base);\n}\n\nimpl_downcast!(AnyTable);\n\nstruct Table<T> {\n    data: Vec<Option<T>>,\n}\n\nimpl<T> Table<T> {\n    fn new() -> Self {\n        Self { data: Vec::new() }\n    }\n\n    fn alloc(&mut self) -> u32 {\n        let n = u32::try_from(self.data.len()).unwrap();\n        self.data.push(None);\n        n\n    }\n}\n\nimpl<T: 'static + Cleanup> AnyTable for Table<T> {\n    fn finish(&mut self, index: u32, value: Box<dyn Any + Send>) {\n        self.data[index as usize] = Some(*value.downcast().unwrap());\n    }\n\n    fn cleanup(self: Box<Self>, gfx: &Base) {\n        for x in self.data.into_iter().flatten() {\n            unsafe {\n                x.cleanup(gfx);\n            }\n        }\n    }\n}\n\n#[derive(Debug, Eq, PartialEq)]\npub struct Asset<T: 'static> {\n    table: u32,\n    index: u32,\n    _marker: PhantomData<fn() -> T>,\n}\n\nimpl<T: 
'static> Clone for Asset<T> {\n    fn clone(&self) -> Self {\n        *self\n    }\n}\n\nimpl<T: 'static> Copy for Asset<T> {}\n\n/// A bounded-capacity queue for streaming specific data (e.g. terrain chunks)\n///\n/// Limiting capacity ensures predictable memory usage and helps focus computational resources on\n/// recent requests when the total number of requests that could be submitted is large. This is\n/// particularly useful for terrain, where recent requests are more likely to be close to the\n/// viewpoint.\npub struct WorkQueue<T: Loadable> {\n    shared: Arc<Shared>,\n    send: mpsc::Sender<T>,\n    recv: mpsc::Receiver<T::Output>,\n    capacity: usize,\n    fill: usize,\n}\n\nimpl<T: Loadable> WorkQueue<T> {\n    /// Begin loading a single item, if capacity is available\n    pub fn load(&mut self, x: T) -> Result<(), T> {\n        use tokio::sync::mpsc::error::TrySendError::*;\n        if self.fill == self.capacity {\n            return Err(x);\n        }\n        self.fill += 1;\n        self.send.try_send(x).map_err(|e| {\n            self.fill -= 1;\n            match e {\n                Full(x) => x,\n                Closed(x) => x,\n            }\n        })\n    }\n\n    /// Fetch a load result if one is ready, freeing capacity\n    pub fn poll(&mut self) -> Option<T::Output> {\n        let result = self.recv.try_recv().ok()?;\n        self.fill -= 1;\n        Some(result)\n    }\n}\n\nimpl<T: Loadable> Drop for WorkQueue<T> {\n    fn drop(&mut self) {\n        // Ensure any future completions will be cleaned up by the loader\n        self.recv.close();\n        // Gracefully drain already-completed tasks\n        while let Ok(x) = self.recv.try_recv() {\n            self.fill -= 1;\n            unsafe {\n                x.cleanup(&self.shared.ctx.gfx);\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "client/src/local_character_controller.rs",
    "content": "use common::{math, math::MIsometry, proto::Position};\n\npub struct LocalCharacterController {\n    /// The last extrapolated inter-frame view position, used for rendering and gravity-specific\n    /// orientation computations\n    position: Position,\n\n    /// The up vector relative to position, ignoring orientation\n    up: na::UnitVector3<f32>,\n\n    /// The quaternion adjustment to the character position to represent its actual apparent orientation\n    orientation: na::UnitQuaternion<f32>,\n}\n\nimpl LocalCharacterController {\n    pub fn new() -> Self {\n        LocalCharacterController {\n            position: Position::origin(),\n            orientation: na::UnitQuaternion::identity(),\n            up: na::Vector::z_axis(),\n        }\n    }\n\n    /// Get the current position with orientation applied to it\n    pub fn oriented_position(&self) -> Position {\n        Position {\n            node: self.position.node,\n            local: self.position.local * MIsometry::from(self.orientation),\n        }\n    }\n\n    pub fn orientation(&self) -> na::UnitQuaternion<f32> {\n        self.orientation\n    }\n\n    /// Updates the LocalCharacter based on outside information. 
Note that the `up` parameter is relative\n    /// only to `position`, not the character's orientation.\n    pub fn update_position(\n        &mut self,\n        position: Position,\n        up: na::UnitVector3<f32>,\n        preserve_up_alignment: bool,\n    ) {\n        if preserve_up_alignment {\n            // Rotate the character orientation to stay consistent with changes in gravity\n            self.orientation = math::rotation_between_axis(&self.up, &up, 1e-5)\n                .unwrap_or(na::UnitQuaternion::identity())\n                * self.orientation;\n        }\n\n        self.position = position;\n        self.up = up;\n    }\n\n    /// Rotates the camera's view by locally adding pitch and yaw.\n    pub fn look_free(&mut self, delta_yaw: f32, delta_pitch: f32, delta_roll: f32) {\n        self.orientation *= na::UnitQuaternion::from_axis_angle(&na::Vector3::y_axis(), delta_yaw)\n            * na::UnitQuaternion::from_axis_angle(&na::Vector3::x_axis(), delta_pitch)\n            * na::UnitQuaternion::from_axis_angle(&na::Vector3::z_axis(), delta_roll);\n    }\n\n    /// Rotates the camera's view with standard first-person walking simulator mouse controls. This function\n    /// is designed to be flexible enough to work with any starting orientation, but it works best when the\n    /// camera is level, not rolled to the left or right.\n    pub fn look_level(&mut self, delta_yaw: f32, delta_pitch: f32) {\n        // Get orientation-relative up\n        let up = self.orientation.inverse() * self.up;\n\n        // Handle yaw. This is as simple as rotating the view about the up vector\n        self.orientation *= na::UnitQuaternion::from_axis_angle(&up, delta_yaw);\n\n        // Handling pitch is more complicated because the view angle needs to be capped. The rotation axis\n        // is the camera's local x-axis (left-right axis). 
If the camera is level, this axis is perpendicular\n        // to the up vector.\n\n        // We need to know the current pitch to properly cap pitch changes, and this is only well-defined\n        // if the pitch axis is not too similar to the up vector, so we skip applying pitch changes if this\n        // isn't the case.\n        if up.x.abs() < 0.9 {\n            // Compute the current pitch by ignoring the x-component of the up vector and assuming the camera\n            // is level.\n            let current_pitch = -up.z.atan2(up.y);\n            let mut target_pitch = current_pitch + delta_pitch;\n            if delta_pitch > 0.0 {\n                target_pitch = target_pitch\n                    .min(std::f32::consts::FRAC_PI_2) // Cap the view angle at looking straight up\n                    .max(current_pitch); // But if already upside-down, don't make any corrections.\n            } else {\n                target_pitch = target_pitch\n                    .max(-std::f32::consts::FRAC_PI_2) // Cap the view angle at looking straight down\n                    .min(current_pitch); // But if already upside-down, don't make any corrections.\n            }\n\n            self.orientation *= na::UnitQuaternion::from_axis_angle(\n                &na::Vector3::x_axis(),\n                target_pitch - current_pitch,\n            );\n        }\n    }\n\n    /// Instantly updates the current orientation quaternion to make the camera level. 
This function\n    /// is designed to be numerically stable for any camera orientation.\n    pub fn align_to_gravity(&mut self) {\n        // Get orientation-relative up\n        let up = self.orientation.inverse() * self.up;\n\n        if up.z.abs() < 0.9 {\n            // If facing not too vertically, roll the camera to make it level.\n            let delta_roll = -up.x.atan2(up.y);\n            self.orientation *=\n                na::UnitQuaternion::from_axis_angle(&na::Vector3::z_axis(), delta_roll);\n        } else if up.y > 0.0 {\n            // Otherwise, if not upside-down, yaw the camera to make it level.\n            let delta_yaw = (up.x / up.z).atan();\n            self.orientation *=\n                na::UnitQuaternion::from_axis_angle(&na::Vector3::y_axis(), delta_yaw);\n        } else {\n            // Otherwise, rotate the camera to look straight up or down.\n            self.orientation *=\n                na::UnitQuaternion::rotation_between(&(na::Vector3::z() * up.z.signum()), &up)\n                    .unwrap();\n        }\n    }\n\n    /// Returns an orientation quaternion that is as faithful as possible to the current orientation quaternion\n    /// while being restricted to ensuring the view is level and does not look up or down. 
This function's main\n    /// purpose is to figure out what direction the character should go when a movement key is pressed.\n    pub fn horizontal_orientation(&mut self) -> na::UnitQuaternion<f32> {\n        // Get orientation-relative up\n        let up = self.orientation.inverse() * self.up;\n\n        let forward = if up.x.abs() < 0.9 {\n            // Rotate the local forward vector about the locally horizontal axis until it is horizontal\n            na::Vector3::new(0.0, -up.z, up.y)\n        } else {\n            // Project the local forward vector to the level plane\n            na::Vector3::z() - up.into_inner() * up.z\n        };\n\n        self.orientation * na::UnitQuaternion::face_towards(&forward, &up)\n    }\n\n    pub fn renormalize_orientation(&mut self) {\n        self.orientation.renormalize_fast();\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use approx::assert_abs_diff_eq;\n\n    use super::*;\n\n    fn assert_aligned_to_gravity(subject: &LocalCharacterController) {\n        let up = subject.orientation.inverse() * subject.up;\n\n        // Make sure up vector doesn't point downwards, as that would mean the character is upside-down\n        assert!(up.y >= -1e-5);\n\n        // Make sure the up vector has no sideways component, as that would mean the character view is tilted\n        assert_abs_diff_eq!(up.x, 0.0, epsilon = 1.0e-5);\n    }\n\n    fn assert_yaw_and_pitch_correct(\n        base_orientation: na::UnitQuaternion<f32>,\n        yaw: f32,\n        pitch: f32,\n        actual_orientation: na::UnitQuaternion<f32>,\n    ) {\n        let expected_orientation = base_orientation\n            * na::UnitQuaternion::from_axis_angle(&na::Vector3::y_axis(), yaw)\n            * na::UnitQuaternion::from_axis_angle(&na::Vector3::x_axis(), pitch);\n        assert_abs_diff_eq!(expected_orientation, actual_orientation, epsilon = 1.0e-5);\n    }\n\n    #[test]\n    fn look_level_and_horizontal_orientation_examples() {\n        let mut subject = 
LocalCharacterController::new();\n\n        // Pick an arbitrary orientation\n        let base_orientation = na::UnitQuaternion::new(na::Vector3::new(1.3, -2.1, 0.5));\n        subject.orientation = base_orientation;\n\n        // Choose the up vector that makes the current orientation a horizontal orientation\n        subject.up = subject.orientation * na::Vector3::y_axis();\n\n        let mut yaw = 0.0;\n        let mut pitch = 0.0;\n\n        // Sanity check that the setup makes sense\n        assert_aligned_to_gravity(&subject);\n        assert_yaw_and_pitch_correct(base_orientation, yaw, pitch, subject.orientation);\n        assert_yaw_and_pitch_correct(base_orientation, yaw, 0.0, subject.horizontal_orientation());\n\n        // Standard look_level expression\n        subject.look_level(0.5, -0.4);\n        yaw += 0.5;\n        pitch -= 0.4;\n        assert_aligned_to_gravity(&subject);\n        assert_yaw_and_pitch_correct(base_orientation, yaw, pitch, subject.orientation);\n        assert_yaw_and_pitch_correct(base_orientation, yaw, 0.0, subject.horizontal_orientation());\n\n        // Look up past the cap\n        subject.look_level(-0.2, 3.0);\n        yaw -= 0.2;\n        pitch = std::f32::consts::FRAC_PI_2;\n        assert_aligned_to_gravity(&subject);\n        assert_yaw_and_pitch_correct(base_orientation, yaw, pitch, subject.orientation);\n        assert_yaw_and_pitch_correct(base_orientation, yaw, 0.0, subject.horizontal_orientation());\n\n        // Look down past the cap\n        subject.look_level(6.2, -7.2);\n        yaw += 6.2;\n        pitch = -std::f32::consts::FRAC_PI_2;\n        assert_aligned_to_gravity(&subject);\n        assert_yaw_and_pitch_correct(base_orientation, yaw, pitch, subject.orientation);\n        assert_yaw_and_pitch_correct(base_orientation, yaw, 0.0, subject.horizontal_orientation());\n\n        // Go back to a less unusual orientation\n        subject.look_level(-1.2, 0.8);\n        yaw -= 1.2;\n        pitch += 0.8;\n      
  assert_aligned_to_gravity(&subject);\n        assert_yaw_and_pitch_correct(base_orientation, yaw, pitch, subject.orientation);\n        assert_yaw_and_pitch_correct(base_orientation, yaw, 0.0, subject.horizontal_orientation());\n    }\n\n    #[test]\n    fn align_to_gravity_examples() {\n        // Pick an arbitrary orientation\n        let base_orientation = na::UnitQuaternion::new(na::Vector3::new(1.3, -2.1, 0.5));\n\n        // Choose the up vector that makes the current orientation close to horizontal orientation\n        let mut subject = LocalCharacterController::new();\n        subject.orientation = base_orientation;\n        subject.up =\n            subject.orientation * na::UnitVector3::new_normalize(na::Vector3::new(0.1, 1.0, 0.2));\n        let look_direction = subject.orientation * na::Vector3::z_axis();\n\n        subject.align_to_gravity();\n\n        assert_aligned_to_gravity(&subject);\n        // The look_direction shouldn't change\n        assert_abs_diff_eq!(\n            look_direction,\n            subject.orientation * na::Vector3::z_axis(),\n            epsilon = 1e-5\n        );\n\n        // Choose the up vector that makes the current orientation close to horizontal orientation but upside-down\n        let mut subject = LocalCharacterController::new();\n        subject.orientation = base_orientation;\n        subject.up =\n            subject.orientation * na::UnitVector3::new_normalize(na::Vector3::new(0.1, -1.0, 0.2));\n        let look_direction = subject.orientation * na::Vector3::z_axis();\n\n        subject.align_to_gravity();\n\n        assert_aligned_to_gravity(&subject);\n        // The look_direction still shouldn't change\n        assert_abs_diff_eq!(\n            look_direction,\n            subject.orientation * na::Vector3::z_axis(),\n            epsilon = 1e-5\n        );\n\n        // Make the character face close to straight up\n        let mut subject = LocalCharacterController::new();\n        subject.orientation = 
base_orientation;\n        subject.up = subject.orientation\n            * na::UnitVector3::new_normalize(na::Vector3::new(-0.03, 0.05, 1.0));\n        subject.align_to_gravity();\n        assert_aligned_to_gravity(&subject);\n\n        // Make the character face close to straight down and be slightly upside-down\n        let mut subject = LocalCharacterController::new();\n        subject.orientation = base_orientation;\n        subject.up = subject.orientation\n            * na::UnitVector3::new_normalize(na::Vector3::new(-0.03, -0.05, -1.0));\n        subject.align_to_gravity();\n        assert_aligned_to_gravity(&subject);\n    }\n\n    #[test]\n    fn update_position_example() {\n        // Pick an arbitrary orientation\n        let base_orientation = na::UnitQuaternion::new(na::Vector3::new(1.3, -2.1, 0.5));\n\n        let mut subject = LocalCharacterController::new();\n        subject.orientation = base_orientation;\n        subject.up =\n            subject.orientation * na::UnitVector3::new_normalize(na::Vector3::new(0.0, 1.0, 0.2));\n\n        // Sanity check setup (character should already be aligned to gravity)\n        assert_aligned_to_gravity(&subject);\n        let old_up_vector_y_component = (subject.orientation.inverse() * subject.up).y;\n\n        subject.update_position(\n            Position::origin(),\n            na::UnitVector3::new_normalize(na::Vector3::new(0.1, 0.2, 0.5)),\n            true,\n        );\n        assert_aligned_to_gravity(&subject);\n        let new_up_vector_y_component = (subject.orientation.inverse() * subject.up).y;\n\n        // We don't want the camera pitch to drift as the up vector changes\n        assert_abs_diff_eq!(\n            old_up_vector_y_component,\n            new_up_vector_y_component,\n            epsilon = 1e-5\n        );\n    }\n}\n"
  },
  {
    "path": "client/src/main.rs",
    "content": "use std::{sync::Arc, thread};\n\nuse client::{Config, graphics, metrics, net};\nuse common::{Anonymize, proto};\nuse save::Save;\n\nuse ash::khr;\nuse server::Server;\nuse tracing::{Instrument, debug, error, error_span, info};\nuse winit::{\n    application::ApplicationHandler,\n    event_loop::{ActiveEventLoop, EventLoop},\n};\n\nfn main() {\n    // Set up logging\n    common::init_tracing();\n    let metrics = crate::metrics::init();\n\n    let dirs = directories::ProjectDirs::from(\"\", \"\", \"hypermine\").unwrap();\n    let config = Arc::new(Config::load(&dirs));\n\n    let net = match config.server {\n        None => {\n            // spawn an in-process server\n            let sim_cfg = config.local_simulation.clone();\n\n            let save = dirs.data_local_dir().join(&config.save);\n            info!(\"using save file {}\", save.anonymize().display());\n            std::fs::create_dir_all(save.parent().unwrap()).unwrap();\n            let save = match Save::open(&save, config.local_simulation.chunk_size) {\n                Ok(x) => x,\n                Err(e) => {\n                    error!(\"couldn't open save: {}\", e);\n                    return;\n                }\n            };\n\n            let mut server = match Server::new(None, sim_cfg, save) {\n                Ok(server) => server,\n                Err(e) => {\n                    eprintln!(\"{e:#}\");\n                    std::process::exit(1);\n                }\n            };\n\n            let (handle, backend) = server::Handle::loopback();\n            let name = (*config.name).into();\n\n            thread::spawn(move || {\n                let runtime = tokio::runtime::Builder::new_current_thread()\n                    .enable_time()\n                    .build()\n                    .unwrap();\n                let _guard = runtime.enter();\n                server\n                    .connect(proto::ClientHello { name }, backend)\n                    .unwrap();\n      
          runtime.block_on(server.run().instrument(error_span!(\"server\")));\n                debug!(\"server thread terminated\");\n            });\n\n            handle\n        }\n        Some(_) => net::spawn(config.clone()),\n    };\n    let mut app = App {\n        config,\n        dirs,\n        metrics,\n        window: None,\n        net: Some(net),\n    };\n\n    let event_loop = EventLoop::new().unwrap();\n    event_loop.set_control_flow(winit::event_loop::ControlFlow::Poll);\n    event_loop.run_app(&mut app).unwrap();\n}\n\nstruct App {\n    config: Arc<Config>,\n    dirs: directories::ProjectDirs,\n    metrics: Arc<metrics::Recorder>,\n    window: Option<graphics::Window>,\n    net: Option<server::Handle>,\n}\n\nimpl ApplicationHandler for App {\n    fn resumed(&mut self, event_loop: &ActiveEventLoop) {\n        // Create the OS window\n        let window = graphics::EarlyWindow::new(event_loop);\n        // Initialize Vulkan with the extensions needed to render to the window\n        let core = Arc::new(graphics::Core::new(window.required_extensions()));\n\n        // Finish creating the window, including the Vulkan resources used to render to it\n        let mut window = graphics::Window::new(\n            window,\n            core.clone(),\n            self.config.clone(),\n            self.net.take().unwrap(),\n        );\n\n        // Initialize widely-shared graphics resources\n        let gfx = Arc::new(\n            graphics::Base::new(\n                core,\n                Some(self.dirs.cache_dir().join(\"pipeline_cache\")),\n                &[khr::swapchain::NAME],\n                |physical, queue_family| window.supports(physical, queue_family),\n            )\n            .unwrap(),\n        );\n        window.init_rendering(gfx.clone());\n        self.window = Some(window);\n    }\n\n    fn suspended(&mut self, _event_loop: &ActiveEventLoop) {\n        self.window = None;\n    }\n\n    fn window_event(\n        &mut self,\n        
event_loop: &ActiveEventLoop,\n        _window_id: winit::window::WindowId,\n        event: winit::event::WindowEvent,\n    ) {\n        let Some(window) = self.window.as_mut() else {\n            return;\n        };\n        window.handle_event(event, event_loop);\n    }\n\n    fn device_event(\n        &mut self,\n        _event_loop: &ActiveEventLoop,\n        _device_id: winit::event::DeviceId,\n        event: winit::event::DeviceEvent,\n    ) {\n        let Some(window) = self.window.as_mut() else {\n            return;\n        };\n        window.handle_device_event(event);\n    }\n\n    fn about_to_wait(&mut self, _event_loop: &ActiveEventLoop) {\n        let Some(window) = self.window.as_mut() else {\n            return;\n        };\n        window.window.request_redraw();\n    }\n\n    fn exiting(&mut self, _event_loop: &ActiveEventLoop) {\n        self.metrics.report();\n    }\n}\n"
  },
  {
    "path": "client/src/metrics.rs",
    "content": "use std::{\n    collections::HashMap,\n    sync::{Arc, Mutex, OnceLock, RwLock},\n    time::Duration,\n};\n\nuse hdrhistogram::Histogram;\nuse tracing::info;\n\npub fn init() -> Arc<Recorder> {\n    let recorder = Arc::new(Recorder {\n        histograms: RwLock::new(HashMap::new()),\n    });\n    metrics::set_global_recorder(ArcRecorder(recorder.clone())).unwrap();\n    recorder\n}\n\npub struct Recorder {\n    histograms: RwLock<HashMap<metrics::Key, Mutex<Histogram<u64>>>>,\n}\n\nimpl Recorder {\n    pub fn report(&self) {\n        // metrics crate documentation assures us that Key's interior mutability does not affect the hash code.\n        #[allow(clippy::mutable_key_type)]\n        let histograms = &*self.histograms.read().unwrap();\n        // Sort histogram entries before displaying them\n        let mut histograms = histograms.iter().collect::<Vec<_>>();\n        histograms.sort_unstable_by_key(|(key, _)| *key);\n        for (key, histogram) in histograms {\n            let histogram = histogram.lock().unwrap();\n            info!(\n                key = %key.name(),\n                percentile_25 = ?Duration::from_nanos(histogram.value_at_quantile(0.25)),\n                percentile_50 = ?Duration::from_nanos(histogram.value_at_quantile(0.50)),\n                percentile_75 = ?Duration::from_nanos(histogram.value_at_quantile(0.75)),\n                max = ?Duration::from_nanos(histogram.value_at_quantile(1.0)),\n                \"metric\"\n            );\n        }\n    }\n}\n\nstruct ArcRecorder(Arc<Recorder>);\n\nimpl metrics::Recorder for ArcRecorder {\n    fn describe_counter(\n        &self,\n        _key: metrics::KeyName,\n        _unit: Option<metrics::Unit>,\n        _description: metrics::SharedString,\n    ) {\n        todo!()\n    }\n\n    fn describe_gauge(\n        &self,\n        _key: metrics::KeyName,\n        _unit: Option<metrics::Unit>,\n        _description: metrics::SharedString,\n    ) {\n        todo!()\n    }\n\n  
  fn describe_histogram(\n        &self,\n        _key: metrics::KeyName,\n        _unit: Option<metrics::Unit>,\n        _description: metrics::SharedString,\n    ) {\n        todo!()\n    }\n\n    fn register_counter(\n        &self,\n        _key: &metrics::Key,\n        _metadata: &metrics::Metadata<'_>,\n    ) -> metrics::Counter {\n        todo!()\n    }\n\n    fn register_gauge(\n        &self,\n        _key: &metrics::Key,\n        _metadata: &metrics::Metadata<'_>,\n    ) -> metrics::Gauge {\n        todo!()\n    }\n\n    fn register_histogram(\n        &self,\n        key: &metrics::Key,\n        _metadata: &metrics::Metadata<'_>,\n    ) -> metrics::Histogram {\n        metrics::Histogram::from_arc(Arc::new(Handle {\n            recorder: self.0.clone(),\n            key: key.clone(),\n        }))\n    }\n}\n\nstruct Handle {\n    recorder: Arc<Recorder>,\n    key: metrics::Key,\n}\n\nimpl metrics::HistogramFn for Handle {\n    fn record(&self, value: f64) {\n        if !is_ready_for_profiling() {\n            // We include an extra check here to avoid profiling when there is\n            // nothing to render.\n            return;\n        }\n        let mut histograms = self.recorder.histograms.read().unwrap();\n        let mut histogram = match histograms.get(&self.key) {\n            Some(x) => x.lock().unwrap(),\n            None => {\n                drop(histograms);\n                self.recorder\n                    .histograms\n                    .write()\n                    .unwrap()\n                    .insert(self.key.clone(), Mutex::new(Histogram::new(3).unwrap()));\n                histograms = self.recorder.histograms.read().unwrap();\n                histograms.get(&self.key).unwrap().lock().unwrap()\n            }\n        };\n        histogram.record((value * 1e9) as u64).unwrap();\n    }\n}\n\nstatic PROFILING_LOCK: OnceLock<()> = OnceLock::new();\n\n/// This function will keep returning false until `declare_ready_for_profiling` is 
called\nfn is_ready_for_profiling() -> bool {\n    PROFILING_LOCK.get().is_some()\n}\n\n/// Once this function is called, calls to \"histogram!\" will be effective.\npub fn declare_ready_for_profiling() {\n    let _ = PROFILING_LOCK.set(());\n}\n"
  },
  {
    "path": "client/src/net.rs",
    "content": "use std::{sync::Arc, thread};\n\nuse anyhow::{Result, anyhow};\nuse quinn::rustls;\nuse tokio::sync::mpsc;\n\nuse common::{\n    codec,\n    proto::{self, connection_error_codes},\n};\nuse server::Message;\n\nuse crate::Config;\n\npub fn spawn(cfg: Arc<Config>) -> server::Handle {\n    let (incoming_send, incoming_recv) = mpsc::unbounded_channel();\n    let (outgoing_send, outgoing_recv) = mpsc::unbounded_channel();\n    thread::spawn(move || {\n        if let Err(e) = run(cfg, incoming_send.clone(), outgoing_recv) {\n            let _ = incoming_send.send(Message::ConnectionLost(e));\n        }\n    });\n    server::Handle {\n        incoming: incoming_recv,\n        outgoing: outgoing_send,\n    }\n}\n\n#[tokio::main(worker_threads = 1)]\nasync fn run(\n    cfg: Arc<Config>,\n    incoming: mpsc::UnboundedSender<Message>,\n    outgoing: mpsc::UnboundedReceiver<proto::Command>,\n) -> Result<()> {\n    let mut endpoint = quinn::Endpoint::client(\"[::]:0\".parse().unwrap())?;\n    let crypto = rustls::ClientConfig::builder()\n        .dangerous()\n        .with_custom_certificate_verifier(Arc::new(AcceptAnyCert))\n        .with_no_client_auth();\n    let client_cfg = quinn::ClientConfig::new(Arc::new(\n        quinn::crypto::rustls::QuicClientConfig::try_from(crypto).unwrap(),\n    ));\n    endpoint.set_default_client_config(client_cfg);\n\n    let result = inner(cfg, incoming, outgoing, endpoint.clone()).await;\n    // Close the connection with a generic message, as graceful disconnections are not yet implemented.\n    // See https://github.com/Ralith/hypermine/issues/26\n    endpoint.close(\n        connection_error_codes::CLIENT_CLOSED_CONNECTION,\n        b\"client closed connection\",\n    );\n    endpoint.wait_idle().await;\n    result\n}\n\nasync fn inner(\n    cfg: Arc<Config>,\n    incoming: mpsc::UnboundedSender<Message>,\n    outgoing: mpsc::UnboundedReceiver<proto::Command>,\n    endpoint: quinn::Endpoint,\n) -> Result<()> {\n    let 
server = cfg.server.unwrap();\n    let connection = endpoint.connect(server, \"localhost\").unwrap().await?;\n\n    // Open the first stream for our hello message\n    let clienthello_stream = connection.open_uni().await?;\n    // Start sending commands asynchronously\n    tokio::spawn(handle_outgoing(outgoing, connection.clone()));\n    // Actually send the hello message\n    codec::send_whole(\n        clienthello_stream,\n        &proto::ClientHello {\n            name: (*cfg.name).into(),\n        },\n    )\n    .await?;\n\n    let mut ordered = connection.accept_uni().await?;\n    // Handle unordered messages\n    tokio::spawn(handle_unordered(incoming.clone(), connection));\n\n    // Receive the server's hello message\n    let hello = codec::recv::<proto::ServerHello>(&mut ordered)\n        .await?\n        .ok_or_else(|| anyhow!(\"ordered stream closed unexpectedly\"))?;\n    // Forward it on\n    incoming.send(Message::Hello(hello)).unwrap();\n\n    // Receive ordered messages from the server\n    loop {\n        let spawns = codec::recv::<proto::Spawns>(&mut ordered)\n            .await?\n            .ok_or_else(|| anyhow!(\"ordered stream closed unexpectedly\"))?;\n        incoming.send(Message::Spawns(spawns)).unwrap();\n    }\n}\n\n/// Send commands to the server\nasync fn handle_outgoing(\n    mut outgoing: mpsc::UnboundedReceiver<proto::Command>,\n    connection: quinn::Connection,\n) -> Result<()> {\n    while let Some(cmd) = outgoing.recv().await {\n        let stream = connection.open_uni().await?;\n        // TODO: Don't silently die on parse errors\n        codec::send_whole(stream, &cmd).await?;\n    }\n    Ok(())\n}\n\n/// Receive unordered messages from the server\nasync fn handle_unordered(incoming: mpsc::UnboundedSender<Message>, connection: quinn::Connection) {\n    loop {\n        let Ok(stream) = connection.accept_uni().await else {\n            // accept_uni should only fail if the connection is closed, which is already handled 
elsewhere.\n            return;\n        };\n        let incoming = incoming.clone();\n        let connection = connection.clone();\n        tokio::spawn(async move {\n            match codec::recv_whole::<proto::StateDelta>(2usize.pow(16), stream).await {\n                Err(e) => {\n                    tracing::error!(\"Error when parsing unordered stream from server: {e}\");\n                    connection.close(\n                        connection_error_codes::STREAM_ERROR,\n                        b\"could not process stream\",\n                    );\n                }\n                Ok(msg) => {\n                    let _ = incoming.send(Message::StateDelta(msg));\n                }\n            }\n        });\n    }\n}\n\n#[derive(Debug)]\nstruct AcceptAnyCert;\n\nimpl rustls::client::danger::ServerCertVerifier for AcceptAnyCert {\n    fn verify_server_cert(\n        &self,\n        _end_entity: &rustls::pki_types::CertificateDer,\n        _intermediates: &[rustls::pki_types::CertificateDer],\n        _server_name: &rustls::pki_types::ServerName,\n        _ocsp_response: &[u8],\n        _now: rustls::pki_types::UnixTime,\n    ) -> Result<rustls::client::danger::ServerCertVerified, rustls::Error> {\n        Ok(rustls::client::danger::ServerCertVerified::assertion())\n    }\n\n    fn verify_tls12_signature(\n        &self,\n        _message: &[u8],\n        _cert: &rustls::pki_types::CertificateDer<'_>,\n        _dss: &rustls::DigitallySignedStruct,\n    ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {\n        // QUIC is TLS 1.3 only\n        unreachable!();\n    }\n\n    fn verify_tls13_signature(\n        &self,\n        message: &[u8],\n        cert: &rustls::pki_types::CertificateDer<'_>,\n        dss: &rustls::DigitallySignedStruct,\n    ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {\n        rustls::crypto::verify_tls13_signature(\n            message,\n            cert,\n            dss,\n 
           &rustls::crypto::CryptoProvider::get_default()\n                .unwrap()\n                .signature_verification_algorithms,\n        )\n    }\n\n    fn supported_verify_schemes(&self) -> Vec<rustls::SignatureScheme> {\n        rustls::crypto::CryptoProvider::get_default()\n            .unwrap()\n            .signature_verification_algorithms\n            .supported_schemes()\n    }\n}\n"
  },
  {
    "path": "client/src/prediction.rs",
    "content": "use std::collections::VecDeque;\n\nuse common::{\n    SimConfig, character_controller,\n    graph::Graph,\n    proto::{CharacterInput, Position},\n};\n\n/// Predicts the result of motion inputs in-flight to the server\n///\n/// When sending input to the server, call `push` to record the input in a local queue of in-flight\n/// inputs, and to obtaining a generation tag to send alongside the input. The server echos the\n/// highest tag it's received alongside every state update, which we then use in `reconcile` to\n/// determine which inputs have been integrated into the server's state and no longer need to be\n/// predicted.\npub struct PredictedMotion {\n    log: VecDeque<CharacterInput>,\n    generation: u16,\n    predicted_position: Position,\n    predicted_velocity: na::Vector3<f32>,\n    predicted_on_ground: bool,\n}\n\nimpl PredictedMotion {\n    pub fn new(initial_position: Position) -> Self {\n        Self {\n            log: VecDeque::new(),\n            generation: 0,\n            predicted_position: initial_position,\n            predicted_velocity: na::Vector3::zeros(),\n            predicted_on_ground: false,\n        }\n    }\n\n    /// Update for input about to be sent to the server, returning the generation it should be\n    /// tagged with\n    pub fn push(&mut self, cfg: &SimConfig, graph: &Graph, input: &CharacterInput) -> u16 {\n        character_controller::run_character_step(\n            cfg,\n            graph,\n            &mut self.predicted_position,\n            &mut self.predicted_velocity,\n            &mut self.predicted_on_ground,\n            input,\n            cfg.step_interval.as_secs_f32(),\n        );\n        self.log.push_back(input.clone());\n        self.generation = self.generation.wrapping_add(1);\n        self.generation\n    }\n\n    /// Update with the latest state received from the server and the generation it was based on\n    pub fn reconcile(\n        &mut self,\n        cfg: &SimConfig,\n        
graph: &Graph,\n        generation: u16,\n        position: Position,\n        velocity: na::Vector3<f32>,\n        on_ground: bool,\n    ) {\n        let first_gen = self.generation.wrapping_sub(self.log.len() as u16);\n        let obsolete = usize::from(generation.wrapping_sub(first_gen));\n        if obsolete > self.log.len() || obsolete == 0 {\n            // We've already processed a state incorporating equal or more recent input\n            return;\n        }\n        self.log.drain(..obsolete);\n        self.predicted_position = position;\n        self.predicted_velocity = velocity;\n        self.predicted_on_ground = on_ground;\n\n        for input in self.log.iter() {\n            character_controller::run_character_step(\n                cfg,\n                graph,\n                &mut self.predicted_position,\n                &mut self.predicted_velocity,\n                &mut self.predicted_on_ground,\n                input,\n                cfg.step_interval.as_secs_f32(),\n            );\n        }\n    }\n\n    /// Latest estimate of the server's state after receiving all `push`ed inputs.\n    pub fn predicted_position(&self) -> &Position {\n        &self.predicted_position\n    }\n\n    pub fn predicted_velocity(&self) -> &na::Vector3<f32> {\n        &self.predicted_velocity\n    }\n\n    pub fn predicted_on_ground(&self) -> &bool {\n        &self.predicted_on_ground\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use common::{graph::NodeId, math::MIsometry};\n\n    /// An arbitrary position\n    fn pos() -> Position {\n        Position {\n            node: common::graph::NodeId::ROOT,\n            local: MIsometry::identity(),\n        }\n    }\n\n    #[test]\n    fn wraparound() {\n        let mock_cfg = SimConfig::from_raw(&common::SimConfigRaw::default());\n        let mut mock_graph = Graph::new(1);\n        mock_graph.ensure_node_state(NodeId::ROOT);\n        let mock_character_input = CharacterInput {\n            movement: 
na::Vector3::x(),\n            jump: false,\n            no_clip: true,\n            block_update: None,\n        };\n\n        let mut pred = PredictedMotion::new(pos());\n\n        // Helper functions to make test more readable\n        let push =\n            |pred: &mut PredictedMotion| pred.push(&mock_cfg, &mock_graph, &mock_character_input);\n        let reconcile = |pred: &mut PredictedMotion, generation| {\n            pred.reconcile(\n                &mock_cfg,\n                &mock_graph,\n                generation,\n                pos(),\n                na::Vector3::zeros(),\n                false,\n            )\n        };\n\n        pred.generation = u16::MAX - 1;\n\n        assert_eq!(push(&mut pred), u16::MAX);\n        assert_eq!(push(&mut pred), 0);\n        assert_eq!(pred.log.len(), 2);\n\n        reconcile(&mut pred, u16::MAX - 1);\n        assert_eq!(pred.log.len(), 2);\n        reconcile(&mut pred, u16::MAX);\n        assert_eq!(pred.log.len(), 1);\n        reconcile(&mut pred, 0);\n        assert_eq!(pred.log.len(), 0);\n    }\n}\n"
  },
  {
    "path": "client/src/sim.rs",
    "content": "use std::time::Duration;\n\nuse fxhash::FxHashMap;\nuse hecs::Entity;\nuse tracing::{debug, error, trace};\n\nuse crate::{\n    local_character_controller::LocalCharacterController, metrics, prediction::PredictedMotion,\n    worldgen_driver::WorldgenDriver,\n};\nuse common::{\n    EntityId, GraphEntities, SimConfig, Step, character_controller,\n    collision_math::Ray,\n    graph::{Graph, NodeId},\n    graph_ray_casting,\n    math::{MDirection, MIsometry, MPoint},\n    node::VoxelData,\n    proto::{\n        self, BlockUpdate, Character, CharacterInput, CharacterState, Command, Component,\n        Inventory, Position,\n    },\n    sanitize_motion_input,\n    world::Material,\n};\n\nconst MATERIAL_PALETTE: [Material; 10] = [\n    Material::WoodPlanks,\n    Material::Grass,\n    Material::Dirt,\n    Material::Sand,\n    Material::Snow,\n    Material::WhiteBrick,\n    Material::GreyBrick,\n    Material::Basalt,\n    Material::Water,\n    Material::Lava,\n];\n\n/// Game state\npub struct Sim {\n    // World state\n    pub graph: Graph,\n    /// Drives chunk generation\n    worldgen_driver: WorldgenDriver,\n    pub graph_entities: GraphEntities,\n    entity_ids: FxHashMap<EntityId, Entity>,\n    pub world: hecs::World,\n    pub cfg: SimConfig,\n    pub local_character_id: EntityId,\n    pub local_character: Option<Entity>,\n    step: Option<Step>,\n\n    // Input state\n    since_input_sent: Duration,\n    /// Most recent input\n    ///\n    /// Units are relative to movement speed.\n    movement_input: na::Vector3<f32>,\n    /// Average input over the current time step. 
The portion of the timestep which has not yet\n    /// elapsed is considered to have zero input.\n    ///\n    /// Units are relative to movement speed.\n    average_movement_input: na::Vector3<f32>,\n    no_clip: bool,\n    /// Whether no_clip will be toggled next step\n    toggle_no_clip: bool,\n    /// Whether the current step starts with a jump\n    is_jumping: bool,\n    /// Whether the jump button has been pressed since the last step\n    jump_pressed: bool,\n    /// Whether the jump button is currently held down\n    jump_held: bool,\n    /// Whether the place-block button has been pressed since the last step\n    place_block_pressed: bool,\n    /// Whether the break-block button has been pressed since the last step\n    break_block_pressed: bool,\n\n    selected_material: Material,\n\n    prediction: PredictedMotion,\n    local_character_controller: LocalCharacterController,\n}\n\nimpl Sim {\n    pub fn new(\n        cfg: SimConfig,\n        chunk_load_parallelism: usize,\n        local_character_id: EntityId,\n    ) -> Self {\n        let mut graph = Graph::new(cfg.chunk_size);\n        graph.ensure_node_state(NodeId::ROOT);\n        Self {\n            graph,\n            worldgen_driver: WorldgenDriver::new(chunk_load_parallelism),\n            graph_entities: GraphEntities::new(),\n            entity_ids: FxHashMap::default(),\n            world: hecs::World::new(),\n            cfg,\n            local_character_id,\n            local_character: None,\n            step: None,\n\n            since_input_sent: Duration::new(0, 0),\n            movement_input: na::zero(),\n            average_movement_input: na::zero(),\n            no_clip: true,\n            toggle_no_clip: false,\n            is_jumping: false,\n            jump_pressed: false,\n            jump_held: false,\n            place_block_pressed: false,\n            break_block_pressed: false,\n            selected_material: Material::WoodPlanks,\n            prediction: 
PredictedMotion::new(proto::Position {\n                node: NodeId::ROOT,\n                local: MIsometry::identity(),\n            }),\n            local_character_controller: LocalCharacterController::new(),\n        }\n    }\n\n    /// Rotates the camera's view in a context-dependent manner based on the desired yaw and pitch angles.\n    pub fn look(&mut self, delta_yaw: f32, delta_pitch: f32, delta_roll: f32) {\n        if self.no_clip {\n            self.local_character_controller\n                .look_free(delta_yaw, delta_pitch, delta_roll);\n        } else {\n            self.local_character_controller\n                .look_level(delta_yaw, delta_pitch);\n        }\n    }\n\n    pub fn set_movement_input(&mut self, mut raw_movement_input: na::Vector3<f32>) {\n        if !self.no_clip {\n            // Vertical movement keys shouldn't do anything unless no-clip is on.\n            raw_movement_input.y = 0.0;\n        }\n        if raw_movement_input.norm_squared() >= 1.0 {\n            // Cap movement input at 1\n            raw_movement_input.normalize_mut();\n        }\n        self.movement_input = raw_movement_input;\n    }\n\n    pub fn toggle_no_clip(&mut self) {\n        // We prepare to toggle no_clip after the next step instead of immediately, as otherwise,\n        // there would be a discontinuity when predicting the player's position within a given step,\n        // causing an undesirable jolt.\n        self.toggle_no_clip = true;\n    }\n\n    pub fn set_jump_held(&mut self, jump_held: bool) {\n        self.jump_held = jump_held;\n        self.jump_pressed = jump_held || self.jump_pressed;\n    }\n\n    pub fn set_jump_pressed_true(&mut self) {\n        self.jump_pressed = true;\n    }\n\n    pub fn set_place_block_pressed_true(&mut self) {\n        self.place_block_pressed = true;\n    }\n\n    /// Returns the block the player is looking at, if any. 
Also includes distance and face\n    pub fn looking_at(&self) -> Option<graph_ray_casting::GraphCastHit> {\n        let view_position = self.view();\n        let ray_casting_result = graph_ray_casting::ray_cast(\n            &self.graph,\n            &view_position,\n            &Ray::new(MPoint::w(), -MDirection::z()),\n            self.cfg.character.block_reach,\n        );\n        if let Ok(ray_casting_result) = ray_casting_result {\n            ray_casting_result\n        } else {\n            tracing::warn!(\"Tried to run a raycast beyond generated terrain.\");\n            None\n        }\n    }\n\n    /// Selects the material from a preset palette.\n    pub fn select_material(&mut self, idx: usize) {\n        self.selected_material = *MATERIAL_PALETTE.get(idx).unwrap_or(&MATERIAL_PALETTE[0]);\n    }\n\n    /// Cycles the selected material through all materials.\n    pub fn next_material(&mut self) {\n        self.selected_material =\n            Material::VALUES[(self.selected_material as usize + 1) % Material::COUNT];\n    }\n\n    pub fn prev_material(&mut self) {\n        self.selected_material = Material::VALUES\n            [(self.selected_material as usize + Material::COUNT - 1) % Material::COUNT];\n    }\n\n    /// selects the material of the block the player is looking at. 
Will never select void.\n    pub fn pick_material(&mut self) {\n        let Some(hit) = self.looking_at() else {\n            return;\n        };\n\n        let mat = self.graph.get_material(hit.chunk, hit.voxel_coords);\n        let Some(mat) = mat else {\n            return;\n        };\n        self.selected_material = mat;\n    }\n\n    pub fn selected_material(&self) -> Material {\n        self.selected_material\n    }\n\n    /// Returns an EntityId in the inventory with the given material\n    pub fn get_any_inventory_entity_matching_material(\n        &self,\n        material: Material,\n    ) -> Option<EntityId> {\n        self.world\n            .get::<&Inventory>(self.local_character?)\n            .ok()?\n            .contents\n            .iter()\n            .copied()\n            .find(|e| {\n                self.entity_ids.get(e).is_some_and(|&entity| {\n                    self.world\n                        .get::<&Material>(entity)\n                        .is_ok_and(|m| *m == material)\n                })\n            })\n    }\n\n    /// Returns the number of entities in the inventory with the given material\n    pub fn count_inventory_entities_matching_material(&self, material: Material) -> usize {\n        let Some(local_character) = self.local_character else {\n            return 0;\n        };\n        let Ok(inventory) = self.world.get::<&Inventory>(local_character) else {\n            return 0;\n        };\n        inventory\n            .contents\n            .iter()\n            .copied()\n            .filter(|e| {\n                self.entity_ids.get(e).is_some_and(|&entity| {\n                    self.world\n                        .get::<&Material>(entity)\n                        .is_ok_and(|m| *m == material)\n                })\n            })\n            .count()\n    }\n\n    pub fn set_break_block_pressed_true(&mut self) {\n        self.break_block_pressed = true;\n    }\n\n    pub fn cfg(&self) -> &SimConfig {\n        
&self.cfg\n    }\n\n    pub fn step(&mut self, dt: Duration, net: &mut server::Handle) {\n        self.local_character_controller.renormalize_orientation();\n        self.worldgen_driver.drive(\n            self.view(),\n            self.cfg.chunk_generation_distance,\n            &mut self.graph,\n        );\n\n        let step_interval = self.cfg.step_interval;\n        self.since_input_sent += dt;\n        if let Some(overflow) = self.since_input_sent.checked_sub(step_interval) {\n            // At least one step interval has passed since we last sent input, so it's time to\n            // send again.\n\n            // Update average movement input for the time between the last input sample and the end of\n            // the previous step. dt > overflow because we check whether a step has elapsed\n            // after each increment.\n            self.average_movement_input +=\n                self.movement_input * (dt - overflow).as_secs_f32() / step_interval.as_secs_f32();\n\n            // Send fresh input\n            self.send_input(net);\n            self.place_block_pressed = false;\n            self.break_block_pressed = false;\n\n            // Toggle no clip at the start of a new step\n            if self.toggle_no_clip {\n                self.no_clip = !self.no_clip;\n                self.toggle_no_clip = false;\n            }\n\n            self.is_jumping = self.jump_held || self.jump_pressed;\n            self.jump_pressed = false;\n\n            // Reset state for the next step\n            if overflow > step_interval {\n                // If it's been more than two timesteps since we last sent input, skip ahead\n                // rather than spamming the server.\n                self.average_movement_input = na::zero();\n                self.since_input_sent = Duration::new(0, 0);\n            } else {\n                self.average_movement_input =\n                    self.movement_input * overflow.as_secs_f32() / step_interval.as_secs_f32();\n 
               // Send the next input a little sooner if necessary to stay in sync\n                self.since_input_sent = overflow;\n            }\n        } else {\n            // Update average movement input for the time within the current step\n            self.average_movement_input +=\n                self.movement_input * dt.as_secs_f32() / step_interval.as_secs_f32();\n        }\n        self.update_view_position();\n        if !self.no_clip {\n            self.local_character_controller.align_to_gravity();\n        }\n    }\n\n    pub fn handle_net(&mut self, msg: server::Message) {\n        use server::Message::*;\n        match msg {\n            ConnectionLost(_) | Hello(_) => {\n                unreachable!(\"Case already handled by caller\");\n            }\n            Spawns(msg) => self.handle_spawns(msg),\n            StateDelta(msg) => {\n                // Discard out-of-order messages, taking care to account for step counter wrapping.\n                if self.step.is_some_and(|x| x.wrapping_sub(msg.step) >= 0) {\n                    return;\n                }\n                self.step = Some(msg.step);\n                for &(id, ref new_pos) in &msg.positions {\n                    self.update_position(id, new_pos);\n                }\n                for &(id, ref new_state) in &msg.character_states {\n                    self.update_character_state(id, new_state);\n                }\n                self.reconcile_prediction(msg.latest_input);\n            }\n        }\n    }\n\n    fn update_position(&mut self, id: EntityId, new_pos: &Position) {\n        match self.entity_ids.get(&id) {\n            None => debug!(%id, \"position update for unknown entity\"),\n            Some(&entity) => match self.world.get::<&mut Position>(entity) {\n                Ok(mut pos) => {\n                    if pos.node != new_pos.node {\n                        self.graph_entities.remove(pos.node, entity);\n                        
self.graph_entities.insert(new_pos.node, entity);\n                    }\n                    *pos = *new_pos;\n                }\n                Err(e) => error!(%id, \"position update error: {}\", e),\n            },\n        }\n    }\n\n    fn update_character_state(&mut self, id: EntityId, new_character_state: &CharacterState) {\n        match self.entity_ids.get(&id) {\n            None => debug!(%id, \"character state update for unknown entity\"),\n            Some(&entity) => match self.world.get::<&mut Character>(entity) {\n                Ok(mut ch) => {\n                    ch.state = new_character_state.clone();\n                }\n                Err(e) => {\n                    error!(%id, \"character state update error: {}\", e)\n                }\n            },\n        }\n    }\n\n    fn reconcile_prediction(&mut self, latest_input: u16) {\n        let id = self.local_character_id;\n        let Some(&entity) = self.entity_ids.get(&id) else {\n            debug!(%id, \"reconciliation attempted for unknown entity\");\n            return;\n        };\n        let pos = match self.world.get::<&Position>(entity) {\n            Ok(pos) => pos,\n            Err(e) => {\n                error!(%id, \"reconciliation error: {}\", e);\n                return;\n            }\n        };\n        let ch = match self.world.get::<&Character>(entity) {\n            Ok(ch) => ch,\n            Err(e) => {\n                error!(%id, \"reconciliation error: {}\", e);\n                return;\n            }\n        };\n        self.prediction.reconcile(\n            &self.cfg,\n            &self.graph,\n            latest_input,\n            *pos,\n            ch.state.velocity,\n            ch.state.on_ground,\n        );\n    }\n\n    fn handle_spawns(&mut self, msg: proto::Spawns) {\n        self.step = self.step.max(Some(msg.step));\n        let mut builder = hecs::EntityBuilder::new();\n        for (id, components) in msg.spawns {\n            self.spawn(&mut 
builder, id, components);\n        }\n        for &id in &msg.despawns {\n            match self.entity_ids.get(&id) {\n                Some(&entity) => self.destroy(entity),\n                None => error!(%id, \"despawned unknown entity\"),\n            }\n        }\n        if !msg.nodes.is_empty() {\n            trace!(count = msg.nodes.len(), \"adding nodes\");\n            // The first \"Spawns\" message from the server populates the graph and allows CPU/GPU metrics\n            // to be accurate instead of measuring thousands of no-op frames\n            metrics::declare_ready_for_profiling();\n        }\n        for node in &msg.nodes {\n            // We need to get a list of nodes from the server, especially on first log-in,\n            // since otherwise, we won't be able to know where the local character is with\n            // just the NodeId alone.\n            let node_id = self.graph.ensure_neighbor(node.parent, node.side);\n            self.graph.ensure_node_state(node_id);\n        }\n        for block_update in msg.block_updates.into_iter() {\n            self.worldgen_driver\n                .apply_block_update(&mut self.graph, block_update);\n        }\n        for (chunk_id, voxel_data) in msg.voxel_data {\n            let Some(voxel_data) = VoxelData::deserialize(&voxel_data, self.cfg.chunk_size) else {\n                tracing::error!(\"Voxel data received from server is of incorrect dimension\");\n                continue;\n            };\n            self.worldgen_driver\n                .apply_voxel_data(&mut self.graph, chunk_id, voxel_data);\n        }\n        for (subject, new_entity) in msg.inventory_additions {\n            self.world\n                .get::<&mut Inventory>(*self.entity_ids.get(&subject).unwrap())\n                .unwrap()\n                .contents\n                .push(new_entity);\n        }\n        for (subject, removed_entity) in msg.inventory_removals {\n            self.world\n                .get::<&mut 
Inventory>(*self.entity_ids.get(&subject).unwrap())\n                .unwrap()\n                .contents\n                .retain(|&id| id != removed_entity);\n        }\n    }\n\n    fn spawn(\n        &mut self,\n        builder: &mut hecs::EntityBuilder,\n        id: EntityId,\n        components: Vec<Component>,\n    ) {\n        trace!(%id, \"spawning entity\");\n        builder.add(id);\n        let mut node = None;\n        for component in components {\n            use common::proto::Component::*;\n            match component {\n                Character(x) => {\n                    builder.add(x);\n                }\n                Position(x) => {\n                    node = Some(x.node);\n                    builder.add(x);\n                }\n                Inventory(x) => {\n                    builder.add(x);\n                }\n                Material(x) => {\n                    builder.add(x);\n                }\n            };\n        }\n        let entity = self.world.spawn(builder.build());\n        if let Some(node) = node {\n            self.graph_entities.insert(node, entity);\n        }\n        if id == self.local_character_id {\n            self.local_character = Some(entity);\n        }\n        if let Some(x) = self.entity_ids.insert(id, entity) {\n            self.destroy_idless(x);\n            error!(%id, \"id collision\");\n        }\n    }\n\n    fn send_input(&mut self, net: &mut server::Handle) {\n        let orientation = if self.no_clip {\n            self.local_character_controller.orientation()\n        } else {\n            self.local_character_controller.horizontal_orientation()\n        };\n        let character_input = CharacterInput {\n            movement: sanitize_motion_input(orientation * self.average_movement_input),\n            jump: self.is_jumping,\n            no_clip: self.no_clip,\n            block_update: self.get_local_character_block_update(),\n        };\n        let generation = self\n            
.prediction\n            .push(&self.cfg, &self.graph, &character_input);\n\n        // Any failure here will be better handled in handle_net's ConnectionLost case\n        let _ = net.outgoing.send(Command {\n            generation,\n            character_input,\n            orientation: self.local_character_controller.orientation(),\n        });\n    }\n\n    fn update_view_position(&mut self) {\n        let mut view_position = *self.prediction.predicted_position();\n        let mut view_velocity = *self.prediction.predicted_velocity();\n        let mut view_on_ground = *self.prediction.predicted_on_ground();\n        let orientation = if self.no_clip {\n            self.local_character_controller.orientation()\n        } else {\n            self.local_character_controller.horizontal_orientation()\n        };\n        // Apply input that hasn't been sent yet\n        let predicted_input = CharacterInput {\n            // We divide by how far we are through the timestep because self.average_movement_input\n            // is always over the entire timestep, filling in zeroes for the future, and we\n            // want to use the average over what we have so far. 
Dividing by zero is handled\n            // by the character_controller sanitizing this input.\n            movement: orientation * self.average_movement_input\n                / (self.since_input_sent.as_secs_f32() / self.cfg.step_interval.as_secs_f32()),\n            jump: self.is_jumping,\n            no_clip: self.no_clip,\n            block_update: None,\n        };\n        character_controller::run_character_step(\n            &self.cfg,\n            &self.graph,\n            &mut view_position,\n            &mut view_velocity,\n            &mut view_on_ground,\n            &predicted_input,\n            self.since_input_sent.as_secs_f32(),\n        );\n\n        self.local_character_controller.update_position(\n            view_position,\n            self.graph.get_relative_up(&view_position).unwrap(),\n            !self.no_clip,\n        )\n    }\n\n    pub fn view(&self) -> Position {\n        let mut pos = self.local_character_controller.oriented_position();\n        let up = self.graph.get_relative_up(&pos).unwrap();\n        pos.local *= MIsometry::translation_along(\n            &(up.as_ref() * (self.cfg.character.character_radius - 1e-3)),\n        );\n        pos\n    }\n\n    /// Destroy all aspects of an entity\n    fn destroy(&mut self, entity: Entity) {\n        let id = *self\n            .world\n            .get::<&EntityId>(entity)\n            .expect(\"destroyed nonexistent entity\");\n        self.entity_ids.remove(&id);\n        self.destroy_idless(entity);\n    }\n\n    /// Destroy an entity without an EntityId mapped\n    fn destroy_idless(&mut self, entity: Entity) {\n        if let Ok(position) = self.world.get::<&Position>(entity) {\n            self.graph_entities.remove(position.node, entity);\n        }\n        self.world\n            .despawn(entity)\n            .expect(\"destroyed nonexistent entity\");\n    }\n\n    /// Provides the logic for the player to be able to place and break blocks at will\n    fn 
get_local_character_block_update(&self) -> Option<BlockUpdate> {\n        let placing = if self.place_block_pressed {\n            true\n        } else if self.break_block_pressed {\n            false\n        } else {\n            return None;\n        };\n\n        let hit = self.looking_at()?;\n\n        let block_pos = if placing {\n            self.graph.get_block_neighbor(\n                hit.chunk,\n                hit.voxel_coords,\n                hit.face_axis,\n                hit.face_sign,\n            )?\n        } else {\n            (hit.chunk, hit.voxel_coords)\n        };\n\n        let material = if placing {\n            self.selected_material\n        } else {\n            Material::Void\n        };\n\n        let consumed_entity = if placing && self.cfg.gameplay_enabled {\n            Some(self.get_any_inventory_entity_matching_material(material)?)\n        } else {\n            None\n        };\n\n        Some(BlockUpdate {\n            chunk_id: block_pos.0,\n            coords: block_pos.1,\n            new_material: material,\n            consumed_entity,\n        })\n    }\n}\n"
  },
  {
    "path": "client/src/worldgen_driver.rs",
    "content": "use std::time::Instant;\n\nuse common::{\n    dodeca::{self, Vertex},\n    graph::{Graph, NodeId},\n    math::MPoint,\n    node::{Chunk, ChunkId, VoxelData},\n    proto::{BlockUpdate, Position},\n    traversal,\n};\nuse fxhash::FxHashMap;\nuse metrics::histogram;\nuse tokio::sync::mpsc;\n\npub struct WorldgenDriver {\n    work_queue: WorkQueue,\n    /// Voxel data that have been downloaded from the server for chunks not yet introduced to the graph\n    preloaded_block_updates: FxHashMap<ChunkId, Vec<BlockUpdate>>,\n    /// Voxel data that has been fetched from the server but not yet introduced to the graph\n    preloaded_voxel_data: FxHashMap<ChunkId, VoxelData>,\n}\n\nimpl WorldgenDriver {\n    pub fn new(chunk_load_parallelism: usize) -> Self {\n        Self {\n            work_queue: WorkQueue::new(chunk_load_parallelism),\n            preloaded_block_updates: FxHashMap::default(),\n            preloaded_voxel_data: FxHashMap::default(),\n        }\n    }\n\n    pub fn drive(&mut self, view: Position, chunk_generation_distance: f32, graph: &mut Graph) {\n        let drive_worldgen_started = Instant::now();\n\n        // Check for chunks that have finished generating\n        while let Some(chunk) = self.work_queue.poll() {\n            self.add_chunk_to_graph(graph, ChunkId::new(chunk.node, chunk.chunk), chunk.voxels);\n        }\n\n        if !graph.contains(view.node) {\n            // Graph is temporarily out of sync with the server; we don't know where we are, so\n            // there's no point trying to generate chunks.\n            return;\n        }\n        let local_to_view = view.local.inverse();\n\n        traversal::ensure_nearby(graph, &view, chunk_generation_distance);\n        let nearby_nodes = traversal::nearby_nodes(graph, &view, chunk_generation_distance);\n\n        'nearby_nodes: for &(node, ref node_transform) in &nearby_nodes {\n            let node_to_view = local_to_view * node_transform;\n            for vertex in 
Vertex::iter() {\n                let chunk_id = ChunkId::new(node, vertex);\n\n                if !matches!(graph[chunk_id], Chunk::Fresh) {\n                    continue;\n                }\n\n                // Skip chunks beyond the chunk generation distance\n                if (node_to_view * vertex.chunk_bounding_sphere_center())\n                    .distance(&MPoint::origin())\n                    > chunk_generation_distance + dodeca::CHUNK_BOUNDING_SPHERE_RADIUS\n                {\n                    continue;\n                }\n\n                // Generate voxel data\n                let params = common::worldgen::ChunkParams::new(graph, chunk_id);\n                if let Some(voxel_data) = self.preloaded_voxel_data.remove(&chunk_id) {\n                    self.add_chunk_to_graph(graph, chunk_id, voxel_data);\n                } else if self.work_queue.load(ChunkDesc { node, params }) {\n                    graph[chunk_id] = Chunk::Generating;\n                } else {\n                    // No capacity is available in the work queue. Stop trying to prepare chunks to generate.\n                    break 'nearby_nodes;\n                }\n            }\n        }\n        histogram!(\"frame.cpu.drive_worldgen\").record(drive_worldgen_started.elapsed());\n    }\n\n    /// Adds established voxel data to the graph. 
This could come from world generation or sent from the server,\n    /// depending on whether the chunk has been modified.\n    pub fn add_chunk_to_graph(\n        &mut self,\n        graph: &mut Graph,\n        chunk_id: ChunkId,\n        voxel_data: VoxelData,\n    ) {\n        graph.populate_chunk(chunk_id, voxel_data);\n\n        if let Some(block_updates) = self.preloaded_block_updates.remove(&chunk_id) {\n            for block_update in block_updates {\n                // The chunk was just populated, so a block update should always succeed.\n                assert!(graph.update_block(&block_update));\n            }\n        }\n    }\n\n    pub fn apply_block_update(&mut self, graph: &mut Graph, block_update: BlockUpdate) {\n        if graph.update_block(&block_update) {\n            return;\n        }\n        self.preloaded_block_updates\n            .entry(block_update.chunk_id)\n            .or_default()\n            .push(block_update);\n    }\n\n    pub fn apply_voxel_data(\n        &mut self,\n        graph: &mut Graph,\n        chunk_id: ChunkId,\n        voxel_data: VoxelData,\n    ) {\n        if graph.contains(chunk_id.node) {\n            self.add_chunk_to_graph(graph, chunk_id, voxel_data);\n        } else {\n            self.preloaded_voxel_data.insert(chunk_id, voxel_data);\n        }\n    }\n}\n\nstruct ChunkDesc {\n    node: NodeId,\n    params: common::worldgen::ChunkParams,\n}\n\nstruct LoadedChunk {\n    node: NodeId,\n    chunk: Vertex,\n    voxels: VoxelData,\n}\n\nstruct WorkQueue {\n    _runtime: tokio::runtime::Runtime,\n    send: tokio::sync::mpsc::Sender<ChunkDesc>,\n    recv: tokio::sync::mpsc::Receiver<LoadedChunk>,\n    capacity: usize,\n    fill: usize,\n}\n\nimpl WorkQueue {\n    pub fn new(chunk_load_parallelism: usize) -> Self {\n        let runtime = tokio::runtime::Builder::new_multi_thread().build().unwrap();\n\n        let (input_send, mut input_recv) = mpsc::channel::<ChunkDesc>(chunk_load_parallelism);\n        let 
(output_send, output_recv) = mpsc::channel::<LoadedChunk>(chunk_load_parallelism);\n        runtime.spawn(async move {\n            while let Some(x) = input_recv.recv().await {\n                let out = output_send.clone();\n                tokio::spawn(async move {\n                    let loaded_chunk = LoadedChunk {\n                        node: x.node,\n                        chunk: x.params.chunk(),\n                        voxels: x.params.generate_voxels(),\n                    };\n                    let _ = out.send(loaded_chunk).await;\n                });\n            }\n        });\n\n        Self {\n            _runtime: runtime,\n            send: input_send,\n            recv: output_recv,\n            capacity: chunk_load_parallelism,\n            fill: 0,\n        }\n    }\n\n    /// Begin loading a single item, if capacity is available\n    #[must_use]\n    pub fn load(&mut self, x: ChunkDesc) -> bool {\n        if self.fill == self.capacity {\n            return false;\n        }\n        if self.send.try_send(x).is_ok() {\n            self.fill += 1;\n            true\n        } else {\n            false\n        }\n    }\n\n    /// Fetch a load result if one is ready, freeing capacity\n    pub fn poll(&mut self) -> Option<LoadedChunk> {\n        let result = self.recv.try_recv().ok()?;\n        self.fill -= 1;\n        Some(result)\n    }\n}\n"
  },
  {
    "path": "common/Cargo.toml",
    "content": "[package]\nname = \"common\"\nversion = \"0.1.0\"\nauthors = [\"Benjamin Saunders <ben.e.saunders@gmail.com>\"]\nedition = \"2024\"\npublish = false\nlicense = \"Apache-2.0 OR Zlib\"\n\n# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html\n\n[dependencies]\narrayvec = \"0.7.6\"\nblake3 = \"1.3.3\"\nserde = { version = \"1.0.104\", features = [\"derive\"] }\nnalgebra = { workspace = true, features = [\"serde-serialize\"] }\npostcard = { version = \"1.0.4\", default-features = false, features = [\"use-std\"] }\nanyhow = \"1.0.26\"\nquinn = { workspace = true }\nlibm = \"0.2.16\"\nfxhash = \"0.2.1\"\ntracing = \"0.1.10\"\nhecs = { workspace = true }\ntracing-subscriber = { version = \"0.3.15\", default-features = false, features = [\"env-filter\", \"smallvec\", \"fmt\", \"ansi\", \"time\", \"parking_lot\"] }\nrand = \"0.9.0\"\nrand_pcg = \"0.9.0\"\nrand_distr = \"0.5.0\"\nsimba = \"0.9.0\"\n\n[dev-dependencies]\napprox = \"0.5.1\"\ncriterion = \"0.8.2\"\n\n\n[[bench]]\nname = \"bench\"\nharness = false\n"
  },
  {
    "path": "common/benches/bench.rs",
    "content": "use criterion::{Criterion, criterion_group, criterion_main};\n\nuse common::{\n    dodeca::{Side, Vertex},\n    graph::{Graph, NodeId},\n    node::{Chunk, ChunkId},\n    proto::Position,\n    traversal::{ensure_nearby, nearby_nodes},\n    worldgen::ChunkParams,\n};\n\nfn build_graph(c: &mut Criterion) {\n    c.bench_function(\"build_graph 1000\", |b| {\n        b.iter(|| {\n            let mut graph = Graph::new(12);\n            let mut n = NodeId::ROOT;\n            for _ in 0..500 {\n                n = graph.ensure_neighbor(n, Side::A);\n                n = graph.ensure_neighbor(n, Side::J);\n            }\n            assert_eq!(graph.len(), 1001);\n        })\n    });\n\n    c.bench_function(\"nodegen 1000\", |b| {\n        b.iter(|| {\n            let mut graph = Graph::new(12);\n            let mut n = NodeId::ROOT;\n            for _ in 0..500 {\n                n = graph.ensure_neighbor(n, Side::A);\n                graph.ensure_node_state(n);\n                n = graph.ensure_neighbor(n, Side::J);\n                graph.ensure_node_state(n);\n            }\n            assert_eq!(graph.len(), 1001);\n        })\n    });\n\n    c.bench_function(\"worldgen\", |b| {\n        b.iter(|| {\n            let mut graph = Graph::new(12);\n            ensure_nearby(&mut graph, &Position::origin(), 1.0);\n            let all_nodes = nearby_nodes(&graph, &Position::origin(), 1.0);\n            let mut n = 0;\n            for (node, _) in all_nodes {\n                for vertex in Vertex::iter() {\n                    let chunk = ChunkId::new(node, vertex);\n                    let params = ChunkParams::new(&mut graph, chunk);\n                    graph[chunk] = Chunk::Populated {\n                        voxels: params.generate_voxels(),\n                        surface: None,\n                        old_surface: None,\n                    };\n                    n += 1;\n                }\n            }\n            assert_eq!(n, 860);\n        })\n 
   });\n}\n\ncriterion_group!(benches, build_graph);\ncriterion_main!(benches);\n"
  },
  {
    "path": "common/src/character_controller/collision.rs",
    "content": "//! This module is used to encapsulate character collision checking for the character controller\n\nuse tracing::error;\n\nuse crate::{\n    collision_math::Ray,\n    graph::Graph,\n    graph_collision,\n    math::{MDirection, MIsometry, MPoint},\n    proto::Position,\n};\n\n/// Checks for collisions when a character moves with a character-relative displacement vector of `relative_displacement`.\npub fn check_collision(\n    collision_context: &CollisionContext,\n    position: &Position,\n    relative_displacement: &na::Vector3<f32>,\n) -> CollisionCheckingResult {\n    // Split relative_displacement into its norm and a unit vector\n    let displacement_sqr = relative_displacement.norm_squared();\n    if displacement_sqr < 1e-16 {\n        // Fallback for if the displacement vector isn't large enough to reliably be normalized.\n        // Any value that is sufficiently large compared to f32::MIN_POSITIVE should work as the cutoff.\n        return CollisionCheckingResult::stationary();\n    }\n\n    let displacement_norm = displacement_sqr.sqrt();\n    let displacement_normalized =\n        na::UnitVector3::new_unchecked(relative_displacement / displacement_norm);\n\n    let ray = Ray::new(\n        MPoint::origin(),\n        MDirection::<f32>::from(displacement_normalized),\n    );\n    let tanh_distance = displacement_norm.tanh();\n\n    let cast_hit = graph_collision::sphere_cast(\n        collision_context.radius,\n        collision_context.graph,\n        position,\n        &ray,\n        tanh_distance,\n    );\n\n    let cast_hit = match cast_hit {\n        Ok(r) => r,\n        Err(e) => {\n            error!(\"Collision checking returned {:?}\", e);\n            return CollisionCheckingResult::stationary();\n        }\n    };\n\n    let distance = cast_hit\n        .as_ref()\n        .map_or(tanh_distance, |hit| hit.tanh_distance)\n        .atanh();\n\n    let displacement_vector = displacement_normalized.xyz() * distance;\n    let 
displacement_transform = MIsometry::translation_along(&displacement_vector);\n\n    CollisionCheckingResult {\n        displacement_vector,\n        displacement_transform,\n        collision: cast_hit.map(|hit| Collision {\n            // `CastEndpoint` has its `normal` given relative to the character's original position,\n            // but we want the normal relative to the character after the character moves to meet the wall.\n            // This normal now represents a contact point at the origin, so we omit the w-coordinate\n            // to ensure that it's orthogonal to the origin.\n            normal: na::UnitVector3::new_normalize(\n                (displacement_transform.inverse() * hit.normal).xyz(),\n            ),\n        }),\n    }\n}\n\n/// Contains information about the character and the world that is only relevant for collision checking\npub struct CollisionContext<'a> {\n    pub graph: &'a Graph,\n    pub radius: f32,\n}\n\npub struct CollisionCheckingResult {\n    /// The displacement allowed for the character before hitting a wall. The result of\n    /// `math::translate_along(&displacement_vector)` is `displacement_transform`.\n    pub displacement_vector: na::Vector3<f32>,\n\n    /// Multiplying the character's position by this matrix will move the character as far as it can up to its intended\n    /// displacement until it hits the wall.\n    pub displacement_transform: MIsometry<f32>,\n\n    pub collision: Option<Collision>,\n}\n\nimpl CollisionCheckingResult {\n    /// Return a CollisionCheckingResult with no movement and no collision; useful if the character is not moving\n    /// and has nothing to check collision against. 
Also useful as a last resort fallback if an unexpected error occurs.\n    pub fn stationary() -> CollisionCheckingResult {\n        CollisionCheckingResult {\n            displacement_vector: na::Vector3::zeros(),\n            displacement_transform: MIsometry::identity(),\n            collision: None,\n        }\n    }\n}\n\npub struct Collision {\n    /// This collision normal faces away from the collision surface and is given in the perspective of the character\n    /// _after_ it is transformed by `allowed_displacement`. The 4th coordinate of this normal vector is assumed to be\n    /// 0.0 and is therefore omitted.\n    pub normal: na::UnitVector3<f32>,\n}\n"
  },
  {
    "path": "common/src/character_controller/mod.rs",
    "content": "mod collision;\nmod vector_bounds;\n\nuse std::mem::replace;\n\nuse tracing::warn;\n\nuse crate::{\n    SimConfig,\n    character_controller::{\n        collision::{Collision, CollisionContext, check_collision},\n        vector_bounds::{BoundedVectors, VectorBound},\n    },\n    graph::Graph,\n    math::{self, MIsometry},\n    proto::{CharacterInput, Position},\n    sanitize_motion_input,\n    sim_config::CharacterConfig,\n};\n\n/// Runs a single step of character movement\npub fn run_character_step(\n    sim_config: &SimConfig,\n    graph: &Graph,\n    position: &mut Position,\n    velocity: &mut na::Vector3<f32>,\n    on_ground: &mut bool,\n    input: &CharacterInput,\n    dt_seconds: f32,\n) {\n    let ctx = CharacterControllerContext {\n        cfg: &sim_config.character,\n        collision_context: CollisionContext {\n            graph,\n            radius: sim_config.character.character_radius,\n        },\n        up: graph.get_relative_up(position).unwrap(),\n        dt_seconds,\n        movement_input: sanitize_motion_input(input.movement),\n        jump_input: input.jump,\n    };\n\n    if input.no_clip {\n        run_no_clip_character_step(&ctx, position, velocity, on_ground);\n    } else {\n        run_standard_character_step(&ctx, position, velocity, on_ground);\n    }\n\n    // Renormalize\n    position.local = position.local.renormalized();\n    let (next_node, transition_xf) = graph.normalize_transform(position.node, &position.local);\n    if next_node != position.node {\n        position.node = next_node;\n        position.local = transition_xf * position.local;\n    }\n}\n\nfn run_standard_character_step(\n    ctx: &CharacterControllerContext,\n    position: &mut Position,\n    velocity: &mut na::Vector3<f32>,\n    on_ground: &mut bool,\n) {\n    let mut ground_normal = None;\n    if *on_ground {\n        ground_normal = get_ground_normal(ctx, position);\n    }\n\n    // Handle jumping\n    if ctx.jump_input && 
ground_normal.is_some() {\n        let horizontal_velocity = *velocity - *ctx.up * ctx.up.dot(velocity);\n        *velocity = horizontal_velocity + *ctx.up * ctx.cfg.jump_speed;\n        ground_normal = None;\n    }\n\n    let old_velocity = *velocity;\n\n    // Update velocity\n    if let Some(ground_normal) = ground_normal {\n        apply_ground_controls(ctx, &ground_normal, velocity);\n    } else {\n        apply_air_controls(ctx, velocity);\n\n        // Apply air resistance\n        *velocity *= (-ctx.cfg.air_resistance * ctx.dt_seconds).exp();\n    }\n\n    // Apply gravity\n    *velocity -= *ctx.up * ctx.cfg.gravity_acceleration * ctx.dt_seconds;\n\n    // Apply speed cap\n    *velocity = velocity.cap_magnitude(ctx.cfg.speed_cap);\n\n    // Estimate the average velocity by using the average of the old velocity and new velocity,\n    // which has the effect of modeling a velocity that changes linearly over the timestep.\n    // This is necessary to avoid the following two issues:\n    // 1. Input lag, which would occur if only the old velocity was used\n    // 2. Movement artifacts, which would occur if only the new velocity was used. 
One\n    //    example of such an artifact is the character moving backwards slightly when they\n    //    stop moving after releasing a direction key.\n    let average_velocity = (*velocity + old_velocity) * 0.5;\n\n    // Handle actual movement\n    apply_velocity(\n        ctx,\n        average_velocity * ctx.dt_seconds,\n        position,\n        velocity,\n        &mut ground_normal,\n    );\n\n    *on_ground = ground_normal.is_some();\n}\n\nfn run_no_clip_character_step(\n    ctx: &CharacterControllerContext,\n    position: &mut Position,\n    velocity: &mut na::Vector3<f32>,\n    on_ground: &mut bool,\n) {\n    *velocity = ctx.movement_input * ctx.cfg.no_clip_movement_speed;\n    *on_ground = false;\n    position.local *= MIsometry::translation_along(&(*velocity * ctx.dt_seconds));\n}\n\n/// Returns the normal corresponding to the ground below the character, up to the `allowed_distance`. If\n/// no such ground exists, returns `None`.\nfn get_ground_normal(\n    ctx: &CharacterControllerContext,\n    position: &Position,\n) -> Option<na::UnitVector3<f32>> {\n    // Since the character can be at a corner between a slanted wall and the ground, the first collision\n    // directly below the character is not guaranteed to be part of the ground regardless of whether the\n    // character is on the ground. 
To handle this, we repeatedly redirect the direction we search to be\n    // parallel to walls we collide with to ensure that we find the ground if is indeed below the character.\n    const MAX_COLLISION_ITERATIONS: u32 = 6;\n    let mut allowed_displacement = BoundedVectors::new(\n        -ctx.up.into_inner() * ctx.cfg.ground_distance_tolerance,\n        None,\n    );\n\n    for _ in 0..MAX_COLLISION_ITERATIONS {\n        let collision_result = check_collision(\n            &ctx.collision_context,\n            position,\n            allowed_displacement.displacement(),\n        );\n        if let Some(collision) = collision_result.collision.as_ref() {\n            if is_ground(ctx, &collision.normal) {\n                // We found the ground, so return its normal.\n                return Some(collision.normal);\n            }\n            allowed_displacement.add_bound(VectorBound::new(\n                collision.normal,\n                collision.normal,\n                true,\n            ));\n        } else {\n            // Return `None` if we travel the whole `allowed_displacement` and don't find the ground.\n            return None;\n        }\n    }\n    // Return `None` if we fail to find the ground after the maximum number of attempts\n    None\n}\n\n/// Checks whether the given normal is flat enough to be considered part of the ground\nfn is_ground(ctx: &CharacterControllerContext, normal: &na::UnitVector3<f32>) -> bool {\n    let min_slope_up_component = 1.0 / (ctx.cfg.max_ground_slope.powi(2) + 1.0).sqrt();\n    normal.dot(&ctx.up) > min_slope_up_component\n}\n\n/// Updates the velocity based on user input assuming the character is on the ground\nfn apply_ground_controls(\n    ctx: &CharacterControllerContext,\n    ground_normal: &na::UnitVector3<f32>,\n    velocity: &mut na::Vector3<f32>,\n) {\n    // Set `target_ground_velocity` to have a consistent magnitude regardless\n    // of the movement direction, but ensure that the horizontal direction 
matches\n    // the horizontal direction of the intended movement direction.\n    let movement_norm = ctx.movement_input.norm();\n    let target_ground_velocity = if movement_norm < 1e-16 {\n        na::Vector3::zeros()\n    } else {\n        let mut unit_movement = ctx.movement_input / movement_norm;\n        math::project_to_plane(&mut unit_movement, ground_normal, &ctx.up, 0.0);\n        unit_movement.try_normalize_mut(1e-16);\n        unit_movement * movement_norm * ctx.cfg.max_ground_speed\n    };\n\n    // Set `ground_velocity` to be the current velocity's ground-parallel component,\n    // using a basis that contains the up vector to ensure that the result is unaffected\n    // by gravity.\n    let mut ground_velocity = *velocity;\n    math::project_to_plane(&mut ground_velocity, ground_normal, &ctx.up, 0.0);\n\n    // Adjust the ground-parallel component of the velocity vector to be closer to the\n    // target velocity.\n    let current_to_target_velocity = target_ground_velocity - ground_velocity;\n    let max_delta_velocity = ctx.cfg.ground_acceleration * ctx.dt_seconds;\n    if current_to_target_velocity.norm_squared() > max_delta_velocity.powi(2) {\n        *velocity += current_to_target_velocity.normalize() * max_delta_velocity;\n    } else {\n        *velocity += current_to_target_velocity;\n    }\n}\n\n/// Updates the velocity based on user input assuming the character is in the air\nfn apply_air_controls(ctx: &CharacterControllerContext, velocity: &mut na::Vector3<f32>) {\n    *velocity += ctx.movement_input * ctx.cfg.air_acceleration * ctx.dt_seconds;\n}\n\n/// Updates the character's position based on the given average velocity while handling collisions.\n/// Also updates the velocity and ground normal based on collisions that occur.\nfn apply_velocity(\n    ctx: &CharacterControllerContext,\n    expected_displacement: na::Vector3<f32>,\n    position: &mut Position,\n    velocity: &mut na::Vector3<f32>,\n    ground_normal: &mut 
Option<na::UnitVector3<f32>>,\n) {\n    // To prevent an unbounded runtime, we only allow a limited number of collisions to be processed in\n    // a single step. If the character encounters excessively complex geometry, it is possible to hit this limit,\n    // in which case further movement processing is delayed until the next time step.\n    const MAX_COLLISION_ITERATIONS: u32 = 6;\n\n    let mut bounded_vectors = BoundedVectors::new(expected_displacement, Some(*velocity));\n    let mut bounded_vectors_without_collisions = bounded_vectors.clone();\n\n    let mut ground_collision_handled = false;\n\n    let mut all_collisions_resolved = false;\n    for _ in 0..MAX_COLLISION_ITERATIONS {\n        let collision_result = check_collision(\n            &ctx.collision_context,\n            position,\n            bounded_vectors.displacement(),\n        );\n        position.local *= collision_result.displacement_transform;\n\n        if let Some(collision) = collision_result.collision {\n            // Update the expected displacement to represent a reduction in the remaining dt\n            let displacement_reduction_factor = 1.0\n                - collision_result.displacement_vector.magnitude()\n                    / bounded_vectors.displacement().magnitude();\n            bounded_vectors.scale_displacement(displacement_reduction_factor);\n            bounded_vectors_without_collisions.scale_displacement(displacement_reduction_factor);\n\n            handle_collision(\n                ctx,\n                collision,\n                &bounded_vectors_without_collisions,\n                &mut bounded_vectors,\n                ground_normal,\n                &mut ground_collision_handled,\n            );\n        } else {\n            all_collisions_resolved = true;\n            break;\n        }\n    }\n\n    if !all_collisions_resolved {\n        warn!(\n            \"A character entity processed too many collisions and collision resolution was cut short.\"\n        
);\n    }\n\n    *velocity = *bounded_vectors.velocity().unwrap();\n}\n\n/// Updates character information based on the results of a single collision\nfn handle_collision(\n    ctx: &CharacterControllerContext,\n    collision: Collision,\n    bounded_vectors_without_collisions: &BoundedVectors,\n    bounded_vectors: &mut BoundedVectors,\n    ground_normal: &mut Option<na::UnitVector3<f32>>,\n    ground_collision_handled: &mut bool,\n) {\n    // Collisions are divided into two categories: Ground collisions and wall collisions.\n    // Ground collisions will only affect vertical movement of the character, while wall collisions will\n    // push the character away from the wall in a perpendicular direction. If the character is on the ground,\n    // we have extra logic: Using a temporary bound locking the character to the ground plane to ensure that\n    // slanted wall collisions do not lift the character off the ground (temporary because the ground plane\n    // can change after a ground collision, such as with uneven terrain).\n    if is_ground(ctx, &collision.normal) {\n        if !*ground_collision_handled {\n            // Wall collisions can turn vertical momentum into unwanted horizontal momentum. This can\n            // occur if the character jumps at the corner between the ground and a slanted wall. If the wall\n            // collision is handled first, this horizontal momentum will push the character away from the wall.\n            // This can also occur if the character is on the ground and walks into a slanted wall. A single frame\n            // of downward momentum caused by gravity can turn into unwanted horizontal momentum that pushes\n            // the character away from the wall. Neither of these issues can occur if the ground collision is\n            // handled first, so when computing how the velocity vectors change, we rewrite history as if\n            // the ground collision was first. 
This is only necessary for the first ground collision, since\n            // afterwards, there is no more unexpected vertical momentum.\n            let old_bounded_vectors =\n                replace(bounded_vectors, bounded_vectors_without_collisions.clone());\n            bounded_vectors.add_temp_bound(VectorBound::new(collision.normal, ctx.up, false));\n            bounded_vectors.add_bound(VectorBound::new(collision.normal, ctx.up, true));\n            for bound in old_bounded_vectors.bounds() {\n                bounded_vectors.add_bound(bound.clone());\n            }\n            bounded_vectors.clear_temp_bounds();\n\n            *ground_collision_handled = true;\n        } else {\n            bounded_vectors.add_temp_bound(VectorBound::new(collision.normal, ctx.up, false));\n            bounded_vectors.add_bound(VectorBound::new(collision.normal, ctx.up, true));\n            bounded_vectors.clear_temp_bounds();\n        }\n\n        *ground_normal = Some(collision.normal);\n    } else {\n        if let Some(ground_normal) = ground_normal {\n            bounded_vectors.add_temp_bound(VectorBound::new(*ground_normal, ctx.up, false));\n        }\n        bounded_vectors.add_bound(VectorBound::new(collision.normal, collision.normal, true));\n        bounded_vectors.clear_temp_bounds();\n    }\n}\n\n/// Contains all information about a character that the character controller doesn't change during\n/// one of its simulation steps\nstruct CharacterControllerContext<'a> {\n    collision_context: CollisionContext<'a>,\n    up: na::UnitVector3<f32>,\n    cfg: &'a CharacterConfig,\n    dt_seconds: f32,\n    movement_input: na::Vector3<f32>,\n    jump_input: bool,\n}\n"
  },
  {
    "path": "common/src/character_controller/vector_bounds.rs",
    "content": "//! This module is used to transform vectors to ensure that they fit constraints discovered during collision checking.\n\nuse rand_distr::num_traits::Zero;\nuse tracing::warn;\n\nuse crate::math;\n\n/// Encapsulates all the information needed to constrain a vector (displacement) based on a set of `VectorBound`s and apply those\n/// same constraints to a secondary vector (velocity).\n#[derive(Clone)]\npub struct BoundedVectors {\n    displacement: na::Vector3<f32>,\n    velocity: Option<na::Vector3<f32>>,\n    bounds: Vec<VectorBound>,\n    temp_bounds: Vec<VectorBound>,\n    error_margin: f32,\n}\n\nimpl BoundedVectors {\n    /// Initializes a `BoundedVectors` with an empty list of bounds. The `displacement` is the vector\n    /// we will apply the bounds to. The size of this vector also determins the error margin\n    /// to prevent floating point approximation limits from causing phantom collisions. Note that this\n    /// error margin is not needed if the resulting vector is zero, since no phantom collision can occur\n    /// if the character is stopped. 
The `velocity` is a vector that should have similar bounds applied to\n    /// it as `displacement`, but it is not used to compute which bounds to apply.\n    pub fn new(displacement: na::Vector3<f32>, velocity: Option<na::Vector3<f32>>) -> Self {\n        let error_margin = displacement.magnitude() * 1e-4;\n\n        BoundedVectors {\n            displacement,\n            velocity,\n            bounds: vec![],\n            temp_bounds: vec![],\n            error_margin,\n        }\n    }\n\n    pub fn displacement(&self) -> &na::Vector3<f32> {\n        &self.displacement\n    }\n\n    /// Scales the displacement vector without invalidating any of the `VectorBound`s\n    pub fn scale_displacement(&mut self, scale_factor: f32) {\n        self.displacement *= scale_factor;\n        self.error_margin *= scale_factor;\n    }\n\n    pub fn velocity(&self) -> Option<&na::Vector3<f32>> {\n        self.velocity.as_ref()\n    }\n\n    /// Returns the internal list of `VectorBound`s contained in the `BoundedVectors` struct.\n    pub fn bounds(&self) -> &[VectorBound] {\n        &self.bounds\n    }\n\n    /// Constrains `displacement` with `new_bound` while keeping the existing constraints satisfied. All projection\n    /// transformations applied to `displacement` are also applied to `velocity` to allow two vectors to be transformed consistently\n    /// with each other.\n    pub fn add_bound(&mut self, new_bound: VectorBound) {\n        self.apply_bound(&new_bound);\n        self.bounds.push(new_bound);\n    }\n\n    /// Temporarily constrains `displacement` with `new_bound` while keeping the existing constraints satisfied. All projection\n    /// transformations applied to `displacement` are also applied to `velocity` to allow two vectors to be transformed consistently\n    /// with each other. 
Use `clear_temporary_bounds` to get rid of any existing temporary bounds\n    pub fn add_temp_bound(&mut self, new_bound: VectorBound) {\n        self.apply_bound(&new_bound);\n        self.temp_bounds.push(new_bound);\n    }\n\n    /// Removes all temporary bounds\n    pub fn clear_temp_bounds(&mut self) {\n        self.temp_bounds.clear();\n    }\n\n    /// Helper function to apply a new bound without adding it to any lists.\n    fn apply_bound(&mut self, new_bound: &VectorBound) {\n        // There likely isn't a perfect way to get a vector properly constrained with a list of bounds. The main\n        // difficulty is finding which set of linearly independent bounds need to be applied so that all bounds are\n        // satisfied. Since bounds are one-sided and not guaranteed to be linearly independent from each other, this\n        // requires some ad-hoc choices. The algorithm we choose here is to (1) assume that `new_bound` is one of these\n        // linearly independent bounds, (2) if necessary, pair it up with each existing bound to find the first such\n        // bound that allows all bounds to be satisfied, and (3) zero out the vector if no such pairing works, as we\n        // assume that we need to apply three linearly independent bounds.\n\n        // Combine existing bounds with temporary bounds into an iterator\n        let bounds_iter = self.bounds.iter().chain(self.temp_bounds.iter());\n\n        // Apply new_bound if necessary.\n        if !new_bound.check_vector(&self.displacement, self.error_margin) {\n            new_bound.constrain_vector(&mut self.displacement, self.error_margin);\n            if let Some(ref mut velocity) = self.velocity {\n                // Note: The velocity vector does not need an error margin.\n                new_bound.constrain_vector(velocity, 0.0);\n            }\n        }\n\n        // Check if all constraints are satisfied\n        if (bounds_iter.clone()).all(|b| b.check_vector(&self.displacement, 
self.error_margin)) {\n            return;\n        }\n\n        // If not all constraints are satisfied, find the first constraint that if applied will satisfy\n        // the remaining constriants\n        for bound in\n            (bounds_iter.clone()).filter(|b| !b.check_vector(&self.displacement, self.error_margin))\n        {\n            let Some(ortho_bound) = bound.get_self_constrained_with_bound(new_bound) else {\n                warn!(\n                    \"Unsatisfied existing bound is parallel to new bound. Is the character squeezed between two walls?\"\n                );\n                continue;\n            };\n\n            let mut candidate = self.displacement;\n            ortho_bound.constrain_vector(&mut candidate, self.error_margin);\n\n            if (bounds_iter.clone()).all(|b| b.check_vector(&candidate, self.error_margin)) {\n                self.displacement = candidate;\n                if let Some(ref mut velocity) = self.velocity {\n                    ortho_bound.constrain_vector(velocity, 0.0);\n                }\n                return;\n            }\n        }\n\n        // If no choice satisfies all constraints, it means that there are three\n        // bounds that need to be applied at the same time, so the resulting\n        // vector must be 0.\n        self.displacement.set_zero();\n        if let Some(ref mut velocity) = self.velocity {\n            velocity.set_zero();\n        }\n    }\n}\n\n/// Represents a single constraint for a vector. 
`VectorBound`s alone conceptually contain\n/// enough information to apply to a vector, but practically, one other piece of information\n/// is needed: `error_margin`, which exists in `BoundedVectors`.\n#[derive(Clone)]\npub struct VectorBound {\n    normal: na::UnitVector3<f32>,\n    projection_direction: na::UnitVector3<f32>,\n    front_facing: bool, // Only used for `check_vector` function\n}\n\nimpl VectorBound {\n    /// Creates a `VectorBound` that pushes vectors away from the plane given\n    /// by the normal in `projection_direction`. After applying such a bound to\n    /// a vector, its dot product with `normal` should be close to zero but positive\n    /// even considering floating point error.\n    ///\n    /// The `VectorBound` will only push vectors that do not currently fulfill the bounds.\n    /// If `front_facing` is true, the bound wants the vector to be \"in front\" of the plane,\n    /// in the direction given by `normal`. Otherwise, the bound wants the vector to be \"behind\"\n    /// the plane. Error margins are set so that two planes, one front_facing and one not, with the\n    /// same `normal` and `projection_direction`, can both act on a vector without interfering.\n    pub fn new(\n        normal: na::UnitVector3<f32>,\n        projection_direction: na::UnitVector3<f32>,\n        front_facing: bool,\n    ) -> Self {\n        VectorBound {\n            normal,\n            projection_direction,\n            front_facing,\n        }\n    }\n\n    /// Updates `subject` with a projection transformation based on the constraint given by `self`.\n    /// This function does not check whether such a constraint is needed.\n    fn constrain_vector(&self, subject: &mut na::Vector3<f32>, error_margin: f32) {\n        math::project_to_plane(\n            subject,\n            &self.normal,\n            &self.projection_direction,\n            error_margin,\n        );\n    }\n\n    /// Checks whether `subject` satisfies the constraint given by `self`. 
Note that `check_vector` will\n    /// return `true` after a vector is constrained by `constrain_vector` with the same error margin, even\n    /// if it's perturbed slightly. However, that property only holds if the error margin is not too small.\n    fn check_vector(&self, subject: &na::Vector3<f32>, error_margin: f32) -> bool {\n        if subject.is_zero() {\n            return true;\n        }\n\n        // An additional margin of error is needed when the bound is checked to ensure that an\n        // applied bound always passes the check. Ostensibly, for an applied bound, the dot\n        // product is equal to the error margin.\n        if self.front_facing {\n            // Using 0.5 here should ensure that the check will pass after the bound is applied, and it will fail if the\n            // dot product is too close to zero to guarantee that it won't be treated as negative during collision checking\n            subject.dot(&self.normal) >= error_margin * 0.5\n        } else {\n            // Using 1.5 here keeps the additional margin of error equivalent in magnitude to the front-facing case\n            subject.dot(&self.normal) <= error_margin * 1.5\n        }\n    }\n\n    /// Returns a `VectorBound` that is an altered version of `self` so that it no longer interferes\n    /// with `bound`. This is achieved by altering the projection direction by a factor of\n    /// `bound`'s projection direction to be orthogonal to `bound`'s normal. 
If this is not\n    /// possible, returns `None`.\n    fn get_self_constrained_with_bound(&self, bound: &VectorBound) -> Option<VectorBound> {\n        let mut ortho_bound_projection_direction = self.projection_direction.into_inner();\n        math::project_to_plane(\n            &mut ortho_bound_projection_direction,\n            &bound.normal,\n            &bound.projection_direction,\n            0.0,\n        );\n\n        na::UnitVector3::try_new(ortho_bound_projection_direction, 1e-5).map(|d| VectorBound {\n            normal: self.normal,\n            projection_direction: d,\n            front_facing: self.front_facing,\n        })\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use approx::assert_abs_diff_eq;\n\n    use super::*;\n\n    #[test]\n    fn vector_bound_group_example() {\n        let mut bounded_vector = BoundedVectors::new(na::Vector3::new(-4.0, -3.0, 1.0), None);\n\n        // Add a bunch of bounds that are achievable with nonzero vectors\n        bounded_vector.add_bound(VectorBound::new(\n            unit_vector(1.0, 3.0, 4.0),\n            unit_vector(1.0, 2.0, 2.0),\n            true,\n        ));\n\n        assert_ne!(bounded_vector.displacement, na::Vector3::zero());\n        assert_bounds_achieved(&bounded_vector);\n\n        bounded_vector.add_bound(VectorBound::new(\n            unit_vector(2.0, -3.0, -4.0),\n            unit_vector(1.0, -2.0, -1.0),\n            true,\n        ));\n\n        assert_ne!(bounded_vector.displacement, na::Vector3::zero());\n        assert_bounds_achieved(&bounded_vector);\n\n        bounded_vector.add_bound(VectorBound::new(\n            unit_vector(2.0, -3.0, -5.0),\n            unit_vector(1.0, -2.0, -2.0),\n            true,\n        ));\n\n        assert_ne!(bounded_vector.displacement, na::Vector3::zero());\n        assert_bounds_achieved(&bounded_vector);\n\n        // Finally, add a bound that overconstrains the system\n        bounded_vector.add_bound(VectorBound::new(\n            unit_vector(-3.0, 
3.0, -2.0),\n            unit_vector(-3.0, 3.0, -2.0),\n            true,\n        ));\n\n        // Using assert_eq instead of assert_ne here\n        assert_eq!(bounded_vector.displacement, na::Vector3::zero());\n        // Special logic allows bounds checking to work with the zero vector\n        assert_bounds_achieved(&bounded_vector);\n    }\n\n    #[test]\n    fn constrain_vector_example() {\n        let normal = unit_vector(1.0, 3.0, 4.0);\n        let projection_direction = unit_vector(1.0, 2.0, 2.0);\n        let error_margin = 1e-4;\n        let bound = VectorBound::new(normal, projection_direction, true);\n\n        let initial_vector = na::Vector3::new(-4.0, -3.0, 1.0);\n\n        assert!(!bound.check_vector(&initial_vector, error_margin));\n\n        let mut constrined_vector = initial_vector;\n        bound.constrain_vector(&mut constrined_vector, error_margin);\n\n        assert!(bound.check_vector(&constrined_vector, error_margin));\n        assert_collinear(\n            constrined_vector - initial_vector,\n            projection_direction.into_inner(),\n            1e-5,\n        );\n    }\n\n    #[test]\n    fn get_self_constrained_with_bound_example() {\n        // For simplicity, we test with an error margin of 0.\n        let normal0 = unit_vector(1.0, 3.0, 4.0);\n        let projection_direction0 = unit_vector(1.0, 2.0, 2.0);\n\n        let normal1 = unit_vector(1.0, -4.0, 3.0);\n        let projection_direction1 = unit_vector(1.0, -2.0, 1.0);\n\n        let bound0 = VectorBound::new(normal0, projection_direction0, true);\n        let bound1 = VectorBound::new(normal1, projection_direction1, true);\n\n        let initial_vector = na::Vector3::new(2.0, -1.0, -3.0);\n        let mut constrained_vector = initial_vector;\n        bound0.constrain_vector(&mut constrained_vector, 0.0);\n\n        let ortho_bound1 = bound1.get_self_constrained_with_bound(&bound0).unwrap();\n        ortho_bound1.constrain_vector(&mut constrained_vector, 0.0);\n\n     
   // Check that the constrained vector is on the intersection between the two bound planes\n        assert_abs_diff_eq!(constrained_vector.dot(&normal0), 0.0, epsilon = 1e-5);\n        assert_abs_diff_eq!(constrained_vector.dot(&normal1), 0.0, epsilon = 1e-5);\n\n        // Check that the delta of the constrained vector is a linear combination of the projection directions.\n        // To do this, we check whether the vector is orthogonal to the normal of the plane produced by the two\n        // projection directions.\n        assert_abs_diff_eq!(\n            (constrained_vector - initial_vector)\n                .dot(&projection_direction0.cross(&projection_direction1)),\n            0.0,\n            epsilon = 1e-5\n        );\n    }\n\n    fn assert_bounds_achieved(bounds: &BoundedVectors) {\n        for bound in bounds.bounds() {\n            assert!(bound.check_vector(&bounds.displacement, bounds.error_margin));\n        }\n    }\n\n    fn assert_collinear(v0: na::Vector3<f32>, v1: na::Vector3<f32>, epsilon: f32) {\n        assert_abs_diff_eq!(\n            v0.normalize(),\n            v1.normalize() * (v0.dot(&v1)).signum(),\n            epsilon = epsilon\n        );\n    }\n\n    /// Unit vector\n    fn unit_vector(x: f32, y: f32, z: f32) -> na::UnitVector3<f32> {\n        na::UnitVector3::new_normalize(na::Vector3::new(x, y, z))\n    }\n}\n"
  },
  {
    "path": "common/src/chunk_collision.rs",
    "content": "use crate::{\n    collision_math::Ray,\n    math::{MVector, PermuteXYZ},\n    node::{ChunkLayout, VoxelAABB, VoxelData},\n    voxel_math::Coords,\n    world::Material,\n};\n\npub struct ChunkCastHit {\n    /// The tanh of the distance traveled along the ray to result in this hit.\n    pub tanh_distance: f32,\n\n    /// Represents the normal vector of the hit surface in the dual coordinate system of the chunk.\n    /// To get the actual normal vector, project it so that it is orthogonal to the endpoint in Lorentz space.\n    pub normal: MVector<f32>,\n}\n\n/// Performs sphere casting (swept collision query) against the voxels in the chunk with the given `voxel_data`\n///\n/// The `ray` parameter is given and any resulting hit normals are given in the chunk's dual coordinate system.\n///\n/// The `tanh_distance` is the hyperbolic tangent of the distance along the ray to check for hits.\npub fn chunk_sphere_cast(\n    collider_radius: f32,\n    voxel_data: &VoxelData,\n    layout: &ChunkLayout,\n    ray: &Ray,\n    tanh_distance: f32,\n) -> Option<ChunkCastHit> {\n    let mut hit: Option<ChunkCastHit> = None;\n\n    let bounding_box =\n        VoxelAABB::from_ray_segment_and_radius(layout, ray, tanh_distance, collider_radius)?;\n\n    for t_axis in 0..3 {\n        hit = find_face_collision(\n            collider_radius,\n            voxel_data,\n            layout,\n            &bounding_box,\n            t_axis,\n            ray,\n            hit.as_ref().map_or(tanh_distance, |hit| hit.tanh_distance),\n        )\n        .or(hit);\n    }\n\n    for t_axis in 0..3 {\n        hit = find_edge_collision(\n            collider_radius,\n            voxel_data,\n            layout,\n            &bounding_box,\n            t_axis,\n            ray,\n            hit.as_ref().map_or(tanh_distance, |hit| hit.tanh_distance),\n        )\n        .or(hit);\n    }\n\n    hit = find_vertex_collision(\n        collider_radius,\n        voxel_data,\n        layout,\n  
      &bounding_box,\n        ray,\n        hit.as_ref().map_or(tanh_distance, |hit| hit.tanh_distance),\n    )\n    .or(hit);\n\n    hit\n}\n\n/// Detect collisions where a sphere contacts the front side of a voxel face\nfn find_face_collision(\n    collider_radius: f32,\n    voxel_data: &VoxelData,\n    layout: &ChunkLayout,\n    bounding_box: &VoxelAABB,\n    t_axis: usize,\n    ray: &Ray,\n    tanh_distance: f32,\n) -> Option<ChunkCastHit> {\n    let mut hit: Option<ChunkCastHit> = None;\n\n    let u_axis = (t_axis + 1) % 3;\n    let v_axis = (t_axis + 2) % 3;\n\n    // Loop through all grid planes overlapping the bounding box\n    for t in bounding_box.grid_planes(t_axis) {\n        // Find a normal to the grid plane. Note that (t, 0, 0, x) is a normal of the plane whose closest point\n        // to the origin is (x, 0, 0, t), and we use that fact here.\n        let normal = MVector::new(1.0, 0.0, 0.0, layout.grid_to_dual(t))\n            .tuv_to_xyz(t_axis)\n            .normalized_direction();\n\n        let Some(new_tanh_distance) =\n            ray.solve_sphere_plane_intersection(&normal, collider_radius.sinh())\n        else {\n            continue;\n        };\n\n        // If new_tanh_distance is out of range, no collision occurred.\n        if new_tanh_distance >= hit.as_ref().map_or(tanh_distance, |hit| hit.tanh_distance) {\n            continue;\n        }\n\n        // Which side we approach the plane from affects which voxel we want to use for hit detection.\n        // If exiting a chunk via a chunk boundary, hit detection is handled by a different chunk.\n        // We also want to adjust the normal vector to always face outward from the hit block\n        let (normal, voxel_t) = if ray.direction.mip(&normal) < 0.0 {\n            if t == 0 {\n                continue;\n            }\n            (normal, t - 1)\n        } else {\n            if t == layout.dimension() {\n                continue;\n            }\n            (-normal, t)\n        
};\n\n        let ray_endpoint = ray.ray_point(new_tanh_distance);\n        let contact_point = ray_endpoint - normal.as_ref() * ray_endpoint.mip(&normal);\n\n        // Compute the u and v-coordinates of the voxels at the contact point\n        let Some(voxel_u) = layout.dual_to_voxel(contact_point[u_axis] / contact_point.w) else {\n            continue;\n        };\n        let Some(voxel_v) = layout.dual_to_voxel(contact_point[v_axis] / contact_point.w) else {\n            continue;\n        };\n\n        // Ensure that the relevant voxel is solid\n        if !voxel_is_solid(\n            voxel_data,\n            layout,\n            [voxel_t, voxel_u, voxel_v].tuv_to_xyz(t_axis),\n        ) {\n            continue;\n        }\n\n        // A collision was found. Update the hit.\n        hit = Some(ChunkCastHit {\n            tanh_distance: new_tanh_distance,\n            normal: normal.into(),\n        });\n    }\n\n    hit\n}\n\n/// Detect collisions where a sphere contacts a voxel edge\nfn find_edge_collision(\n    collider_radius: f32,\n    voxel_data: &VoxelData,\n    layout: &ChunkLayout,\n    bounding_box: &VoxelAABB,\n    t_axis: usize,\n    ray: &Ray,\n    tanh_distance: f32,\n) -> Option<ChunkCastHit> {\n    let mut hit: Option<ChunkCastHit> = None;\n\n    let u_axis = (t_axis + 1) % 3;\n    let v_axis = (t_axis + 2) % 3;\n\n    // Loop through all grid lines overlapping the bounding box\n    for (u, v) in bounding_box.grid_lines(u_axis, v_axis) {\n        // Compute vectors Lorentz-orthogonal to the edge and to each other\n        let edge_normal0 = MVector::new(0.0, 1.0, 0.0, layout.grid_to_dual(u))\n            .tuv_to_xyz(t_axis)\n            .normalized_direction();\n\n        let edge_normal1 = MVector::new(0.0, 0.0, 1.0, layout.grid_to_dual(v)).tuv_to_xyz(t_axis);\n        let edge_normal1 = (*edge_normal1.as_ref()\n            - *edge_normal0.as_ref() * edge_normal0.mip(&edge_normal1))\n        .normalized_direction();\n\n        let 
Some(new_tanh_distance) = ray.solve_sphere_line_intersection(\n            &edge_normal0,\n            &edge_normal1,\n            collider_radius.sinh(),\n        ) else {\n            continue;\n        };\n\n        // If new_tanh_distance is out of range, no collision occurred.\n        if new_tanh_distance >= hit.as_ref().map_or(tanh_distance, |hit| hit.tanh_distance) {\n            continue;\n        }\n\n        let ray_endpoint = ray.ray_point(new_tanh_distance);\n        let contact_point = ray_endpoint\n            - edge_normal0.as_ref() * ray_endpoint.mip(&edge_normal0)\n            - edge_normal1.as_ref() * ray_endpoint.mip(&edge_normal1);\n\n        // Compute the t-coordinate of the voxels at the contact point\n        let Some(voxel_t) = layout.dual_to_voxel(contact_point[t_axis] / contact_point.w) else {\n            continue;\n        };\n\n        // Ensure that the edge has a solid voxel adjacent to it\n        if layout.neighboring_voxels(u).all(|voxel_u| {\n            layout.neighboring_voxels(v).all(|voxel_v| {\n                !voxel_is_solid(\n                    voxel_data,\n                    layout,\n                    [voxel_t, voxel_u, voxel_v].tuv_to_xyz(t_axis),\n                )\n            })\n        }) {\n            continue;\n        }\n\n        // A collision was found. 
Update the hit.\n        hit = Some(ChunkCastHit {\n            tanh_distance: new_tanh_distance,\n            normal: ray_endpoint - contact_point,\n        });\n    }\n\n    hit\n}\n\n/// Detect collisions where a sphere contacts a voxel vertex\nfn find_vertex_collision(\n    collider_radius: f32,\n    voxel_data: &VoxelData,\n    layout: &ChunkLayout,\n    bounding_box: &VoxelAABB,\n    ray: &Ray,\n    tanh_distance: f32,\n) -> Option<ChunkCastHit> {\n    let mut hit: Option<ChunkCastHit> = None;\n\n    // Loop through all grid points contained in the bounding box\n    for (x, y, z) in bounding_box.grid_points(0, 1, 2) {\n        // Skip vertices that have no solid voxels adjacent to them\n        if layout.neighboring_voxels(x).all(|voxel_x| {\n            layout.neighboring_voxels(y).all(|voxel_y| {\n                layout\n                    .neighboring_voxels(z)\n                    .all(|voxel_z| !voxel_is_solid(voxel_data, layout, [voxel_x, voxel_y, voxel_z]))\n            })\n        }) {\n            continue;\n        }\n\n        // Compute vectors Lorentz-orthogonal to the vertex and to each other\n        let vertex_normal0 =\n            MVector::new(1.0, 0.0, 0.0, layout.grid_to_dual(x)).normalized_direction();\n\n        let vertex_normal1 = MVector::new(0.0, 1.0, 0.0, layout.grid_to_dual(y));\n        let vertex_normal1 = (vertex_normal1\n            - vertex_normal0.as_ref() * vertex_normal0.mip(&vertex_normal1))\n        .normalized_direction();\n\n        let vertex_normal2 = MVector::new(0.0, 0.0, 1.0, layout.grid_to_dual(z));\n        let vertex_normal2 = (vertex_normal2\n            - vertex_normal0.as_ref() * vertex_normal0.mip(&vertex_normal2)\n            - vertex_normal1.as_ref() * vertex_normal1.mip(&vertex_normal2))\n        .normalized_direction();\n\n        let Some(new_tanh_distance) = ray.solve_sphere_point_intersection(\n            &vertex_normal0,\n            &vertex_normal1,\n            &vertex_normal2,\n            
collider_radius.sinh(),\n        ) else {\n            continue;\n        };\n\n        // If new_tanh_distance is out of range, no collision occurred.\n        if new_tanh_distance >= hit.as_ref().map_or(tanh_distance, |hit| hit.tanh_distance) {\n            continue;\n        }\n\n        // Determine the cube-centric coordinates of the vertex\n        let vertex_position = MVector::new(\n            layout.grid_to_dual(x),\n            layout.grid_to_dual(y),\n            layout.grid_to_dual(z),\n            1.0,\n        )\n        .normalized_point();\n\n        // A collision was found. Update the hit.\n        let ray_endpoint = ray.ray_point(new_tanh_distance);\n        hit = Some(ChunkCastHit {\n            tanh_distance: new_tanh_distance,\n            normal: ray_endpoint - vertex_position.as_ref(),\n        });\n    }\n\n    hit\n}\n\n/// Checks whether a voxel can be collided with. Any non-void voxel falls under this category.\nfn voxel_is_solid(voxel_data: &VoxelData, layout: &ChunkLayout, coords: [u8; 3]) -> bool {\n    debug_assert!(coords[0] < layout.dimension());\n    debug_assert!(coords[1] < layout.dimension());\n    debug_assert!(coords[2] < layout.dimension());\n    voxel_data.get(Coords(coords).to_index(layout.dimension())) != Material::Void\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::{math::MIsometry, node::VoxelData};\n\n    use super::*;\n\n    /// Helper structure used to reduce the number of parameters to pass around with tests\n    struct TestSphereCastContext {\n        collider_radius: f32,\n        layout: ChunkLayout,\n        voxel_data: VoxelData,\n    }\n\n    impl TestSphereCastContext {\n        fn new(collider_radius: f32) -> Self {\n            let dimension: u8 = 12;\n\n            let mut ctx = TestSphereCastContext {\n                collider_radius,\n                layout: ChunkLayout::new(dimension),\n                voxel_data: VoxelData::Solid(Material::Void),\n            };\n\n            // Populate voxels. 
Consists of a single voxel with voxel coordinates (1, 1, 1). The cube corresponding\n            // to this voxel has grid coordinates from (1, 1, 1) to (2, 2, 2)\n            ctx.set_voxel([1, 1, 1], Material::Dirt);\n\n            ctx\n        }\n\n        fn set_voxel(&mut self, coords: [u8; 3], material: Material) {\n            debug_assert!(coords[0] < self.layout.dimension());\n            debug_assert!(coords[1] < self.layout.dimension());\n            debug_assert!(coords[2] < self.layout.dimension());\n            self.voxel_data.data_mut(self.layout.dimension())\n                [Coords(coords).to_index(self.layout.dimension())] = material;\n        }\n    }\n\n    /// Helper method to set up common parameters that are used\n    /// in a passed-in closure to call sphere casting methods.\n    fn cast_with_test_ray(\n        ctx: &TestSphereCastContext,\n        ray_start_grid_coords: [f32; 3],\n        ray_end_grid_coords: [f32; 3],\n        wrapped_fn: impl FnOnce(&Ray, f32),\n    ) {\n        let ray_start = MVector::new(\n            ray_start_grid_coords[0] / ctx.layout.dual_to_grid_factor(),\n            ray_start_grid_coords[1] / ctx.layout.dual_to_grid_factor(),\n            ray_start_grid_coords[2] / ctx.layout.dual_to_grid_factor(),\n            1.0,\n        )\n        .normalized_point();\n\n        let ray_end = MVector::new(\n            ray_end_grid_coords[0] / ctx.layout.dual_to_grid_factor(),\n            ray_end_grid_coords[1] / ctx.layout.dual_to_grid_factor(),\n            ray_end_grid_coords[2] / ctx.layout.dual_to_grid_factor(),\n            1.0,\n        )\n        .normalized_point();\n\n        let ray = Ray::new(\n            ray_start,\n            ((ray_end.as_ref() - ray_start.as_ref())\n                + ray_start.as_ref() * ray_start.mip(&(ray_end.as_ref() - ray_start.as_ref())))\n            .normalized_direction(),\n        );\n\n        let tanh_distance = (-ray_start.mip(&ray_end)).acosh();\n\n        wrapped_fn(&ray, 
tanh_distance)\n    }\n\n    fn chunk_sphere_cast_wrapper(\n        ctx: &TestSphereCastContext,\n        ray: &Ray,\n        tanh_distance: f32,\n    ) -> Option<ChunkCastHit> {\n        chunk_sphere_cast(\n            ctx.collider_radius,\n            &ctx.voxel_data,\n            &ctx.layout,\n            ray,\n            tanh_distance,\n        )\n    }\n\n    fn find_face_collision_wrapper(\n        ctx: &TestSphereCastContext,\n        ray: &Ray,\n        t_axis: usize,\n        tanh_distance: f32,\n    ) -> Option<ChunkCastHit> {\n        find_face_collision(\n            ctx.collider_radius,\n            &ctx.voxel_data,\n            &ctx.layout,\n            &VoxelAABB::from_ray_segment_and_radius(\n                &ctx.layout,\n                ray,\n                tanh_distance,\n                ctx.collider_radius,\n            )\n            .unwrap(),\n            t_axis,\n            ray,\n            tanh_distance,\n        )\n    }\n\n    fn find_edge_collision_wrapper(\n        ctx: &TestSphereCastContext,\n        ray: &Ray,\n        t_axis: usize,\n        tanh_distance: f32,\n    ) -> Option<ChunkCastHit> {\n        find_edge_collision(\n            ctx.collider_radius,\n            &ctx.voxel_data,\n            &ctx.layout,\n            &VoxelAABB::from_ray_segment_and_radius(\n                &ctx.layout,\n                ray,\n                tanh_distance,\n                ctx.collider_radius,\n            )\n            .unwrap(),\n            t_axis,\n            ray,\n            tanh_distance,\n        )\n    }\n\n    fn find_vertex_collision_wrapper(\n        ctx: &TestSphereCastContext,\n        ray: &Ray,\n        tanh_distance: f32,\n    ) -> Option<ChunkCastHit> {\n        find_vertex_collision(\n            ctx.collider_radius,\n            &ctx.voxel_data,\n            &ctx.layout,\n            &VoxelAABB::from_ray_segment_and_radius(\n                &ctx.layout,\n                ray,\n                tanh_distance,\n           
     ctx.collider_radius,\n            )\n            .unwrap(),\n            ray,\n            tanh_distance,\n        )\n    }\n\n    fn test_face_collision(\n        ctx: &TestSphereCastContext,\n        ray: &Ray,\n        t_axis: usize,\n        tanh_distance: f32,\n    ) {\n        let hit = chunk_sphere_cast_wrapper(ctx, ray, tanh_distance);\n        assert_hits_exist_and_eq(\n            &hit,\n            &find_face_collision_wrapper(ctx, ray, t_axis, tanh_distance),\n        );\n        sanity_check_normal(ray, &hit.unwrap());\n    }\n\n    fn test_edge_collision(\n        ctx: &TestSphereCastContext,\n        ray: &Ray,\n        t_axis: usize,\n        tanh_distance: f32,\n    ) {\n        let hit = chunk_sphere_cast_wrapper(ctx, ray, tanh_distance);\n        assert_hits_exist_and_eq(\n            &hit,\n            &find_edge_collision_wrapper(ctx, ray, t_axis, tanh_distance),\n        );\n        sanity_check_normal(ray, &hit.unwrap());\n    }\n\n    fn test_vertex_collision(ctx: &TestSphereCastContext, ray: &Ray, tanh_distance: f32) {\n        let hit = chunk_sphere_cast_wrapper(ctx, ray, tanh_distance);\n        assert_hits_exist_and_eq(\n            &hit,\n            &find_vertex_collision_wrapper(ctx, ray, tanh_distance),\n        );\n        sanity_check_normal(ray, &hit.unwrap());\n    }\n\n    /// Check that the two hits exist and are equal to each other. 
Useful for ensuring that\n    /// a particular intersection type is detected by the general `chunk_sphere_cast` method.\n    fn assert_hits_exist_and_eq(hit0: &Option<ChunkCastHit>, hit1: &Option<ChunkCastHit>) {\n        assert!(hit0.is_some());\n        assert!(hit1.is_some());\n        assert_eq!(\n            hit0.as_ref().unwrap().tanh_distance,\n            hit1.as_ref().unwrap().tanh_distance\n        );\n        assert_eq!(hit0.as_ref().unwrap().normal, hit1.as_ref().unwrap().normal);\n    }\n\n    /// Ensures that the normal is pointing outward, opposite the ray direction.\n    fn sanity_check_normal(ray: &Ray, hit: &ChunkCastHit) {\n        // The ray we care about is after its start point has moved to the contact point.\n        let ray = MIsometry::translation(\n            &ray.position,\n            &ray.ray_point(hit.tanh_distance).normalized_point(),\n        ) * ray;\n\n        // Project normal to be perpendicular to the ray's position\n        let corrected_normal = (hit.normal + ray.position.as_ref() * hit.normal.mip(&ray.position))\n            .normalized_direction();\n\n        // Check that the normal and ray are pointing opposite directions\n        assert!(corrected_normal.mip(&ray.direction) < 0.0);\n    }\n\n    /// Tests that a suitable collision is found when approaching a single voxel from various angles and that\n    /// no collision is found in paths that don't reach that voxel.\n    #[test]\n    fn chunk_sphere_cast_examples() {\n        let collider_radius = 0.02;\n        let ctx = TestSphereCastContext::new(collider_radius);\n\n        // Approach a single voxel from various angles. 
Ensure that a suitable collision is found each time.\n        // Note: The voxel is centered at (1.5, 1.5, 1.5) in the grid coordinates used in this test.\n\n        // Face collisions\n        cast_with_test_ray(\n            &ctx,\n            [0.0, 1.5, 1.5],\n            [1.5, 1.5, 1.5],\n            |ray, tanh_distance| {\n                test_face_collision(&ctx, ray, 0, tanh_distance);\n            },\n        );\n\n        cast_with_test_ray(\n            &ctx,\n            [1.5, 1.5, 3.0],\n            [1.5, 1.5, 1.5],\n            |ray, tanh_distance| {\n                test_face_collision(&ctx, ray, 2, tanh_distance);\n            },\n        );\n\n        // Edge collisions\n        cast_with_test_ray(\n            &ctx,\n            [1.5, 3.0, 0.0],\n            [1.5, 1.5, 1.5],\n            |ray, tanh_distance| {\n                test_edge_collision(&ctx, ray, 0, tanh_distance);\n            },\n        );\n\n        cast_with_test_ray(\n            &ctx,\n            [3.0, 1.5, 3.0],\n            [1.5, 1.5, 1.5],\n            |ray, tanh_distance| {\n                test_edge_collision(&ctx, ray, 1, tanh_distance);\n            },\n        );\n\n        // Vertex collisions\n        cast_with_test_ray(\n            &ctx,\n            [0.0, 0.0, 0.0],\n            [1.5, 1.5, 1.5],\n            |ray, tanh_distance| {\n                test_vertex_collision(&ctx, ray, tanh_distance);\n            },\n        );\n\n        cast_with_test_ray(\n            &ctx,\n            [3.0, 3.0, 0.0],\n            [1.5, 1.5, 1.5],\n            |ray, tanh_distance| {\n                test_vertex_collision(&ctx, ray, tanh_distance);\n            },\n        );\n\n        // No collision: Going sideways relative to a face\n        cast_with_test_ray(\n            &ctx,\n            [3.0, 1.5, 1.5],\n            [3.0, 3.0, 1.5],\n            |ray, tanh_distance| {\n                assert!(chunk_sphere_cast_wrapper(&ctx, ray, tanh_distance).is_none());\n            },\n   
     );\n\n        // No collision: Going away from a face\n        cast_with_test_ray(\n            &ctx,\n            [3.0, 1.5, 1.5],\n            [4.5, 1.5, 1.5],\n            |ray, tanh_distance| {\n                assert!(chunk_sphere_cast_wrapper(&ctx, ray, tanh_distance).is_none());\n            },\n        );\n\n        // No collision: Past cast endpoint\n        cast_with_test_ray(\n            &ctx,\n            [8.0, 1.5, 1.5],\n            [3.0, 1.5, 1.5],\n            |ray, tanh_distance| {\n                assert!(chunk_sphere_cast_wrapper(&ctx, ray, tanh_distance).is_none());\n            },\n        );\n    }\n\n    /// Tests that colliding with a face from the back side is impossible. Note that colliding\n    /// with the back side of an edge or vertex is still possible. Getting rid of these collisions\n    /// is a possible future enhancement.\n    #[test]\n    fn face_collisions_one_sided() {\n        let collider_radius = 0.01;\n        let ctx = TestSphereCastContext::new(collider_radius);\n\n        cast_with_test_ray(\n            &ctx,\n            [1.5, 1.5, 1.5],\n            [4.5, 1.5, 1.5],\n            |ray, tanh_distance| {\n                assert!(chunk_sphere_cast_wrapper(&ctx, ray, tanh_distance).is_none());\n            },\n        )\n    }\n}\n"
  },
  {
    "path": "common/src/chunk_ray_casting.rs",
    "content": "use crate::{\n    collision_math::Ray,\n    math::{MVector, PermuteXYZ},\n    node::{ChunkLayout, VoxelAABB, VoxelData},\n    voxel_math::{CoordAxis, CoordSign, Coords},\n    world::Material,\n};\n\npub struct ChunkCastHit {\n    /// The tanh of the distance traveled along the ray to result in this hit.\n    pub tanh_distance: f32,\n\n    /// The coordinates of the block that was hit, including margins.\n    pub voxel_coords: Coords,\n\n    /// Which of the three axes is orthogonal to the face of the block that was hit.\n    pub face_axis: CoordAxis,\n\n    /// The direction along `face_axis` corresponding to the outside of the face that was hit.\n    pub face_sign: CoordSign,\n}\n\n/// Performs ray casting against the voxels in the chunk with the given `voxel_data`\n///\n/// The `ray` parameter is given and any resulting hit normals are given in the chunk's dual coordinate system.\n///\n/// The `tanh_distance` is the hyperbolic tangent of the distance along the ray to check for hits.\npub fn chunk_ray_cast(\n    voxel_data: &VoxelData,\n    layout: &ChunkLayout,\n    ray: &Ray,\n    tanh_distance: f32,\n) -> Option<ChunkCastHit> {\n    let mut hit: Option<ChunkCastHit> = None;\n\n    let bounding_box = VoxelAABB::from_ray_segment_and_radius(layout, ray, tanh_distance, 0.0)?;\n\n    for t_axis in 0..3 {\n        hit = find_face_collision(\n            voxel_data,\n            layout,\n            &bounding_box,\n            t_axis,\n            ray,\n            hit.as_ref().map_or(tanh_distance, |hit| hit.tanh_distance),\n        )\n        .or(hit);\n    }\n\n    hit\n}\n\n/// Detect intersections between a ray and the front side of a voxel face\nfn find_face_collision(\n    voxel_data: &VoxelData,\n    layout: &ChunkLayout,\n    bounding_box: &VoxelAABB,\n    t_axis: usize,\n    ray: &Ray,\n    tanh_distance: f32,\n) -> Option<ChunkCastHit> {\n    let mut hit: Option<ChunkCastHit> = None;\n\n    let u_axis = (t_axis + 1) % 3;\n    let v_axis = 
(t_axis + 2) % 3;\n\n    // Loop through all grid planes overlapping the bounding box\n    for t in bounding_box.grid_planes(t_axis) {\n        // Find a normal to the grid plane. Note that (t, 0, 0, x) is a normal of the plane whose closest point\n        // to the origin is (x, 0, 0, t), and we use that fact here.\n        let normal = MVector::new(1.0, 0.0, 0.0, layout.grid_to_dual(t))\n            .tuv_to_xyz(t_axis)\n            .normalized_direction();\n\n        let Some(new_tanh_distance) = ray.solve_point_plane_intersection(&normal) else {\n            continue;\n        };\n\n        // If new_tanh_distance is out of range, no collision occurred.\n        if new_tanh_distance >= hit.as_ref().map_or(tanh_distance, |hit| hit.tanh_distance) {\n            continue;\n        }\n\n        // Which side we approach the plane from affects which voxel we want to use for hit detection.\n        // If exiting a chunk via a chunk boundary, hit detection is handled by a different chunk.\n        // We also want to retain this face_direction for reporting the hit result later.\n        let (face_sign, voxel_t) = if ray.direction.mip(&normal) < 0.0 {\n            if t == 0 {\n                continue;\n            }\n            (CoordSign::Plus, t - 1)\n        } else {\n            if t == layout.dimension() {\n                continue;\n            }\n            (CoordSign::Minus, t)\n        };\n\n        let ray_endpoint = ray.ray_point(new_tanh_distance);\n        let contact_point = ray_endpoint - normal.as_ref() * ray_endpoint.mip(&normal);\n\n        // Compute the u and v-coordinates of the voxels at the contact point\n        let Some(voxel_u) = layout.dual_to_voxel(contact_point[u_axis] / contact_point.w) else {\n            continue;\n        };\n        let Some(voxel_v) = layout.dual_to_voxel(contact_point[v_axis] / contact_point.w) else {\n            continue;\n        };\n\n        // Ensure that the relevant voxel is solid\n        if 
!voxel_is_solid(\n            voxel_data,\n            layout,\n            [voxel_t, voxel_u, voxel_v].tuv_to_xyz(t_axis),\n        ) {\n            continue;\n        }\n\n        // A collision was found. Update the hit.\n        hit = Some(ChunkCastHit {\n            tanh_distance: new_tanh_distance,\n            voxel_coords: Coords([voxel_t, voxel_u, voxel_v].tuv_to_xyz(t_axis)),\n            face_axis: CoordAxis::try_from(t_axis).unwrap(),\n            face_sign,\n        });\n    }\n    hit\n}\n\n/// Checks whether a voxel can be collided with. Any non-void voxel falls under this category.\nfn voxel_is_solid(voxel_data: &VoxelData, layout: &ChunkLayout, coords: [u8; 3]) -> bool {\n    debug_assert!(coords[0] < layout.dimension());\n    debug_assert!(coords[1] < layout.dimension());\n    debug_assert!(coords[2] < layout.dimension());\n    voxel_data.get(Coords(coords).to_index(layout.dimension())) != Material::Void\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::node::VoxelData;\n\n    use super::*;\n\n    /// Helper structure used to reduce the number of parameters to pass around with tests\n    struct TestRayCastContext {\n        layout: ChunkLayout,\n        voxel_data: VoxelData,\n    }\n\n    impl TestRayCastContext {\n        fn new() -> Self {\n            let dimension: u8 = 12;\n\n            let mut ctx = TestRayCastContext {\n                layout: ChunkLayout::new(dimension),\n                voxel_data: VoxelData::Solid(Material::Void),\n            };\n\n            // Populate voxels. Consists of a single voxel with voxel coordinates (1, 1, 1). 
The cube corresponding\n            // to this voxel has grid coordinates from (1, 1, 1) to (2, 2, 2)\n            ctx.set_voxel([1, 1, 1], Material::Dirt);\n\n            ctx\n        }\n\n        fn set_voxel(&mut self, coords: [u8; 3], material: Material) {\n            debug_assert!(coords[0] < self.layout.dimension());\n            debug_assert!(coords[1] < self.layout.dimension());\n            debug_assert!(coords[2] < self.layout.dimension());\n            self.voxel_data.data_mut(self.layout.dimension())\n                [Coords(coords).to_index(self.layout.dimension())] = material;\n        }\n    }\n\n    /// Helper method to set up common parameters that are used\n    /// in a passed-in closure to call ray casting methods.\n    fn cast_with_test_ray(\n        ctx: &TestRayCastContext,\n        ray_start_grid_coords: [f32; 3],\n        ray_end_grid_coords: [f32; 3],\n        wrapped_fn: impl FnOnce(&Ray, f32),\n    ) {\n        let ray_start = MVector::new(\n            ray_start_grid_coords[0] / ctx.layout.dual_to_grid_factor(),\n            ray_start_grid_coords[1] / ctx.layout.dual_to_grid_factor(),\n            ray_start_grid_coords[2] / ctx.layout.dual_to_grid_factor(),\n            1.0,\n        )\n        .normalized_point();\n\n        let ray_end = MVector::new(\n            ray_end_grid_coords[0] / ctx.layout.dual_to_grid_factor(),\n            ray_end_grid_coords[1] / ctx.layout.dual_to_grid_factor(),\n            ray_end_grid_coords[2] / ctx.layout.dual_to_grid_factor(),\n            1.0,\n        )\n        .normalized_point();\n\n        let ray = Ray::new(\n            ray_start,\n            ((ray_end.as_ref() - ray_start.as_ref())\n                + ray_start.as_ref() * ray_start.mip(&(ray_end.as_ref() - ray_start.as_ref())))\n            .normalized_direction(),\n        );\n\n        let tanh_distance = (-(ray_start.mip(&ray_end))).acosh();\n\n        wrapped_fn(&ray, tanh_distance)\n    }\n\n    fn chunk_ray_cast_wrapper(\n        
ctx: &TestRayCastContext,\n        ray: &Ray,\n        tanh_distance: f32,\n    ) -> Option<ChunkCastHit> {\n        chunk_ray_cast(&ctx.voxel_data, &ctx.layout, ray, tanh_distance)\n    }\n\n    fn test_face_collision(\n        ctx: &TestRayCastContext,\n        ray: &Ray,\n        tanh_distance: f32,\n        expected_face_axis: CoordAxis,\n        expected_face_sign: CoordSign,\n    ) {\n        let hit = chunk_ray_cast_wrapper(ctx, ray, tanh_distance);\n        let hit = hit.expect(\"collision expected\");\n        assert_eq!(hit.voxel_coords, Coords([1, 1, 1]));\n        assert_eq!(hit.face_axis, expected_face_axis);\n        assert_eq!(hit.face_sign, expected_face_sign);\n        // sanity_check_normal(ray, &hit.unwrap()); TODO: Check other results\n    }\n\n    /// Tests that a suitable collision is found when approaching a single voxel from various angles and that\n    /// no collision is found in paths that don't reach that voxel.\n    #[test]\n    fn chunk_ray_cast_examples() {\n        let ctx = TestRayCastContext::new();\n\n        // Approach a single voxel from various angles. 
Ensure that a suitable collision is found each time.\n        // Note: The voxel is centered at (1.5, 1.5, 1.5) in the grid coordinates used in this test.\n\n        cast_with_test_ray(\n            &ctx,\n            [0.0, 1.5, 1.5],\n            [1.5, 1.5, 1.5],\n            |ray, tanh_distance| {\n                test_face_collision(&ctx, ray, tanh_distance, CoordAxis::X, CoordSign::Minus);\n            },\n        );\n\n        cast_with_test_ray(\n            &ctx,\n            [1.5, 1.5, 3.0],\n            [1.5, 1.5, 1.5],\n            |ray, tanh_distance| {\n                test_face_collision(&ctx, ray, tanh_distance, CoordAxis::Z, CoordSign::Plus);\n            },\n        );\n\n        // No collision: Going sideways relative to a face\n        cast_with_test_ray(\n            &ctx,\n            [3.0, 1.5, 1.5],\n            [3.0, 3.0, 1.5],\n            |ray, tanh_distance| {\n                assert!(chunk_ray_cast_wrapper(&ctx, ray, tanh_distance).is_none());\n            },\n        );\n\n        // No collision: Going away from a face\n        cast_with_test_ray(\n            &ctx,\n            [3.0, 1.5, 1.5],\n            [4.5, 1.5, 1.5],\n            |ray, tanh_distance| {\n                assert!(chunk_ray_cast_wrapper(&ctx, ray, tanh_distance).is_none());\n            },\n        );\n\n        // No collision: Past cast endpoint\n        cast_with_test_ray(\n            &ctx,\n            [8.0, 1.5, 1.5],\n            [3.0, 1.5, 1.5],\n            |ray, tanh_distance| {\n                assert!(chunk_ray_cast_wrapper(&ctx, ray, tanh_distance).is_none());\n            },\n        );\n    }\n\n    /// Tests that colliding with a face from the back side is impossible. Note that colliding\n    /// with the back side of an edge or vertex is still possible. 
Getting rid of these collisions\n    /// is a possible future enhancement.\n    #[test]\n    fn face_collisions_one_sided() {\n        let ctx = TestRayCastContext::new();\n\n        cast_with_test_ray(\n            &ctx,\n            [1.5, 1.5, 1.5],\n            [4.5, 1.5, 1.5],\n            |ray, tanh_distance| {\n                assert!(chunk_ray_cast_wrapper(&ctx, ray, tanh_distance).is_none());\n            },\n        )\n    }\n}\n"
  },
  {
    "path": "common/src/chunks.rs",
    "content": "use std::ops::{Index, IndexMut};\n\nuse crate::dodeca::Vertex;\n\n/// A table of chunks contained by a single node\n///\n/// Each chunk is 1/8 of a cube whose vertices are at the centers of nodes. It\n/// can also be thought of as 1/20 of a node.\n#[derive(Debug, Copy, Clone, Default)]\npub struct Chunks<T> {\n    values: [T; 20],\n}\n\nimpl<T> Index<Vertex> for Chunks<T> {\n    type Output = T;\n    fn index(&self, v: Vertex) -> &T {\n        &self.values[v as usize]\n    }\n}\n\nimpl<T> IndexMut<Vertex> for Chunks<T> {\n    fn index_mut(&mut self, v: Vertex) -> &mut T {\n        &mut self.values[v as usize]\n    }\n}\n"
  },
  {
    "path": "common/src/codec.rs",
    "content": "use anyhow::{Result, bail};\nuse serde::{Serialize, de::DeserializeOwned};\n\npub async fn send<T: Serialize + ?Sized>(stream: &mut quinn::SendStream, msg: &T) -> Result<()> {\n    let buf = postcard::to_stdvec(msg).unwrap();\n    let len = buf.len();\n    if len >= 1 << 24 {\n        bail!(\"{} byte ordered message exceeds maximum length\", len);\n    }\n    stream.write_all(&(len as u32).to_le_bytes()[0..3]).await?;\n    stream.write_all(&buf).await?;\n    Ok(())\n}\n\n/// Returns `None` on end of stream\npub async fn recv<T: DeserializeOwned>(stream: &mut quinn::RecvStream) -> Result<Option<T>> {\n    let mut tag = [0; 4];\n    match stream.read_exact(&mut tag[0..3]).await {\n        Err(quinn::ReadExactError::FinishedEarly(_)) => return Ok(None),\n        Err(quinn::ReadExactError::ReadError(e)) => return Err(e.into()),\n        Ok(()) => {}\n    }\n\n    let len = u32::from_le_bytes(tag) as usize;\n    let mut buf = vec![0; len];\n    match stream.read_exact(&mut buf).await {\n        Err(quinn::ReadExactError::FinishedEarly(_)) => return Ok(None),\n        Err(quinn::ReadExactError::ReadError(e)) => return Err(e.into()),\n        Ok(()) => {}\n    }\n    Ok(Some(postcard::from_bytes(&buf)?))\n}\n\n/// Send a message as the entirety of `stream`\npub async fn send_whole<T: Serialize + ?Sized>(\n    mut stream: quinn::SendStream,\n    msg: &T,\n) -> std::result::Result<(), quinn::WriteError> {\n    let buf = postcard::to_stdvec(msg).unwrap();\n    stream.write_all(&buf).await?;\n    Ok(())\n}\n\n/// Receive the entirety of `stream` as a `T`\npub async fn recv_whole<T: DeserializeOwned>(\n    size_limit: usize,\n    mut stream: quinn::RecvStream,\n) -> Result<T> {\n    let buf = stream.read_to_end(size_limit).await?;\n    Ok(postcard::from_bytes(&buf)?)\n}\n"
  },
  {
    "path": "common/src/collision_math.rs",
    "content": "use crate::math::{MDirection, MIsometry, MPoint, MVector};\n\n/// A ray in hyperbolic space. The position and direction must be orthogonal:\n/// `mip(position, direction) == 0`.\n#[derive(Debug)]\npub struct Ray {\n    pub position: MPoint<f32>,\n    pub direction: MDirection<f32>,\n}\n\nimpl Ray {\n    /// Constructs a new `Ray`. It is the caller's responsibility to ensure that\n    /// `position` and `direction` are orthogonal.\n    pub fn new(position: MPoint<f32>, direction: MDirection<f32>) -> Ray {\n        Ray {\n            position,\n            direction,\n        }\n    }\n\n    /// Returns an unnormalized vector representing a point along this ray\n    /// `atanh(tanh_distance)` units away from the origin.\n    pub fn ray_point(&self, tanh_distance: f32) -> MVector<f32> {\n        self.position.as_ref() + self.direction.as_ref() * tanh_distance\n    }\n\n    /// Finds the tanh of the distance a sphere will have to travel along the ray before it\n    /// intersects the given plane.\n    pub fn solve_sphere_plane_intersection(\n        &self,\n        plane_normal: &MDirection<f32>,\n        sinh_radius: f32,\n    ) -> Option<f32> {\n        let mip_pos_a = self.position.mip(plane_normal);\n        let mip_dir_a = self.direction.mip(plane_normal);\n\n        solve_quadratic(\n            mip_pos_a.powi(2) - sinh_radius.powi(2),\n            mip_pos_a * mip_dir_a,\n            mip_dir_a.powi(2) + sinh_radius.powi(2),\n        )\n    }\n\n    /// Finds the tanh of the distance a sphere will have to travel along the ray before it\n    /// intersects the given line.\n    pub fn solve_sphere_line_intersection(\n        &self,\n        line_normal1: &MDirection<f32>,\n        line_normal0: &MDirection<f32>,\n        sinh_radius: f32,\n    ) -> Option<f32> {\n        let mip_pos_a = self.position.mip(line_normal0);\n        let mip_dir_a = self.direction.mip(line_normal0);\n        let mip_pos_b = self.position.mip(line_normal1);\n        let 
mip_dir_b = self.direction.mip(line_normal1);\n\n        solve_quadratic(\n            mip_pos_a.powi(2) + mip_pos_b.powi(2) - sinh_radius.powi(2),\n            mip_pos_a * mip_dir_a + mip_pos_b * mip_dir_b,\n            mip_dir_a.powi(2) + mip_dir_b.powi(2) + sinh_radius.powi(2),\n        )\n    }\n\n    /// Finds the tanh of the distance a sphere will have to travel along the ray before it\n    /// intersects the given point.\n    pub fn solve_sphere_point_intersection(\n        &self,\n        point_normal0: &MDirection<f32>,\n        point_normal1: &MDirection<f32>,\n        point_normal2: &MDirection<f32>,\n        sinh_radius: f32,\n    ) -> Option<f32> {\n        let mip_pos_a = self.position.mip(point_normal0);\n        let mip_dir_a = self.direction.mip(point_normal0);\n        let mip_pos_b = self.position.mip(point_normal1);\n        let mip_dir_b = self.direction.mip(point_normal1);\n        let mip_pos_c = self.position.mip(point_normal2);\n        let mip_dir_c = self.direction.mip(point_normal2);\n\n        solve_quadratic(\n            mip_pos_a.powi(2) + mip_pos_b.powi(2) + mip_pos_c.powi(2) - sinh_radius.powi(2),\n            mip_pos_a * mip_dir_a + mip_pos_b * mip_dir_b + mip_pos_c * mip_dir_c,\n            mip_dir_a.powi(2) + mip_dir_b.powi(2) + mip_dir_c.powi(2) + sinh_radius.powi(2),\n        )\n    }\n\n    /// Finds the tanh of the distance a point will have to travel along a ray before it\n    /// intersects the given plane.\n    pub fn solve_point_plane_intersection(&self, plane_normal: &MDirection<f32>) -> Option<f32> {\n        let mip_pos_a = self.position.mip(plane_normal);\n        let mip_dir_a = self.direction.mip(plane_normal);\n\n        let result = -mip_pos_a / mip_dir_a;\n        if result.is_finite() && result > 0.0 {\n            Some(result)\n        } else {\n            None\n        }\n    }\n}\n\nimpl std::ops::Mul<&Ray> for MIsometry<f32> {\n    type Output = Ray;\n\n    #[inline]\n    fn mul(self, rhs: &Ray) -> 
Self::Output {\n        Ray {\n            position: self * rhs.position,\n            direction: self * rhs.direction,\n        }\n    }\n}\n\n/// Finds the lower solution `x` of `constant_term + 2 * half_linear_term * x + quadratic_term * x * x == 0`\n/// if such a solution exists and is non-negative. Assumes that `quadratic_term` is positive. Double-roots are\n/// ignored.\n///\n/// If the lower solution is negative, but a small perturbation to the constant term would make it 0, this function\n/// returns 0.\nfn solve_quadratic(constant_term: f32, half_linear_term: f32, quadratic_term: f32) -> Option<f32> {\n    const EPSILON: f32 = 1e-4;\n\n    // If the linear term is positive, the lower solution is negative, and we're not interested. If the\n    // linear term is zero, the solution can only be non-negative if the constant term is also zero,\n    // which results in a double-root, which we also ignore.\n    if half_linear_term >= 0.0 {\n        return None;\n    }\n\n    // If the constant term is negative, the lower solution must also be negative. 
To avoid precision issues\n    // allowing a collider to clip through a surface, we treat small negative constant terms as zero, which\n    // results in a lower solution of zero.\n    if constant_term <= 0.0 {\n        return if constant_term > -EPSILON {\n            Some(0.0)\n        } else {\n            None\n        };\n    }\n\n    let discriminant = half_linear_term * half_linear_term - quadratic_term * constant_term;\n    if discriminant <= 0.0 {\n        return None;\n    }\n\n    // We use an alternative quadratic formula to ensure that we return a positive number if `constant_term > 0.0`.\n    // Otherwise, the edge case of a small positive `constant_term` could be mishandled.\n    // The denominator cannot be zero because both of its terms are positive.\n    Some(constant_term / (-half_linear_term + discriminant.sqrt()))\n}\n\n#[cfg(test)]\nmod tests {\n    use approx::assert_abs_diff_eq;\n\n    use super::*;\n\n    #[test]\n    fn solve_sphere_plane_intersection_example() {\n        // Hit the z=0 plane with a radius of 0.2\n        let ray = MIsometry::translation_along(&na::Vector3::new(0.0, 0.0, -0.5))\n            * &Ray::new(\n                MPoint::origin(),\n                MDirection::new_unchecked(0.8, 0.0, 0.6, 0.0),\n            );\n        let normal = -MDirection::z();\n        let hit_point = ray\n            .ray_point(\n                ray.solve_sphere_plane_intersection(&normal, 0.2_f32.sinh())\n                    .unwrap(),\n            )\n            .normalized_point();\n\n        assert_abs_diff_eq!(hit_point.mip(&normal), 0.2_f32.sinh(), epsilon = 1e-4);\n    }\n\n    #[test]\n    fn solve_sphere_plane_intersection_direct_hit() {\n        // Directly hit the z=0 plane with a ray 0.5 units away and a radius of 0.2.\n        let ray = MIsometry::translation_along(&na::Vector3::new(0.0, 0.0, -0.5))\n            * &Ray::new(MPoint::origin(), MDirection::z());\n        let normal = -MDirection::z();\n        assert_abs_diff_eq!(\n  
          ray.solve_sphere_plane_intersection(&normal, 0.2_f32.sinh())\n                .unwrap(),\n            0.3_f32.tanh(),\n            epsilon = 1e-4\n        );\n    }\n\n    #[test]\n    fn solve_sphere_plane_intersection_miss() {\n        // No collision with the plane anywhere along the ray's line\n        let ray = MIsometry::translation_along(&na::Vector3::new(0.0, 0.0, -0.5))\n            * &Ray::new(MPoint::origin(), MDirection::x());\n        let normal = -MDirection::z();\n        assert!(\n            ray.solve_sphere_plane_intersection(&normal, 0.2_f32.sinh())\n                .is_none()\n        );\n    }\n\n    #[test]\n    fn solve_sphere_plane_intersection_margin() {\n        // Sphere is already contacting the plane, with some error\n        let ray = MIsometry::translation_along(&na::Vector3::new(0.0, 0.0, -0.2))\n            * &Ray::new(MPoint::origin(), MDirection::z());\n        let normal = -MDirection::z();\n        assert_eq!(\n            ray.solve_sphere_plane_intersection(&normal, 0.2001_f32.sinh())\n                .unwrap(),\n            0.0\n        );\n    }\n\n    #[test]\n    fn solve_sphere_line_intersection_example() {\n        // Hit the x=z=0 line with a radius of 0.2\n        let ray = MIsometry::translation_along(&na::Vector3::new(0.0, 0.0, -0.5))\n            * &Ray::new(\n                MPoint::origin(),\n                MVector::new(1.0, 2.0, 3.0, 0.0).normalized_direction(),\n            );\n        let line_normal0 = MDirection::x();\n        let line_normal1 = MDirection::z();\n        let hit_point = ray\n            .ray_point(\n                ray.solve_sphere_line_intersection(&line_normal0, &line_normal1, 0.2_f32.sinh())\n                    .unwrap(),\n            )\n            .normalized_point();\n        // Measue the distance from hit_point to the line and ensure it's equal to the radius\n        assert_abs_diff_eq!(\n            (hit_point.mip(&line_normal0).powi(2) + 
hit_point.mip(&line_normal1).powi(2)).sqrt(),\n            0.2_f32.sinh(),\n            epsilon = 1e-4\n        );\n    }\n\n    #[test]\n    fn solve_sphere_line_intersection_direct_hit() {\n        // Directly hit the x=z=0 line with a ray 0.5 units away and a radius of 0.2.\n\n        // Ensure the ray is slightly off-center so that the distance math is shown to be correct\n        let ray = MIsometry::translation_along(&na::Vector3::new(0.0, 0.7, 0.0))\n            * MIsometry::translation_along(&na::Vector3::new(0.0, 0.0, -0.5))\n            * &Ray::new(MPoint::origin(), MDirection::z());\n        let line_normal0 = MDirection::x();\n        let line_normal1 = MDirection::z();\n        assert_abs_diff_eq!(\n            ray.solve_sphere_line_intersection(&line_normal0, &line_normal1, 0.2_f32.sinh())\n                .unwrap(),\n            0.3_f32.tanh(),\n            epsilon = 1e-4\n        );\n    }\n\n    #[test]\n    fn solve_sphere_line_intersection_miss() {\n        // No collision with the line anywhere along the ray's line\n        let ray = MIsometry::translation_along(&na::Vector3::new(0.0, 0.0, -0.5))\n            * &Ray::new(MPoint::origin(), MDirection::x());\n        let line_normal0 = MDirection::x();\n        let line_normal1 = MDirection::z();\n        assert!(\n            ray.solve_sphere_line_intersection(&line_normal0, &line_normal1, 0.2_f32.sinh())\n                .is_none()\n        );\n    }\n\n    #[test]\n    fn solve_sphere_line_intersection_margin() {\n        // Sphere is already contacting the line, with some error\n        let ray = MIsometry::translation_along(&na::Vector3::new(0.0, 0.0, -0.2))\n            * &Ray::new(MPoint::origin(), MDirection::z());\n        let line_normal0 = MDirection::x();\n        let line_normal1 = MDirection::z();\n        assert_eq!(\n            ray.solve_sphere_line_intersection(&line_normal0, &line_normal1, 0.2001_f32.sinh())\n                .unwrap(),\n            0.0\n        );\n    }\n\n    
#[test]\n    fn solve_sphere_line_intersection_precision() {\n        // Using ray coordinates determined empirically from manual playtesting, show that the\n        // current implementation of `solve_sphere_line_intersection` provides better results\n        // than an arguably-simpler implementation involving the line's position and direction.\n        // Similar reasoning can also apply to `solve_sphere_point_intersection` even though it is\n        // not tested explicitly in the same way.\n        let ray = Ray::new(\n            MPoint::new_unchecked(-0.019093871, -0.0014823675, 0.059645057, 1.0019588),\n            MDirection::new_unchecked(-0.02954007, 0.9965602, 0.07752046, 0.003702946),\n        );\n        let line_normal0 = MDirection::<f32>::x();\n        let line_normal1 = MDirection::<f32>::y();\n        let radius = 0.019090926_f32;\n        // The following returns wrong results in the other implementation, so we test this case\n        // to make sure there are no regressions.\n        assert!(\n            ray.solve_sphere_line_intersection(&line_normal0, &line_normal1, radius.sinh())\n                .is_none()\n        );\n    }\n\n    #[test]\n    fn solve_sphere_point_intersection_example() {\n        // Hit the origin with a radius of 0.2\n        let ray = MIsometry::translation_along(&na::Vector3::new(0.0, 0.0, -0.5))\n            * &Ray::new(\n                MPoint::origin(),\n                MVector::new(1.0, 2.0, 6.0, 0.0).normalized_direction(),\n            );\n        let point_position = MPoint::origin();\n        let point_normal0 = MDirection::x();\n        let point_normal1 = MDirection::y();\n        let point_normal2 = MDirection::z();\n        let hit_point = ray\n            .ray_point(\n                ray.solve_sphere_point_intersection(\n                    &point_normal0,\n                    &point_normal1,\n                    &point_normal2,\n                    0.2_f32.sinh(),\n                )\n                
.unwrap(),\n            )\n            .normalized_point();\n        assert_abs_diff_eq!(\n            -hit_point.mip(&point_position),\n            0.2_f32.cosh(),\n            epsilon = 1e-4\n        );\n    }\n\n    #[test]\n    fn solve_sphere_point_intersection_direct_hit() {\n        // Directly hit the origin with a ray 0.5 units away and a radius of 0.2.\n        let ray = MIsometry::translation_along(&na::Vector3::new(0.0, 0.0, -0.5))\n            * &Ray::new(MPoint::origin(), MDirection::z());\n        let point_normal0 = MDirection::x();\n        let point_normal1 = MDirection::y();\n        let point_normal2 = MDirection::z();\n        assert_abs_diff_eq!(\n            ray.solve_sphere_point_intersection(\n                &point_normal0,\n                &point_normal1,\n                &point_normal2,\n                0.2_f32.sinh()\n            )\n            .unwrap(),\n            0.3_f32.tanh(),\n            epsilon = 1e-4\n        );\n    }\n\n    #[test]\n    fn solve_sphere_point_intersection_miss() {\n        // No collision with the point anywhere along the ray's line\n        let ray = MIsometry::translation_along(&na::Vector3::new(0.0, 0.0, -0.5))\n            * &Ray::new(MPoint::origin(), MDirection::x());\n        let point_normal0 = MDirection::x();\n        let point_normal1 = MDirection::y();\n        let point_normal2 = MDirection::z();\n        assert!(\n            ray.solve_sphere_point_intersection(\n                &point_normal0,\n                &point_normal1,\n                &point_normal2,\n                0.2_f32.sinh()\n            )\n            .is_none()\n        );\n    }\n\n    #[test]\n    fn solve_sphere_point_intersection_margin() {\n        // Sphere is already contacting the point, with some error\n        let ray = MIsometry::translation_along(&na::Vector3::new(0.0, 0.0, -0.2))\n            * &Ray::new(MPoint::origin(), MDirection::z());\n        let point_normal0 = MDirection::x();\n        let point_normal1 = 
MDirection::y();\n        let point_normal2 = MDirection::z();\n        assert_eq!(\n            ray.solve_sphere_point_intersection(\n                &point_normal0,\n                &point_normal1,\n                &point_normal2,\n                0.2001_f32.sinh()\n            )\n            .unwrap(),\n            0.0\n        );\n    }\n\n    #[test]\n    fn foo() {\n        // Hit the z=0 plane\n        let ray = MIsometry::translation_along(&na::Vector3::new(0.0, 0.0, -0.5))\n            * &Ray::new(\n                MPoint::origin(),\n                MDirection::new_unchecked(0.8, 0.0, 0.6, 0.0),\n            );\n        let normal = -MDirection::z();\n        let hit_point = ray\n            .ray_point(ray.solve_point_plane_intersection(&normal).unwrap())\n            .normalized_point();\n        assert_abs_diff_eq!(hit_point.mip(&normal), 0.0, epsilon = 1e-4);\n    }\n\n    #[test]\n    fn solve_quadratic_example() {\n        let a = 1.0;\n        let b = -2.0;\n        let c = 0.2;\n        let x = solve_quadratic(c, b / 2.0, a).unwrap();\n\n        // x should be a solution\n        assert_abs_diff_eq!(a * x * x + b * x + c, 0.0, epsilon = 1e-4);\n\n        // x should be the smallest solution, less than the parabola's vertex.\n        assert!(x < -b / (2.0 * a));\n    }\n}\n"
  },
  {
    "path": "common/src/cursor.rs",
    "content": "use std::sync::LazyLock;\n\nuse crate::dodeca::{SIDE_COUNT, Side, Vertex};\nuse crate::graph::{Graph, NodeId};\nuse crate::node::ChunkId;\n\n/// Navigates the cubic dual of a graph\n#[derive(Debug, Copy, Clone, Eq, PartialEq)]\npub struct Cursor {\n    node: NodeId,\n    a: Side,\n    b: Side,\n    c: Side,\n}\n\nimpl Cursor {\n    /// Construct a canonical cursor for the cube at `vertex` of `node`\n    pub fn from_vertex(node: NodeId, vertex: Vertex) -> Self {\n        let [a, b, c] = vertex.canonical_sides();\n        Self { node, a, b, c }\n    }\n\n    /// Get the neighbor towards `dir`\n    pub fn step(self, graph: &Graph, dir: Dir) -> Option<Self> {\n        // For a cube identified by three dodecahedral faces sharing a vertex, we identify its\n        // cubical neighbors by taking each vertex incident to exactly two of the faces and the face\n        // of the three it's not incident to, and selecting the cube represented by the new vertex\n        // in both the dodecahedron sharing the face unique to the new vertex and that sharing the\n        // face that the new vertex isn't incident to.\n        let (a, b, c) = (self.a, self.b, self.c);\n        let a_prime = NEIGHBORS[a as usize][b as usize][c as usize].unwrap();\n        let b_prime = NEIGHBORS[b as usize][a as usize][c as usize].unwrap();\n        let c_prime = NEIGHBORS[c as usize][b as usize][a as usize].unwrap();\n        use Dir::*;\n        let (sides, neighbor) = match dir {\n            Left => ((a, b, c_prime), c),\n            Right => ((a, b, c_prime), c_prime),\n            Down => ((a, b_prime, c), b),\n            Up => ((a, b_prime, c), b_prime),\n            Forward => ((a_prime, b, c), a),\n            Back => ((a_prime, b, c), a_prime),\n        };\n        let node = graph.neighbor(self.node, neighbor)?;\n        Some(Self {\n            node,\n            a: sides.0,\n            b: sides.1,\n            c: sides.2,\n        })\n    }\n\n    /// Node and 
dodecahedral vertex that contains the representation for this cube in the graph\n    pub fn canonicalize(self, graph: &Graph) -> Option<ChunkId> {\n        graph.canonicalize(ChunkId::new(\n            self.node,\n            Vertex::from_sides([self.a, self.b, self.c]).unwrap(),\n        ))\n    }\n}\n\n#[derive(Debug, Copy, Clone, Eq, PartialEq)]\npub enum Dir {\n    Left,\n    Right,\n    Down,\n    Up,\n    Forward,\n    Back,\n}\nimpl Dir {\n    pub fn iter() -> impl ExactSizeIterator<Item = Self> + Clone {\n        use Dir::*;\n        [Left, Right, Down, Up, Forward, Back].into_iter()\n    }\n\n    /// Returns the unit vector corresponding to the direction.\n    pub fn vector(self) -> na::Vector3<isize> {\n        use Dir::*;\n        match self {\n            Up => na::Vector3::x(),\n            Down => -na::Vector3::x(),\n            Left => na::Vector3::y(),\n            Right => -na::Vector3::y(),\n            Forward => na::Vector3::z(),\n            Back => -na::Vector3::z(),\n        }\n    }\n}\n\n/// Returns a direction's opposite direction.\nimpl std::ops::Neg for Dir {\n    type Output = Self;\n    fn neg(self) -> Self::Output {\n        use Dir::*;\n        match self {\n            Left => Right,\n            Right => Left,\n            Down => Up,\n            Up => Down,\n            Forward => Back,\n            Back => Forward,\n        }\n    }\n}\n\n/// Maps every (A, B, C) sharing a vertex to A', the side that shares edges with B and C but not A\nstatic NEIGHBORS: LazyLock<[[[Option<Side>; SIDE_COUNT]; SIDE_COUNT]; SIDE_COUNT]> =\n    LazyLock::new(|| {\n        let mut result = [[[None; SIDE_COUNT]; SIDE_COUNT]; SIDE_COUNT];\n        for a in Side::iter() {\n            for b in Side::iter() {\n                for c in Side::iter() {\n                    for s in Side::iter() {\n                        if s == a || s == b || s == c {\n                            continue;\n                        }\n                        let (opposite, 
shared) =\n                            match (s.adjacent_to(a), s.adjacent_to(b), s.adjacent_to(c)) {\n                                (false, true, true) => (a, (b, c)),\n                                (true, false, true) => (b, (a, c)),\n                                (true, true, false) => (c, (a, b)),\n                                _ => continue,\n                            };\n                        result[opposite as usize][shared.0 as usize][shared.1 as usize] = Some(s);\n                    }\n                }\n            }\n        }\n        result\n    });\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::{proto::Position, traversal::ensure_nearby};\n\n    #[test]\n    fn neighbor_sanity() {\n        for v in Vertex::iter() {\n            let [a, b, c] = v.canonical_sides();\n            assert_eq!(\n                NEIGHBORS[a as usize][b as usize][c as usize],\n                NEIGHBORS[a as usize][c as usize][b as usize]\n            );\n        }\n    }\n\n    #[test]\n    fn cursor_identities() {\n        let mut graph = Graph::new(1);\n        ensure_nearby(&mut graph, &Position::origin(), 3.0);\n        let start = Cursor::from_vertex(NodeId::ROOT, Vertex::A);\n        let wiggle = |dir| {\n            let x = start.step(&graph, dir).unwrap();\n            assert!(x != start);\n            assert_eq!(x.step(&graph, -dir).unwrap(), start);\n        };\n        wiggle(Dir::Left);\n        wiggle(Dir::Right);\n        wiggle(Dir::Down);\n        wiggle(Dir::Up);\n        wiggle(Dir::Forward);\n        wiggle(Dir::Back);\n\n        let vcycle = |dir| {\n            // Five steps because an edge in the dual honeycomb has\n            // five cubes around itself, not four as in Euclidean space.\n            let looped = start\n                .step(&graph, dir)\n                .expect(\"positive\")\n                .step(&graph, Dir::Down)\n                .expect(\"down\")\n                .step(&graph, -dir)\n                
.expect(\"negative\")\n                .step(&graph, Dir::Up)\n                .expect(\"up\")\n                .step(&graph, dir)\n                .expect(\"positive\");\n            assert_eq!(\n                looped.canonicalize(&graph).unwrap(),\n                ChunkId::new(NodeId::ROOT, Vertex::A),\n            );\n        };\n        vcycle(Dir::Left);\n        vcycle(Dir::Right);\n        vcycle(Dir::Forward);\n        vcycle(Dir::Back);\n    }\n}\n"
  },
  {
    "path": "common/src/dodeca.rs",
    "content": "//! Tools for processing the geometry of a right dodecahedron\n\nuse serde::{Deserialize, Serialize};\n\nuse crate::math::{MDirection, MIsometry, MPoint};\nuse crate::voxel_math::ChunkAxisPermutation;\n\n/// Sides of a right dodecahedron\n///\n/// These sides are arranged based on the following adjacency graph, although it\n/// is recommended not to hardcode side names in other code:\n/// ```nocode\n///          A\n/// (D-) I-E-B-C-D (-I)\n///  (K-) L-G-F-H-K (-L)\n///           J\n/// ```\n/// The above adjacency graph can be read as a map of a globe, where side A is\n/// at the north pole, and side J is at the south pole.\n#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]\npub enum Side {\n    A,\n    B,\n    C,\n    D,\n    E,\n    F,\n    G,\n    H,\n    I,\n    J,\n    K,\n    L,\n}\n\n// TODO: Remove in favor of using Side::VALUES.len() directly when https://github.com/rust-lang/rust-analyzer/issues/21478 is resolved\npub const SIDE_COUNT: usize = Side::VALUES.len();\n\nimpl Side {\n    pub const VALUES: [Self; 12] = [\n        Self::A,\n        Self::B,\n        Self::C,\n        Self::D,\n        Self::E,\n        Self::F,\n        Self::G,\n        Self::H,\n        Self::I,\n        Self::J,\n        Self::K,\n        Self::L,\n    ];\n\n    pub fn iter() -> impl ExactSizeIterator<Item = Self> {\n        Self::VALUES.iter().copied()\n    }\n\n    /// Whether `self` and `other` share an edge\n    ///\n    /// `false` when `self == other`.\n    #[inline]\n    pub fn adjacent_to(self, other: Side) -> bool {\n        data::ADJACENT[self as usize][other as usize]\n    }\n\n    /// Outward normal vector of this side\n    #[inline]\n    pub fn normal(self) -> &'static MDirection<f32> {\n        &data::SIDE_NORMALS_F32[self as usize]\n    }\n\n    /// Outward normal vector of this side\n    #[inline]\n    pub fn normal_f64(self) -> &'static MDirection<f64> {\n        &data::SIDE_NORMALS_F64[self as 
usize]\n    }\n\n    /// Reflection across this side. Using this matrix is the standard way to\n    /// switch between the coordinate systems between adjacent nodes.\n    #[inline]\n    pub fn reflection(self) -> &'static MIsometry<f32> {\n        &data::REFLECTIONS_F32[self as usize]\n    }\n\n    /// Reflection across this side. Using this matrix is the standard way to\n    /// switch between the coordinate systems between adjacent nodes.\n    #[inline]\n    pub fn reflection_f64(self) -> &'static MIsometry<f64> {\n        &data::REFLECTIONS_F64[self as usize]\n    }\n\n    /// Whether `p` is opposite the dodecahedron across the plane containing `self`\n    #[inline]\n    pub fn is_facing(self, p: &MPoint<f32>) -> bool {\n        let r = self.reflection().row(3).clone_owned();\n        (r * na::Vector4::from(*p)).x < p.w\n    }\n}\n\n/// Vertices of a right dodecahedron\n///\n/// In Hypermine, each dodecahedral node consists of 20 chunks, one for each\n/// vertex, shaped like an irregular cube. 
Each chunk can also be thought of as\n/// an eighth of a cube in the dual cubic tiling.\n///\n/// Each chunk can be given its own coordinate system, where its right-angled\n/// corner (or, in other words, the actual vertex of the dodecahedron) is at the\n/// origin, and the x, y, and z axes each run alongside an edge of the chunk\n/// orthogoal to the respective \"canonical\" side (See `canonical_sides`).\n///\n/// Since a vertex can be identified with a chunk, `Vertex` contains methods\n/// such as `node_to_dual` and `dual_to_node` that return matrices to allow one\n/// to freely swap between the coordinate system for the node and the coordinate\n/// system for the chunk.\n#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Serialize, Deserialize)]\npub enum Vertex {\n    A,\n    B,\n    C,\n    D,\n    E,\n    F,\n    G,\n    H,\n    I,\n    J,\n    K,\n    L,\n    M,\n    N,\n    O,\n    P,\n    Q,\n    R,\n    S,\n    T,\n}\n\n// TODO: Remove in favor of using Vertex::VALUES.len() directly when https://github.com/rust-lang/rust-analyzer/issues/21478 is resolved\npub const VERTEX_COUNT: usize = Vertex::VALUES.len();\n\nimpl Vertex {\n    pub const VALUES: [Self; 20] = [\n        Self::A,\n        Self::B,\n        Self::C,\n        Self::D,\n        Self::E,\n        Self::F,\n        Self::G,\n        Self::H,\n        Self::I,\n        Self::J,\n        Self::K,\n        Self::L,\n        Self::M,\n        Self::N,\n        Self::O,\n        Self::P,\n        Self::Q,\n        Self::R,\n        Self::S,\n        Self::T,\n    ];\n\n    pub fn iter() -> impl ExactSizeIterator<Item = Self> {\n        Self::VALUES.iter().copied()\n    }\n\n    /// Vertex shared by three sides, if any\n    #[inline]\n    pub fn from_sides(sides: [Side; 3]) -> Option<Self> {\n        data::SIDES_TO_VERTEX[sides[0] as usize][sides[1] as usize][sides[2] as usize]\n    }\n\n    /// Sides incident to this vertex, in canonical order.\n    ///\n    /// This canonical order determines the 
X, Y, and Z axes of the chunk\n    /// corresponding to the vertex.\n    #[inline]\n    pub fn canonical_sides(self) -> [Side; 3] {\n        data::VERTEX_CANONICAL_SIDES[self as usize]\n    }\n\n    /// Vertices adjacent to this vertex in canonical order.\n    ///\n    /// The canonical order of adjacent vertices is based on the canonical order\n    /// of sides incident to the vertex, as each of the three adjacent vertices\n    /// corresponds to one of the three sides. As for which side, when two\n    /// vertices are adjacent, they share two out of three sides of the\n    /// dodecahedron. The side they do _not_ share is the side they correspond\n    /// to.\n    ///\n    /// Put another way, anything leaving a chunk in the negative-X direction\n    /// will end up crossing `canonical_sides()[0]`, while anything leaving a\n    /// chunk in the positive-X direction will end up arriving at\n    /// `adjacent_vertices()[0]`.\n    #[inline]\n    pub fn adjacent_vertices(self) -> [Vertex; 3] {\n        data::ADJACENT_VERTICES[self as usize]\n    }\n\n    /// Chunk axes permutations for vertices adjacent to this vertex in\n    /// canonical order.\n    ///\n    /// The chunks of two adjacent vertices meet at a plane. 
When swiching\n    /// reference frames from one vertex to another, it is necessary to reflect\n    /// about this plane and then apply the permutation returned by this\n    /// function.\n    #[inline]\n    pub fn chunk_axis_permutations(self) -> &'static [ChunkAxisPermutation; 3] {\n        &data::CHUNK_AXIS_PERMUTATIONS[self as usize]\n    }\n\n    /// For each vertex of the cube dual to this dodecahedral vertex, provides an iterator of at\n    /// most 3 steps to reach the corresponding graph node, and binary coordinates of the vertex in\n    /// question with respect to the origin vertex of the cube.\n    pub fn dual_vertices(\n        self,\n    ) -> impl ExactSizeIterator<Item = ([bool; 3], impl ExactSizeIterator<Item = Side>)> {\n        let [a, b, c] = self.canonical_sides();\n        let verts = [\n            ([Side::A; 3], 0, [false, false, false]),\n            ([c, Side::A, Side::A], 1, [false, false, true]),\n            ([b, Side::A, Side::A], 1, [false, true, false]),\n            ([b, c, Side::A], 2, [false, true, true]),\n            ([a, Side::A, Side::A], 1, [true, false, false]),\n            ([a, c, Side::A], 2, [true, false, true]),\n            ([a, b, Side::A], 2, [true, true, false]),\n            ([a, b, c], 3, [true, true, true]),\n        ];\n        (0..8).map(move |i| {\n            let (sides, len, coords) = verts[i];\n            (coords, (0..len).map(move |i| sides[i]))\n        })\n    }\n\n    /// Transform from euclidean chunk coordinates to hyperbolic node space\n    pub fn chunk_to_node(self) -> na::Matrix4<f32> {\n        na::Matrix4::from(*self.dual_to_node())\n            * na::Matrix4::new_scaling(1.0 / Self::dual_to_chunk_factor())\n    }\n\n    /// Transform from euclidean chunk coordinates to hyperbolic node space\n    pub fn chunk_to_node_f64(self) -> na::Matrix4<f64> {\n        na::Matrix4::from(*self.dual_to_node_f64())\n            * na::Matrix4::new_scaling(1.0 / Self::dual_to_chunk_factor_f64())\n    }\n\n    /// 
Transform from hyperbolic node space to euclidean chunk coordinates\n    pub fn node_to_chunk(self) -> na::Matrix4<f32> {\n        na::Matrix4::new_scaling(Self::dual_to_chunk_factor())\n            * na::Matrix4::from(*self.node_to_dual())\n    }\n\n    /// Transform from hyperbolic node space to euclidean chunk coordinates\n    pub fn node_to_chunk_f64(self) -> na::Matrix4<f64> {\n        na::Matrix4::new_scaling(Self::dual_to_chunk_factor_f64())\n            * na::Matrix4::from(*self.node_to_dual_f64())\n    }\n\n    /// Transform from cube-centric coordinates to dodeca-centric coordinates\n    pub fn dual_to_node(self) -> &'static MIsometry<f32> {\n        &data::DUAL_TO_NODE_F32[self as usize]\n    }\n\n    /// Transform from cube-centric coordinates to dodeca-centric coordinates\n    pub fn dual_to_node_f64(self) -> &'static MIsometry<f64> {\n        &data::DUAL_TO_NODE_F64[self as usize]\n    }\n\n    /// Transform from dodeca-centric coordinates to cube-centric coordinates\n    pub fn node_to_dual(self) -> &'static MIsometry<f32> {\n        &data::NODE_TO_DUAL_F32[self as usize]\n    }\n\n    /// Transform from dodeca-centric coordinates to cube-centric coordinates\n    pub fn node_to_dual_f64(self) -> &'static MIsometry<f64> {\n        &data::NODE_TO_DUAL_F64[self as usize]\n    }\n\n    /// Scale factor used in conversion from cube-centric coordinates to euclidean chunk coordinates.\n    /// Scaling the x, y, and z components of a vector in cube-centric coordinates by this value\n    /// and dividing them by the w coordinate will yield euclidean chunk coordinates.\n    pub fn dual_to_chunk_factor() -> f32 {\n        *data::DUAL_TO_CHUNK_FACTOR_F32\n    }\n\n    /// Scale factor used in conversion from cube-centric coordinates to euclidean chunk coordinates.\n    /// Scaling the x, y, and z components of a vector in cube-centric coordinates by this value\n    /// and dividing them by the w coordinate will yield euclidean chunk coordinates.\n    pub fn 
dual_to_chunk_factor_f64() -> f64 {\n        *data::DUAL_TO_CHUNK_FACTOR_F64\n    }\n\n    /// Scale factor used in conversion from euclidean chunk coordinates to cube-centric coordinates.\n    /// Scaling the x, y, and z components of a vector in homogeneous euclidean chunk coordinates by this value\n    /// and lorentz-normalizing the result will yield cube-centric coordinates.\n    pub fn chunk_to_dual_factor() -> f32 {\n        *data::CHUNK_TO_DUAL_FACTOR_F32\n    }\n\n    /// Scale factor used in conversion from euclidean chunk coordinates to cube-centric coordinates.\n    /// Scaling the x, y, and z components of a vector in homogeneous euclidean chunk coordinates by this value\n    /// and lorentz-normalizing the result will yield cube-centric coordinates.\n    pub fn chunk_to_dual_factor_f64() -> f64 {\n        *data::CHUNK_TO_DUAL_FACTOR_F64\n    }\n\n    /// In dodeca-centric coordinates, the center of the smallest sphere that contains the entire\n    /// chunk defined by this vertex.\n    pub fn chunk_bounding_sphere_center(self) -> &'static MPoint<f32> {\n        &data::CHUNK_BOUNDING_SPHERE_CENTERS_F32[self as usize]\n    }\n\n    /// In dodeca-centric coordinates, the center of the smallest sphere that contains the entire\n    /// chunk defined by this vertex.\n    pub fn chunk_bounding_sphere_center_f64(self) -> &'static MPoint<f64> {\n        &data::CHUNK_BOUNDING_SPHERE_CENTERS_F64[self as usize]\n    }\n\n    /// Convenience method for `self.chunk_to_node().determinant() < 0`.\n    pub fn parity(self) -> bool {\n        data::CHUNK_TO_NODE_PARITY[self as usize]\n    }\n}\n\npub const BOUNDING_SPHERE_RADIUS_F64: f64 = 1.2264568712514068;\npub const BOUNDING_SPHERE_RADIUS: f32 = BOUNDING_SPHERE_RADIUS_F64 as f32;\n\npub const CHUNK_BOUNDING_SPHERE_RADIUS_F64: f64 = BOUNDING_SPHERE_RADIUS_F64 * 0.5;\npub const CHUNK_BOUNDING_SPHERE_RADIUS: f32 = CHUNK_BOUNDING_SPHERE_RADIUS_F64 as f32;\n\nmod data {\n    use std::array;\n    use 
std::sync::LazyLock;\n\n    use crate::dodeca::{SIDE_COUNT, Side, VERTEX_COUNT, Vertex};\n    use crate::math::{MDirection, MIsometry, MPoint, MVector, PermuteXYZ};\n    use crate::voxel_math::ChunkAxisPermutation;\n\n    /// Whether two sides share an edge\n    pub static ADJACENT: LazyLock<[[bool; SIDE_COUNT]; SIDE_COUNT]> = LazyLock::new(|| {\n        Side::VALUES.map(|side0| {\n            Side::VALUES.map(|side1| {\n                // Two sides can have the following values when taking the mip\n                // of their normals:\n                // - When identical: 1\n                // - When adjacent: 0\n                // - When two steps away: -1.618 = -phi\n                // - When antipodal: -2.618 = -phi - 1\n                // Therefore, the range (-0.5..0.5) only contains adjacent sides\n                // and is robust to numerical precision limits.\n                (-0.5..0.5).contains(&side0.normal_f64().mip(side1.normal_f64()))\n            })\n        })\n    });\n\n    /// Vector corresponding to the outer normal of each side\n    pub static SIDE_NORMALS_F64: LazyLock<[MDirection<f64>; SIDE_COUNT]> = LazyLock::new(|| {\n        // In Euclidean geometry, the coordinates of a dodecahedron's sides'\n        // normals are the same as the coordinates of the vertices of an\n        // icosahedron centered at the origin. There is a formula for these\n        // vertices' coordinates based on the golden ratio, which we take\n        // advantage of here.\n\n        // To set the w-coordinate of these normals, we add an additional\n        // constraint: The `mip` of two adjacent normals must be 0 (since this\n        // is a right-angled dodechadron). Solving for `w` gives us our\n        // `template_normal`. 
We also make sure to normalize it.\n\n        // All other normals are based on this template normal, with permuations\n        // and sign changes.\n        let phi = libm::sqrt(1.25) + 0.5; // golden ratio\n        let template_normal = MVector::new(1.0, phi, 0.0, libm::sqrt(phi)).normalized_direction();\n        let signed_template_normals = {\n            let n = template_normal;\n            [\n                MDirection::new_unchecked(n.x, n.y, n.z, n.w),\n                MDirection::new_unchecked(-n.x, n.y, -n.z, n.w),\n                MDirection::new_unchecked(n.x, -n.y, -n.z, n.w),\n                MDirection::new_unchecked(-n.x, -n.y, n.z, n.w),\n            ]\n        };\n\n        Side::VALUES.map(|side| {\n            let signed_template_normal = signed_template_normals[side as usize / 3];\n            signed_template_normal.tuv_to_xyz((3 - side as usize % 3) % 3)\n        })\n    });\n\n    /// Transform that moves from a neighbor to a reference node, for each side\n    pub static REFLECTIONS_F64: LazyLock<[MIsometry<f64>; SIDE_COUNT]> =\n        LazyLock::new(|| SIDE_NORMALS_F64.map(|r| MIsometry::reflection(&r)));\n\n    /// Sides incident to a vertex, in canonical order\n    pub static VERTEX_CANONICAL_SIDES: LazyLock<[[Side; 3]; VERTEX_COUNT]> = LazyLock::new(|| {\n        let mut result: Vec<[Side; 3]> = Vec::new();\n\n        // Rather than trying to work this out mathematically or by hand, we\n        // take the brute force approach of checking every unique triplet of\n        // vertices, adding a new vertex to the list whenever a new triplet of\n        // mutually-adjacent sides is discovered.\n        for a in Side::VALUES.iter().copied() {\n            for b in Side::VALUES[a as usize + 1..].iter().copied() {\n                for c in Side::VALUES[b as usize + 1..].iter().copied() {\n                    if !a.adjacent_to(b) || !b.adjacent_to(c) || !c.adjacent_to(a) {\n                        continue;\n                    }\n              
      result.push([a, b, c]);\n                }\n            }\n        }\n\n        result.try_into().expect(\"exactly 20 vertices expected\")\n    });\n\n    /// Which vertices are adjacent to other vertices and opposite the canonical sides\n    pub static ADJACENT_VERTICES: LazyLock<[[Vertex; 3]; VERTEX_COUNT]> = LazyLock::new(|| {\n        Vertex::VALUES.map(|vertex| {\n            let canonical_sides = vertex.canonical_sides();\n            array::from_fn(|canonical_sides_index| {\n                // Try every possible side to find an adjacent vertex.\n                for test_side in Side::iter() {\n                    if test_side == canonical_sides[canonical_sides_index] {\n                        continue;\n                    }\n                    let mut test_sides = canonical_sides;\n                    test_sides[canonical_sides_index] = test_side;\n                    if let Some(adjacent_vertex) = Vertex::from_sides(test_sides) {\n                        return adjacent_vertex;\n                    }\n                }\n                panic!(\"No suitable vertex found\");\n            })\n        })\n    });\n\n    /// Which transformations have to be done after a reflection to switch reference frames from one vertex\n    /// to one of its adjacent vertices (ordered similarly to ADJACENT_VERTICES)\n    pub static CHUNK_AXIS_PERMUTATIONS: LazyLock<[[ChunkAxisPermutation; 3]; VERTEX_COUNT]> =\n        LazyLock::new(|| {\n            Vertex::VALUES.map(|vertex| {\n                let canonical_sides = vertex.canonical_sides();\n                array::from_fn(|canonical_sides_index| {\n                    // Try every possible side to find an adjacent vertex.\n                    for test_side in Side::iter() {\n                        if test_side == canonical_sides[canonical_sides_index] {\n                            continue;\n                        }\n                        let mut test_sides = canonical_sides;\n                        
test_sides[canonical_sides_index] = test_side;\n                        let Some(adjacent_vertex) = Vertex::from_sides(test_sides) else {\n                            continue;\n                        };\n                        // Compare the natural permutation of sides after a reflection from `vertex` to `adjacent_vertex`\n                        // to the canonical permutation of the sides for `adjacent_vertex`.\n                        return ChunkAxisPermutation::from_permutation(\n                            test_sides,\n                            adjacent_vertex.canonical_sides(),\n                        );\n                    }\n                    panic!(\"No suitable vertex found\");\n                })\n            })\n        });\n\n    /// Transform that converts from cube-centric coordinates to dodeca-centric coordinates\n    pub static DUAL_TO_NODE_F64: LazyLock<[MIsometry<f64>; VERTEX_COUNT]> = LazyLock::new(|| {\n        let mip_origin_normal = MVector::origin().mip(Side::A.normal_f64()); // This value is the same for every side\n        Vertex::VALUES.map(|vertex| {\n            let [a, b, c] = vertex.canonical_sides();\n\n            // The matrix we want to produce is a change-of-basis matrix,\n            // consistint of four columns representing vectors with\n            // dodeca-centric coordinates, where each vector represents one of\n            // the basis vectors in cube-centric coordinates.\n\n            // Since adjacent normals are already orthogonal, we can use them\n            // as-is for the first three columns of this matrix. 
We just need to\n            // negate them so that they point towards the origin instead of away\n            // because the dodeca's origin has positive cube-centric\n            // coordinates.\n\n            // As for the last column of the change-of-basis matrix, that would\n            // be the cube-centric origin in dodeca-centric coordinates, or in\n            // other words, the vertex's location in dodeca-centric coordinates.\n            // To find this, we start at the origin and project the vector to be\n            // orthogonal to each of the three normals, one at a time. Because\n            // these three normals are orthogonal to each other, the resulting\n            // formula is simple.\n\n            // Note that part of the projection formula requires taking the\n            // `mip` of a normal vector and the origin, but this is a constant\n            // value that doesn't depend on the normal vector, so the formula\n            // used here takes advantage of that.\n            let vertex_position = (MVector::origin()\n                - (a.normal_f64().as_ref() + b.normal_f64().as_ref() + c.normal_f64().as_ref())\n                    * mip_origin_normal)\n                .normalized_point();\n            MIsometry::from_columns_unchecked(\n                &[-a.normal_f64(), -b.normal_f64(), -c.normal_f64()],\n                vertex_position,\n            )\n        })\n    });\n\n    /// Transform that converts from dodeca-centric coordinates to cube-centric coordinates\n    pub static NODE_TO_DUAL_F64: LazyLock<[MIsometry<f64>; VERTEX_COUNT]> =\n        LazyLock::new(|| DUAL_TO_NODE_F64.map(|m| m.inverse()));\n\n    pub static DUAL_TO_CHUNK_FACTOR_F64: LazyLock<f64> =\n        LazyLock::new(|| (2.0 + 5.0f64.sqrt()).sqrt());\n\n    pub static CHUNK_TO_DUAL_FACTOR_F64: LazyLock<f64> =\n        LazyLock::new(|| 1.0 / *DUAL_TO_CHUNK_FACTOR_F64);\n\n    pub static CHUNK_BOUNDING_SPHERE_CENTERS_F64: LazyLock<[MPoint<f64>; VERTEX_COUNT]> =\n   
     LazyLock::new(|| {\n            Vertex::VALUES.map(|vertex| {\n                // Chunks are most stretched between the origin and the dodeca's vertex, so finding\n                // the midpoint of these two extremes allows one to find the bounding sphere.\n                // Note that this also means that the bounding sphere radius is half the dodeca's\n                // bounding sphere radius.\n                (vertex.dual_to_node_f64() * MPoint::origin()).midpoint(&MPoint::origin())\n            })\n        });\n\n    /// Vertex shared by 3 sides\n    pub static SIDES_TO_VERTEX: LazyLock<[[[Option<Vertex>; SIDE_COUNT]; SIDE_COUNT]; SIDE_COUNT]> =\n        LazyLock::new(|| {\n            let mut result = [[[None; SIDE_COUNT]; SIDE_COUNT]; SIDE_COUNT];\n            for vertex in Vertex::iter() {\n                let [a, b, c] = vertex.canonical_sides().map(|side| side as usize);\n                result[a][b][c] = Some(vertex);\n                result[a][c][b] = Some(vertex);\n                result[b][a][c] = Some(vertex);\n                result[b][c][a] = Some(vertex);\n                result[c][a][b] = Some(vertex);\n                result[c][b][a] = Some(vertex);\n            }\n            result\n        });\n\n    /// Whether the determinant of the dual-to-node transform is negative\n    pub static CHUNK_TO_NODE_PARITY: LazyLock<[bool; VERTEX_COUNT]> =\n        LazyLock::new(|| Vertex::VALUES.map(|vertex| vertex.dual_to_node().parity()));\n\n    pub static SIDE_NORMALS_F32: LazyLock<[MDirection<f32>; SIDE_COUNT]> =\n        LazyLock::new(|| SIDE_NORMALS_F64.map(|n| n.cast()));\n\n    pub static REFLECTIONS_F32: LazyLock<[MIsometry<f32>; SIDE_COUNT]> =\n        LazyLock::new(|| REFLECTIONS_F64.map(|n| n.cast()));\n\n    pub static DUAL_TO_NODE_F32: LazyLock<[MIsometry<f32>; VERTEX_COUNT]> =\n        LazyLock::new(|| DUAL_TO_NODE_F64.map(|n| n.cast()));\n\n    pub static NODE_TO_DUAL_F32: LazyLock<[MIsometry<f32>; VERTEX_COUNT]> =\n        
LazyLock::new(|| NODE_TO_DUAL_F64.map(|n| n.cast()));\n\n    pub static DUAL_TO_CHUNK_FACTOR_F32: LazyLock<f32> =\n        LazyLock::new(|| *DUAL_TO_CHUNK_FACTOR_F64 as f32);\n\n    pub static CHUNK_TO_DUAL_FACTOR_F32: LazyLock<f32> =\n        LazyLock::new(|| *CHUNK_TO_DUAL_FACTOR_F64 as f32);\n\n    pub static CHUNK_BOUNDING_SPHERE_CENTERS_F32: LazyLock<[MPoint<f32>; VERTEX_COUNT]> =\n        LazyLock::new(|| CHUNK_BOUNDING_SPHERE_CENTERS_F64.map(|p| p.cast()));\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use approx::*;\n\n    #[test]\n    fn vertex_sides_consistent() {\n        use std::collections::HashSet;\n        let triples = Vertex::iter()\n            .map(|v| v.canonical_sides())\n            .collect::<HashSet<_>>();\n        assert_eq!(triples.len(), VERTEX_COUNT);\n        for triple in Vertex::iter().map(|v| v.canonical_sides()) {\n            let mut sorted = triple;\n            sorted.sort_unstable();\n            assert_eq!(triple, sorted);\n            assert!(triple[0].adjacent_to(triple[1]));\n            assert!(triple[1].adjacent_to(triple[2]));\n            assert!(triple[2].adjacent_to(triple[0]));\n        }\n    }\n\n    #[test]\n    fn sides_to_vertex() {\n        for v in Vertex::iter() {\n            let [a, b, c] = v.canonical_sides();\n            assert_eq!(v, Vertex::from_sides([a, b, c]).unwrap());\n            assert_eq!(v, Vertex::from_sides([a, c, b]).unwrap());\n            assert_eq!(v, Vertex::from_sides([b, a, c]).unwrap());\n            assert_eq!(v, Vertex::from_sides([b, c, a]).unwrap());\n            assert_eq!(v, Vertex::from_sides([c, a, b]).unwrap());\n            assert_eq!(v, Vertex::from_sides([c, b, a]).unwrap());\n        }\n    }\n\n    #[test]\n    fn adjacent_chunk_axis_permutations() {\n        // Assumptions for this test to be valid. 
If any assertions in this section fail, the test itself\n        // needs to be modified\n        assert_eq!(Vertex::A.canonical_sides(), [Side::A, Side::B, Side::C]);\n        assert_eq!(Vertex::B.canonical_sides(), [Side::A, Side::B, Side::E]);\n\n        assert_eq!(Vertex::F.canonical_sides(), [Side::B, Side::C, Side::F]);\n        assert_eq!(Vertex::J.canonical_sides(), [Side::C, Side::F, Side::H]);\n\n        // Test cases\n\n        // Variables with name vertex_?_canonical_sides_reflected refer to the canonical sides\n        // of a particular vertex after a reflection that moves it to another vertex.\n        // For instance, vertex_a_canonical_sides_reflected is similar to Vertex::A.canonical_sides(),\n        // but one of the sides is changed to match Vertex B, but the order of the other two sides is left alone.\n        let vertex_a_canonical_sides_reflected = [Side::A, Side::B, Side::E];\n        let vertex_b_canonical_sides_reflected = [Side::A, Side::B, Side::C];\n        assert_eq!(\n            Vertex::A.chunk_axis_permutations()[2],\n            ChunkAxisPermutation::from_permutation(\n                vertex_a_canonical_sides_reflected,\n                Vertex::B.canonical_sides()\n            )\n        );\n        assert_eq!(\n            Vertex::B.chunk_axis_permutations()[2],\n            ChunkAxisPermutation::from_permutation(\n                vertex_b_canonical_sides_reflected,\n                Vertex::A.canonical_sides()\n            )\n        );\n\n        let vertex_f_canonical_sides_reflected = [Side::H, Side::C, Side::F];\n        let vertex_j_canonical_sides_reflected = [Side::C, Side::F, Side::B];\n        assert_eq!(\n            Vertex::F.chunk_axis_permutations()[0],\n            ChunkAxisPermutation::from_permutation(\n                vertex_f_canonical_sides_reflected,\n                Vertex::J.canonical_sides()\n            )\n        );\n        assert_eq!(\n            Vertex::J.chunk_axis_permutations()[2],\n            
ChunkAxisPermutation::from_permutation(\n                vertex_j_canonical_sides_reflected,\n                Vertex::F.canonical_sides()\n            )\n        );\n    }\n\n    #[test]\n    fn side_is_facing() {\n        for side in Side::iter() {\n            assert!(!side.is_facing(&MPoint::origin()));\n            assert!(side.is_facing(&(*side.reflection() * MPoint::origin())));\n        }\n    }\n\n    #[test]\n    fn radius() {\n        let corner = *Vertex::A.dual_to_node_f64() * MPoint::origin();\n        assert_abs_diff_eq!(\n            BOUNDING_SPHERE_RADIUS_F64,\n            corner.distance(&MPoint::origin()),\n            epsilon = 1e-10\n        );\n        let phi = (1.0 + 5.0f64.sqrt()) / 2.0; // Golden ratio\n        assert_abs_diff_eq!(\n            BOUNDING_SPHERE_RADIUS_F64,\n            (1.5 * phi).sqrt().asinh(),\n            epsilon = 1e-10\n        );\n    }\n\n    #[test]\n    fn chunk_bounding_sphere() {\n        let corner = *Vertex::A.dual_to_node_f64() * MPoint::origin();\n        let bounding_sphere_center = Vertex::A.chunk_bounding_sphere_center_f64();\n        assert_abs_diff_eq!(\n            CHUNK_BOUNDING_SPHERE_RADIUS_F64,\n            corner.distance(bounding_sphere_center),\n            epsilon = 1e-10\n        );\n        assert_abs_diff_eq!(\n            CHUNK_BOUNDING_SPHERE_RADIUS_F64,\n            MPoint::origin().distance(bounding_sphere_center),\n            epsilon = 1e-10\n        );\n    }\n\n    #[test]\n    fn chunk_to_node() {\n        // Chunk coordinates of (1, 1, 1) should be at the center of a dodecahedron.\n        let mut chunk_corner_in_node_coordinates =\n            Vertex::A.chunk_to_node_f64() * na::Vector4::new(1.0, 1.0, 1.0, 1.0);\n        chunk_corner_in_node_coordinates /= chunk_corner_in_node_coordinates.w;\n        assert_abs_diff_eq!(\n            chunk_corner_in_node_coordinates,\n            na::Vector4::new(0.0, 0.0, 0.0, 1.0),\n            epsilon = 1e-10\n        );\n    }\n\n    #[test]\n  
  fn node_to_chunk() {\n        assert_abs_diff_eq!(\n            Vertex::A.chunk_to_node_f64().try_inverse().unwrap(),\n            Vertex::A.node_to_chunk_f64(),\n            epsilon = 1e-10\n        );\n    }\n}\n"
  },
  {
    "path": "common/src/graph.rs",
    "content": "#![allow(clippy::len_without_is_empty)]\n\nuse std::collections::VecDeque;\n\nuse blake3::Hasher;\nuse fxhash::{FxHashMap, FxHashSet};\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    dodeca::{SIDE_COUNT, Side},\n    math::{MIsometry, MPoint},\n    node::{ChunkId, ChunkLayout, Node},\n};\n\n/// Graph of the right dodecahedral tiling of H^3\npub struct Graph {\n    nodes: FxHashMap<NodeId, NodeContainer>,\n    layout: ChunkLayout,\n}\n\nimpl Graph {\n    pub fn new(dimension: u8) -> Self {\n        let mut nodes = FxHashMap::default();\n        nodes.insert(NodeId::ROOT, NodeContainer::new(None, 0));\n        Self {\n            nodes,\n            layout: ChunkLayout::new(dimension),\n        }\n    }\n\n    #[inline]\n    pub fn layout(&self) -> &ChunkLayout {\n        &self.layout\n    }\n\n    /// The number of nodes in the graph.\n    #[inline]\n    pub fn len(&self) -> u32 {\n        self.nodes.len() as u32\n    }\n\n    #[inline]\n    pub fn contains(&self, node: NodeId) -> bool {\n        self.nodes.contains_key(&node)\n    }\n\n    /// Node and vertex that the cube around a certain vertex is canonically assigned to.\n    ///\n    /// Each cube is said to be canonically assigned to node it touches that is closest to the root.\n    pub fn canonicalize(&self, mut chunk: ChunkId) -> Option<ChunkId> {\n        for side in chunk.vertex.canonical_sides().into_iter() {\n            // missing neighbors are always longer\n            if let Some(neighbor) = self.neighbor(chunk.node, side)\n                && self.depth(neighbor) < self.depth(chunk.node)\n            {\n                chunk.node = neighbor;\n            }\n        }\n        Some(chunk)\n    }\n\n    /// Returns the given node's parents along with the side each shares with the node.\n    pub fn parents(&self, node: NodeId) -> impl ExactSizeIterator<Item = (Side, NodeId)> + use<> {\n        let node_depth = self.depth(node);\n\n        let mut results = [None; 3];\n        
let mut len = 0;\n\n        for side in Side::iter() {\n            // filtering out not-yet-allocated neighbors is fine since\n            // they have to be longer than us not to be allocated yet\n            if let Some(neighbor_node) = self.neighbor(node, side)\n                && self.depth(neighbor_node) < node_depth\n            {\n                results[len] = Some((side, neighbor_node));\n                len += 1;\n            }\n        }\n\n        (0..len).map(move |i| results[i].unwrap())\n    }\n\n    #[inline]\n    pub fn neighbor(&self, node: NodeId, which: Side) -> Option<NodeId> {\n        self.nodes[&node].neighbors[which as usize]\n    }\n\n    /// The number of steps required to get from the root to the given node.\n    #[inline]\n    pub fn depth(&self, node: NodeId) -> u32 {\n        self.nodes[&node].depth\n    }\n\n    /// Given a `transform` relative to a `reference` node, computes the node\n    /// that it's closest to and the transform that moves it there\n    pub fn normalize_transform(\n        &self,\n        mut reference: NodeId,\n        original: &MIsometry<f32>,\n    ) -> (NodeId, MIsometry<f32>) {\n        let mut transform = MIsometry::identity();\n        let mut location = original * MPoint::origin();\n        'outer: loop {\n            for side in Side::iter() {\n                if !side.is_facing(&location) {\n                    continue;\n                }\n                reference = match self.neighbor(reference, side) {\n                    None => continue,\n                    Some(x) => x,\n                };\n                let mat = side.reflection();\n                location = mat * location;\n                transform = mat * transform;\n                continue 'outer;\n            }\n            break;\n        }\n        (reference, transform)\n    }\n\n    #[inline]\n    pub fn primary_parent_side(&self, node: NodeId) -> Option<Side> {\n        self.nodes[&node].primary_parent_side\n    }\n\n    /// 
Iterate over every node except the root via a breadth-first search in\n    /// the form of ordered pairs `(Side, NodeId)` where `NodeId` is the ID of\n    /// its primary parent node, and `Side` is the side shared by the node and the\n    /// primary parent node. This can be used to construct a copy of the graph with the\n    /// same set of nodes.\n    pub fn tree(&self) -> TreeIter<'_> {\n        TreeIter::new(self)\n    }\n\n    /// Ensures that the neighbour node at a particular side of a particular node exists in the graph,\n    /// as well as the nodes from the root to the neighbour node.\n    pub fn ensure_neighbor(&mut self, node: NodeId, side: Side) -> NodeId {\n        // A node cannot be created before any of its parents, so if a new node is created, it is guaranteed\n        // to be a child node.\n        self.nodes[&node].neighbors[side as usize].unwrap_or_else(|| self.insert_child(node, side))\n    }\n\n    /// Whether `node`'s neighbor along `side` is closer than it to the root\n    fn is_parent_side(&self, node: NodeId, side: Side) -> bool {\n        let v = &self.nodes[&node];\n        v.neighbors[side as usize].is_some_and(|x| self.nodes[&x].depth < v.depth)\n    }\n\n    /// Inserts the child of the given node at the given side into the graph, ensuring that all\n    /// its parents are created first.\n    pub fn insert_child(&mut self, node: NodeId, side: Side) -> NodeId {\n        // To help improve readability, we use the term \"subject\" to refer to the not-yet-created child node, since the term\n        // \"child\" can be ambiguous.\n        let parents_of_subject = self.populate_parents_of_subject(node, side);\n\n        // Select the side along the canonical path from the root to this node. 
This is guaranteed\n        // to be the first entry of the `parents_of_subject` iterator.\n        let (primary_parent_side, primary_parent) = parents_of_subject.clone().next().unwrap();\n        let mut hasher = Hasher::new();\n        hasher.update(&primary_parent.0.to_le_bytes());\n        hasher.update(&[primary_parent_side as u8]);\n        let mut xof = hasher.finalize_xof();\n        let mut hash = [0; 16];\n        xof.fill(&mut hash);\n        let id = NodeId(u128::from_le_bytes(hash));\n\n        let depth = self.nodes[&node].depth + 1;\n        self.nodes\n            .insert(id, NodeContainer::new(Some(primary_parent_side), depth));\n        for (side, neighbor) in parents_of_subject {\n            self.link_neighbors(id, neighbor, side);\n        }\n        id\n    }\n\n    #[inline]\n    pub fn hash_of(&self, node: NodeId) -> u128 {\n        node.0\n    }\n\n    #[inline]\n    pub fn from_hash(&self, hash: u128) -> NodeId {\n        NodeId(hash)\n    }\n\n    /// Ensure all parents of a not-yet-created child node (which we call the \"subject\") exist, and return them\n    /// (including the given node) in the form of ordered pairs containing the side they share with this \"subject\",\n    /// and their node ID. 
These ordered pairs will be sorted by side, based on enum order.\n    fn populate_parents_of_subject(\n        &mut self,\n        node: NodeId,\n        side: Side,\n    ) -> impl Iterator<Item = (Side, NodeId)> + Clone + use<> {\n        let mut parents_of_subject = [None; 3]; // Maximum number of parents is 3\n        let mut count = 0;\n        for candidate_parent_side in Side::iter() {\n            if candidate_parent_side == side {\n                // The given node is included in the list of returned nodes.\n                parents_of_subject[count] = Some((side, node));\n                count += 1;\n            } else if candidate_parent_side.adjacent_to(side)\n                && self.is_parent_side(node, candidate_parent_side)\n            {\n                // This branch covers parents of the subject other than the given node.\n                // This is non-obvious, as it relies on the fact that a side is a parent side of the subject\n                // exactly when it is a parent side of the given node. This is not true in general, but it is true\n                // when the parent side in question is adjacent to the side shared by the given node and the subject.\n                // That is what allows the `self.is_parent_side(node, candidate_parent_side)` condition to behave as desired.\n\n                // We would like to return (and recursively create if needed) the parent of the subject. This means that\n                // if we label the shared side A and the parent side B, the path we would like to follow from the given node is AB,\n                // since A will take us to the subject, and then B will take us to its parent. However, taking\n                // the path AB is impossible because it would require the subject to already be in the graph. 
Fortunately,\n                // we can take the path BA instead because that will reach the same node, thanks to the fact that each edge\n                // is shared by 4 dodecas.\n                let parent_of_node = self.neighbor(node, candidate_parent_side).unwrap();\n                let parent_of_subject = self.ensure_neighbor(parent_of_node, side);\n                parents_of_subject[count] = Some((candidate_parent_side, parent_of_subject));\n                count += 1;\n            } else {\n                // The `candidate_parent_side` is not a parent side of the subject, so no action is necessary.\n            }\n        }\n        parents_of_subject.into_iter().flatten()\n    }\n\n    /// Register `a` and `b` as adjacent along `side`\n    fn link_neighbors(&mut self, a: NodeId, b: NodeId, side: Side) {\n        debug_assert!(\n            self.nodes[&a].neighbors[side as usize].is_none()\n                && self.nodes[&b].neighbors[side as usize].is_none()\n        );\n        self.nodes.get_mut(&a).unwrap().neighbors[side as usize] = Some(b);\n        self.nodes.get_mut(&b).unwrap().neighbors[side as usize] = Some(a);\n    }\n}\n\nimpl std::ops::Index<NodeId> for Graph {\n    type Output = Node;\n\n    #[inline]\n    fn index(&self, node_id: NodeId) -> &Node {\n        &self.nodes[&node_id].value\n    }\n}\n\nimpl std::ops::IndexMut<NodeId> for Graph {\n    #[inline]\n    fn index_mut(&mut self, node_id: NodeId) -> &mut Node {\n        &mut self.nodes.get_mut(&node_id).unwrap().value\n    }\n}\n\n/// Unique 128-bit identifier for a dodecahedral node in the graph. 
This ID\n/// depends entirely on the location of the node within the graph and is\n/// guaranteed not to depend on the order in which nodes are added to the graph.\n///\n/// A cryptographic hash function is used to ensure uniqueness, making it\n/// astronomically unlikely to be able to find two different nodes in the graph\n/// with the same ID.\n#[derive(Debug, Copy, Clone, Eq, PartialEq, PartialOrd, Ord, Hash, Serialize, Deserialize)]\npub struct NodeId(u128);\n\nimpl NodeId {\n    pub const ROOT: Self = Self(0);\n}\n\nstruct NodeContainer {\n    value: Node,\n    primary_parent_side: Option<Side>,\n    /// Distance to root via parents\n    depth: u32,\n    neighbors: [Option<NodeId>; SIDE_COUNT],\n}\n\nimpl NodeContainer {\n    fn new(primary_parent_side: Option<Side>, depth: u32) -> Self {\n        Self {\n            value: Node::default(),\n            primary_parent_side,\n            depth,\n            neighbors: [None; SIDE_COUNT],\n        }\n    }\n}\n\n// Iterates through the graph with breadth-first search\npub struct TreeIter<'a> {\n    queue: VecDeque<NodeId>,\n    visited: FxHashSet<NodeId>,\n    nodes: &'a FxHashMap<NodeId, NodeContainer>,\n}\n\nimpl<'a> TreeIter<'a> {\n    fn new(graph: &'a Graph) -> Self {\n        let mut result = TreeIter {\n            queue: VecDeque::from([NodeId::ROOT]),\n            visited: FxHashSet::from_iter([NodeId::ROOT]),\n            nodes: &graph.nodes,\n        };\n\n        // Skip the root node\n        let _ = result.next_node();\n\n        result\n    }\n\n    // Returns the next Node in the traversal. 
The iterator returns its primary parent and primary parent side.\n    fn next_node(&mut self) -> Option<&NodeContainer> {\n        let node_id = self.queue.pop_front()?;\n        let node = &self.nodes[&node_id];\n        for side in Side::iter() {\n            if let Some(neighbor) = node.neighbors[side as usize]\n                && !self.visited.contains(&neighbor)\n            {\n                self.queue.push_back(neighbor);\n                self.visited.insert(neighbor);\n            }\n        }\n        Some(node)\n    }\n}\n\nimpl Iterator for TreeIter<'_> {\n    type Item = (Side, NodeId);\n\n    fn next(&mut self) -> Option<Self::Item> {\n        let node = self.next_node()?;\n        let side = node.primary_parent_side.unwrap();\n        Some((side, node.neighbors[side as usize].unwrap()))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::{proto::Position, traversal::ensure_nearby};\n\n    use super::*;\n    use approx::*;\n\n    #[test]\n    fn parent_child_relationships() {\n        let mut graph = Graph::new(1);\n        assert_eq!(graph.len(), 1);\n        let a = graph.ensure_neighbor(NodeId::ROOT, Side::A);\n        assert_eq!(graph.len(), 2);\n        let a2 = graph.ensure_neighbor(NodeId::ROOT, Side::A);\n        assert_eq!(graph.len(), 2);\n        assert_eq!(a, a2);\n        assert_eq!(graph.ensure_neighbor(a, Side::A), NodeId::ROOT);\n        assert_eq!(graph.nodes[&a].depth, 1);\n        let b = graph.ensure_neighbor(NodeId::ROOT, Side::B);\n        assert_eq!(graph.len(), 3);\n        assert_eq!(graph.ensure_neighbor(b, Side::B), NodeId::ROOT);\n        let c = graph.ensure_neighbor(a, Side::C);\n        assert!(graph.len() > 4);\n        assert_eq!(graph.ensure_neighbor(c, Side::C), a);\n        assert_eq!(graph.nodes[&c].depth, 2);\n    }\n\n    #[test]\n    fn children_have_common_neighbor() {\n        let mut graph = Graph::new(1);\n        let a = graph.ensure_neighbor(NodeId::ROOT, Side::A);\n        let b = 
graph.ensure_neighbor(NodeId::ROOT, Side::B);\n        let a_neighbors = Side::iter()\n            .map(|side| graph.ensure_neighbor(a, side))\n            .collect::<Vec<_>>();\n        let b_neighbors = Side::iter()\n            .map(|side| graph.ensure_neighbor(b, side))\n            .collect::<Vec<_>>();\n        let common = a_neighbors\n            .iter()\n            .cloned()\n            .filter(|x| b_neighbors.contains(x))\n            .collect::<Vec<_>>();\n\n        assert_eq!(\n            common.len(),\n            2,\n            \"both the root and some other node are common neighbors\"\n        );\n        assert!(common.contains(&NodeId::ROOT));\n        let other = common.into_iter().find(|&x| x != NodeId::ROOT).unwrap();\n        assert_eq!(graph.nodes[&other].depth, 2);\n    }\n\n    #[test]\n    fn normalize_transform() {\n        let mut graph = Graph::new(1);\n        let a = graph.ensure_neighbor(NodeId::ROOT, Side::A);\n        {\n            let (node, xf) = graph.normalize_transform(NodeId::ROOT, &MIsometry::identity());\n            assert_eq!(node, NodeId::ROOT);\n            assert_abs_diff_eq!(xf, MIsometry::identity(), epsilon = 1e-5);\n        }\n        {\n            let (node, xf) = graph.normalize_transform(NodeId::ROOT, Side::A.reflection());\n            assert_eq!(node, a);\n            assert_abs_diff_eq!(xf, Side::A.reflection(), epsilon = 1e-5);\n        }\n    }\n\n    #[test]\n    fn rebuild_from_tree() {\n        let mut a = Graph::new(1);\n        ensure_nearby(&mut a, &Position::origin(), 3.0);\n        let mut b = Graph::new(1);\n        for (side, parent) in a.tree() {\n            b.insert_child(parent, side);\n        }\n        assert_eq!(a.len(), b.len());\n        for (c, d) in a.tree().zip(b.tree()) {\n            assert_eq!(c.0, d.0);\n            assert_eq!(a.neighbor(c.1, c.0), b.neighbor(c.1, c.0));\n        }\n    }\n\n    #[test]\n    fn hash_consistency() {\n        let h1 = {\n            let mut g = 
Graph::new(1);\n            let n1 = g.ensure_neighbor(NodeId::ROOT, Side::A);\n            let n2 = g.ensure_neighbor(n1, Side::B);\n            let n3 = g.ensure_neighbor(n2, Side::C);\n            g.ensure_neighbor(n3, Side::D)\n        };\n        let h2 = {\n            let mut g = Graph::new(1);\n            let n1 = g.ensure_neighbor(NodeId::ROOT, Side::C);\n            let n2 = g.ensure_neighbor(n1, Side::A);\n            let n3 = g.ensure_neighbor(n2, Side::B);\n            g.ensure_neighbor(n3, Side::D)\n        };\n\n        assert_eq!(h1, h2);\n    }\n}\n"
  },
  {
    "path": "common/src/graph_collision.rs",
    "content": "use crate::{\n    chunk_collision::chunk_sphere_cast,\n    collision_math::Ray,\n    graph::Graph,\n    math::MVector,\n    node::{Chunk, ChunkId},\n    proto::Position,\n    traversal::RayTraverser,\n};\n\n/// Performs sphere casting (swept collision query) against the voxels in the `Graph`\n///\n/// The `ray` parameter and any resulting hit normals are given in the local coordinate system of `position`.\n///\n/// The `tanh_distance` is the hyperbolic tangent of the cast_distance, or the distance along the ray to check for hits.\n///\n/// This function may return a `Err(OutOfBounds)` if not enough chunks are generated, even if the ray never reaches an\n/// ungenerated chunk. To prevent these errors, make sure that the distance between the ray's start point and the center of\n/// the closest node with ungenerated chunks is greater than `cast_distance + collider_radius + dodeca::BOUNDING_SPHERE_RADIUS`\npub fn sphere_cast(\n    collider_radius: f32,\n    graph: &Graph,\n    position: &Position,\n    ray: &Ray,\n    mut tanh_distance: f32,\n) -> Result<Option<GraphCastHit>, OutOfBounds> {\n    // A collision check is assumed to be a miss until a collision is found.\n    // This `hit` variable gets updated over time before being returned.\n    let mut hit: Option<GraphCastHit> = None;\n\n    let mut traverser = RayTraverser::new(graph, *position, ray, collider_radius);\n    while let Some((chunk, transform)) = traverser.next(tanh_distance) {\n        let Some(chunk) = chunk else {\n            // Collision checking on chunk outside of graph\n            return Err(OutOfBounds);\n        };\n        let Chunk::Populated {\n            voxels: ref voxel_data,\n            ..\n        } = graph[chunk]\n        else {\n            // Collision checking on unpopulated chunk\n            return Err(OutOfBounds);\n        };\n\n        // Check collision within a single chunk\n        hit = chunk_sphere_cast(\n            collider_radius,\n            
voxel_data,\n            graph.layout(),\n            &(transform * ray),\n            tanh_distance,\n        )\n        .map_or(hit, |hit| {\n            tanh_distance = hit.tanh_distance;\n            Some(GraphCastHit {\n                tanh_distance: hit.tanh_distance,\n                chunk,\n                normal: transform.inverse() * hit.normal,\n            })\n        });\n    }\n\n    Ok(hit)\n}\n\n#[derive(Debug)]\npub struct OutOfBounds;\n\n/// Information about the intersection at the end of a ray segment.\n#[derive(Debug)]\npub struct GraphCastHit {\n    /// The tanh of the distance traveled along the ray to result in this hit.\n    pub tanh_distance: f32,\n\n    /// Which chunk in the graph the hit occurred in\n    pub chunk: ChunkId,\n\n    /// Represents the normal vector of the hit surface in the original coordinate system\n    /// of the sphere casting. To get the actual normal vector, project it so that it is orthogonal\n    /// to the endpoint in Lorentz space.\n    pub normal: MVector<f32>,\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::{\n        collision_math::Ray,\n        dodeca::{self, Side, Vertex},\n        graph::{Graph, NodeId},\n        math::{MIsometry, MPoint},\n        node::VoxelData,\n        proto::Position,\n        traversal::{ensure_nearby, nearby_nodes},\n        voxel_math::Coords,\n        world::Material,\n    };\n\n    use super::*;\n\n    /// Convenience struct used to locate a particular voxel that should be solid in a test case.\n    struct VoxelLocation<'a> {\n        /// Path from the origin node to the voxel\n        node_path: &'a [Side],\n\n        /// Which chunk in the given node the voxel is in\n        vertex: Vertex,\n\n        /// The coordinates of the voxel\n        coords: Coords,\n    }\n\n    impl VoxelLocation<'_> {\n        fn new(node_path: &[Side], vertex: Vertex, coords: [u8; 3]) -> VoxelLocation<'_> {\n            VoxelLocation {\n                node_path,\n                vertex,\n        
        coords: Coords(coords),\n            }\n        }\n    }\n\n    struct SphereCastExampleTestCase<'a> {\n        /// Which voxel the test case focuses on. Also determines the coordinate system of the ray.\n        /// Any detected collision is expected to be on this voxel.\n        chosen_voxel: VoxelLocation<'a>,\n\n        /// Which voxels should be solid in the test case\n        additional_populated_voxels: &'a [VoxelLocation<'a>],\n\n        /// Grid coordinates of ray's start position relative to the root's \"A\" chunk\n        start_chunk_relative_grid_ray_start: [f32; 3],\n\n        /// Grid coordinates of ray's end position relative to chunk given by the chosen node and vertex\n        chosen_chunk_relative_grid_ray_end: [f32; 3],\n\n        /// What to use as the collider radius for shape casting\n        collider_radius: f32,\n\n        /// Amount to increase (or decrease) the ray's length compared to ending it at grid_ray_end\n        ray_length_modifier: f32,\n\n        /// Whether a collision should occur for the test to pass\n        collision_expected: bool,\n    }\n\n    impl SphereCastExampleTestCase<'_> {\n        fn execute(self) {\n            let dimension: u8 = 12;\n            let mut graph = Graph::new(dimension);\n            let graph_radius = 3.0;\n\n            // Set up a graph with void chunks\n            ensure_nearby(&mut graph, &Position::origin(), graph_radius);\n            for (node, _) in nearby_nodes(&graph, &Position::origin(), graph_radius) {\n                for vertex in dodeca::Vertex::iter() {\n                    graph[ChunkId::new(node, vertex)] = Chunk::Populated {\n                        voxels: VoxelData::Solid(Material::Void),\n                        surface: None,\n                        old_surface: None,\n                    };\n                }\n            }\n\n            Self::populate_voxel(&mut graph, dimension, &self.chosen_voxel);\n\n            for voxel in self.additional_populated_voxels 
{\n                Self::populate_voxel(&mut graph, dimension, voxel);\n            }\n\n            // Find the transform of the chosen chunk\n            let chosen_chunk_transform: MIsometry<f32> = self\n                .chosen_voxel\n                .node_path\n                .iter()\n                .fold(MIsometry::identity(), |transform: MIsometry<f32>, side| {\n                    transform * side.reflection()\n                })\n                * self.chosen_voxel.vertex.dual_to_node();\n\n            let dual_to_grid_factor = graph.layout().dual_to_grid_factor();\n            let ray_target = chosen_chunk_transform\n                * MVector::new(\n                    self.chosen_chunk_relative_grid_ray_end[0] / dual_to_grid_factor,\n                    self.chosen_chunk_relative_grid_ray_end[1] / dual_to_grid_factor,\n                    self.chosen_chunk_relative_grid_ray_end[2] / dual_to_grid_factor,\n                    1.0,\n                )\n                .normalized_point();\n\n            let ray_position = *Vertex::A.dual_to_node()\n                * MVector::new(\n                    self.start_chunk_relative_grid_ray_start[0] / dual_to_grid_factor,\n                    self.start_chunk_relative_grid_ray_start[1] / dual_to_grid_factor,\n                    self.start_chunk_relative_grid_ray_start[2] / dual_to_grid_factor,\n                    1.0,\n                )\n                .normalized_point();\n            let ray_direction = ray_target.as_ref() - ray_position.as_ref();\n\n            let ray = Ray::new(\n                ray_position,\n                (ray_direction.as_ref() + ray_position.as_ref() * ray_position.mip(&ray_direction))\n                    .normalized_direction(),\n            );\n\n            let tanh_distance =\n                ((-ray_position.mip(&ray_target)).acosh() + self.ray_length_modifier).tanh();\n\n            let hit = sphere_cast(\n                self.collider_radius,\n                &graph,\n        
        &Position::origin(),\n                &ray,\n                tanh_distance,\n            )\n            .expect(\"conclusive collision result\");\n\n            if self.collision_expected {\n                assert!(hit.is_some(), \"no collision detected\");\n                assert_eq!(\n                    hit.as_ref().unwrap().chunk,\n                    Self::get_voxel_chunk(&graph, &self.chosen_voxel),\n                    \"collision occurred in wrong chunk\"\n                );\n                assert!(\n                    hit.as_ref().unwrap().normal.mip(&ray.direction) < 0.0,\n                    \"normal is facing the wrong way\"\n                );\n            } else {\n                assert!(hit.is_none(), \"unexpected collision detected\");\n            }\n        }\n\n        fn populate_voxel(graph: &mut Graph, dimension: u8, voxel_location: &VoxelLocation) {\n            // Find the ChunkId of the given chunk\n            let chunk = ChunkId::new(\n                voxel_location\n                    .node_path\n                    .iter()\n                    .fold(NodeId::ROOT, |node, &side| {\n                        graph.neighbor(node, side).unwrap()\n                    }),\n                voxel_location.vertex,\n            );\n            let Chunk::Populated {\n                voxels: voxel_data, ..\n            } = &mut graph[chunk]\n            else {\n                panic!(\"All chunks should be populated.\");\n            };\n\n            // Populate the given voxel with dirt.\n            voxel_data.data_mut(dimension)[voxel_location.coords.to_index(dimension)] =\n                Material::Dirt;\n        }\n\n        fn get_voxel_chunk(graph: &Graph, voxel_location: &VoxelLocation) -> ChunkId {\n            ChunkId::new(\n                voxel_location\n                    .node_path\n                    .iter()\n                    .fold(NodeId::ROOT, |node, &side| {\n                        graph.neighbor(node, 
side).unwrap()\n                    }),\n                voxel_location.vertex,\n            )\n        }\n    }\n\n    /// Checks that `sphere_cast` behaves as expected under normal circumstances.\n    #[test]\n    fn sphere_cast_examples() {\n        // Basic test case\n        SphereCastExampleTestCase {\n            chosen_voxel: VoxelLocation::new(&[Side::G], Vertex::I, [2, 3, 5]),\n            additional_populated_voxels: &[],\n            start_chunk_relative_grid_ray_start: [12.0, 12.0, 12.0], // Node center\n            chosen_chunk_relative_grid_ray_end: [2.5, 3.5, 5.5],\n            collider_radius: 0.02,\n            ray_length_modifier: 0.0,\n            collision_expected: true,\n        }\n        .execute();\n\n        // Barely touching a neighboring node\n        SphereCastExampleTestCase {\n            chosen_voxel: VoxelLocation::new(\n                &[Vertex::B.canonical_sides()[0]],\n                Vertex::B,\n                [0, 11, 11],\n            ),\n            additional_populated_voxels: &[],\n            start_chunk_relative_grid_ray_start: [12.0, 12.0, 12.0], // Node center\n            chosen_chunk_relative_grid_ray_end: [0.0, 12.0, 12.0],\n            collider_radius: 0.02,\n            ray_length_modifier: -0.019,\n            collision_expected: true,\n        }\n        .execute();\n\n        // Barely not touching a neighboring node\n        SphereCastExampleTestCase {\n            chosen_voxel: VoxelLocation::new(\n                &[Vertex::B.canonical_sides()[0]],\n                Vertex::B,\n                [0, 11, 11],\n            ),\n            additional_populated_voxels: &[],\n            start_chunk_relative_grid_ray_start: [12.0, 12.0, 12.0], // Node center\n            chosen_chunk_relative_grid_ray_end: [0.0, 12.0, 12.0],\n            collider_radius: 0.02,\n            ray_length_modifier: -0.021,\n            collision_expected: false,\n        }\n        .execute();\n\n        // Barely touching a neighboring 
vertex\n        {\n            // This test case requires a bit of extra logic because getting the voxel coordinates\n            // adjacent to a voxel in a neighboring chunk requires inspecting the canonical side\n            // order of both vertices.\n            let chosen_vertex = Vertex::A.adjacent_vertices()[0];\n            let corresponding_axis = chosen_vertex\n                .canonical_sides()\n                .iter()\n                .position(|side| !Vertex::A.canonical_sides().contains(side))\n                .unwrap();\n            let mut chosen_voxel_coords = [0, 0, 0];\n            chosen_voxel_coords[corresponding_axis] = 11;\n            let mut grid_ray_end = [0.0, 0.0, 0.0];\n            grid_ray_end[corresponding_axis] = 12.0;\n            SphereCastExampleTestCase {\n                chosen_voxel: VoxelLocation::new(&[], chosen_vertex, chosen_voxel_coords),\n                additional_populated_voxels: &[],\n                start_chunk_relative_grid_ray_start: [0.0, 0.0, 0.0], // Node's A-vertex corner\n                chosen_chunk_relative_grid_ray_end: grid_ray_end,\n                collider_radius: 0.02,\n                ray_length_modifier: -0.019,\n                collision_expected: true,\n            }\n            .execute();\n        }\n\n        // Barely touching a node opposite the original node at a corner\n        SphereCastExampleTestCase {\n            chosen_voxel: VoxelLocation::new(\n                &[\n                    Vertex::D.canonical_sides()[0],\n                    Vertex::D.canonical_sides()[1],\n                    Vertex::D.canonical_sides()[2],\n                ],\n                Vertex::D,\n                [0, 0, 0],\n            ),\n            additional_populated_voxels: &[],\n            start_chunk_relative_grid_ray_start: [12.0, 12.0, 12.0], // Node center\n            chosen_chunk_relative_grid_ray_end: [0.0, 0.0, 0.0],\n            collider_radius: 0.02,\n            ray_length_modifier: -0.019,\n  
          collision_expected: true,\n        }\n        .execute();\n\n        // Colliding with a neighboring node's voxel before the center node's voxel\n        SphereCastExampleTestCase {\n            chosen_voxel: VoxelLocation::new(\n                &[Vertex::A.canonical_sides()[0]],\n                Vertex::A,\n                [0, 4, 4],\n            ),\n            additional_populated_voxels: &[VoxelLocation::new(&[], Vertex::A, [0, 5, 4])],\n            // Because we use the \"A\" vertex, the two coordinate systems below coincide for x = 0.0\n            start_chunk_relative_grid_ray_start: [0.0, 3.0, 4.5],\n            chosen_chunk_relative_grid_ray_end: [0.0, 8.0, 4.5],\n            collider_radius: 0.02,\n            ray_length_modifier: 0.0,\n            collision_expected: true,\n        }\n        .execute();\n\n        // Colliding with the center node's voxel before a neighboring node's voxel\n        SphereCastExampleTestCase {\n            chosen_voxel: VoxelLocation::new(&[], Vertex::A, [0, 4, 4]),\n            additional_populated_voxels: &[VoxelLocation::new(\n                &[Vertex::A.canonical_sides()[0]],\n                Vertex::A,\n                [0, 5, 4],\n            )],\n            start_chunk_relative_grid_ray_start: [0.0, 3.0, 4.5],\n            chosen_chunk_relative_grid_ray_end: [0.0, 8.0, 4.5],\n            collider_radius: 0.02,\n            ray_length_modifier: 0.0,\n            collision_expected: true,\n        }\n        .execute();\n    }\n\n    /// Tests that a sphere cast that gets close to the corner of an unloaded chunk does not throw an error as\n    /// long as the contract for sphere_cast is upheld.\n    #[test]\n    fn sphere_cast_near_unloaded_chunk() {\n        let dimension: u8 = 12;\n        let mut graph = Graph::new(dimension);\n\n        let sides = Vertex::A.canonical_sides();\n\n        // Add six nodes surrounding the origin's Vertex::A to total 7 out of 8 nodes.\n        // Only the far corner is 
missing.\n        let first_neighbors = [\n            graph.ensure_neighbor(NodeId::ROOT, sides[0]),\n            graph.ensure_neighbor(NodeId::ROOT, sides[1]),\n            graph.ensure_neighbor(NodeId::ROOT, sides[2]),\n        ];\n        let second_neighbors = [\n            graph.ensure_neighbor(first_neighbors[0], sides[1]),\n            graph.ensure_neighbor(first_neighbors[1], sides[2]),\n            graph.ensure_neighbor(first_neighbors[2], sides[0]),\n        ];\n\n        // Populate all graph nodes\n        for node in [\n            &[NodeId::ROOT],\n            first_neighbors.as_slice(),\n            second_neighbors.as_slice(),\n        ]\n        .concat()\n        {\n            for vertex in dodeca::Vertex::iter() {\n                graph[ChunkId::new(node, vertex)] = Chunk::Populated {\n                    voxels: VoxelData::Solid(Material::Void),\n                    surface: None,\n                    old_surface: None,\n                };\n            }\n        }\n\n        // The node coordinates of the corner of the missing node\n        let vertex_pos = Vertex::A.dual_to_node() * MPoint::origin();\n\n        // Use a ray starting from the origin. The direction vector is vertex_pos with the w coordinate\n        // set to 0 and normalized\n        let ray = Ray::new(\n            MPoint::origin(),\n            (vertex_pos.as_ref() - MVector::w() * vertex_pos.w).normalized_direction(),\n        );\n        let sphere_radius = 0.1;\n\n        // Use a distance slightly less than the maximum possible before an error would occur.\n        let distance = vertex_pos.w.acosh() - sphere_radius - 1e-4;\n\n        let hit = sphere_cast(\n            sphere_radius,\n            &graph,\n            &Position::origin(),\n            &ray,\n            distance.tanh(),\n        );\n\n        assert!(hit.is_ok());\n    }\n}\n"
  },
  {
    "path": "common/src/graph_entities.rs",
    "content": "use fxhash::FxHashMap;\nuse hecs::Entity;\n\nuse crate::graph::NodeId;\n\n#[derive(Default)]\npub struct GraphEntities {\n    map: FxHashMap<NodeId, Vec<Entity>>,\n}\n\nimpl GraphEntities {\n    pub fn new() -> Self {\n        Self {\n            map: FxHashMap::default(),\n        }\n    }\n\n    pub fn get(&self, node: NodeId) -> &[Entity] {\n        self.map.get(&node).map_or(&[], |x| &x[..])\n    }\n\n    pub fn insert(&mut self, node: NodeId, entity: Entity) {\n        let vec = self.map.entry(node).or_default();\n        debug_assert!(!vec.contains(&entity), \"redundant insert\");\n        vec.push(entity);\n    }\n\n    pub fn remove(&mut self, node: NodeId, entity: Entity) {\n        let vec = self.map.get_mut(&node).expect(\"remove from empty node\");\n        let pos = vec\n            .iter()\n            .position(|&e| e == entity)\n            .expect(\"no such entity at this node\");\n        vec.swap_remove(pos);\n        if vec.is_empty() {\n            self.map.remove(&node);\n        }\n    }\n}\n"
  },
  {
    "path": "common/src/graph_ray_casting.rs",
    "content": "use crate::{\n    chunk_ray_casting::chunk_ray_cast,\n    collision_math::Ray,\n    graph::Graph,\n    node::{Chunk, ChunkId},\n    proto::Position,\n    traversal::RayTraverser,\n    voxel_math::{CoordAxis, CoordSign, Coords},\n};\n\n/// Performs ray casting against the voxels in the `DualGraph`\n///\n/// The `ray` parameter and any resulting hit normals are given in the local coordinate system of `position`.\n///\n/// The `tanh_distance` is the hyperbolic tangent of the cast_distance, or the distance along the ray to check for hits.\n///\n/// This function may return an `Err(OutOfBounds)` if not enough chunks are generated, even if the ray never reaches an\n/// ungenerated chunk. To prevent these errors, make sure that the distance between the ray's start point and the center of\n/// the closest node with ungenerated chunks is greater than `cast_distance + dodeca::BOUNDING_SPHERE_RADIUS`\npub fn ray_cast(\n    graph: &Graph,\n    position: &Position,\n    ray: &Ray,\n    mut tanh_distance: f32,\n) -> Result<Option<GraphCastHit>, OutOfBounds> {\n    // A ray cast is assumed to be a miss until a collision is found.\n    // This `hit` variable gets updated over time before being returned.\n    let mut hit: Option<GraphCastHit> = None;\n\n    let mut traverser = RayTraverser::new(graph, *position, ray, 0.0);\n    while let Some((chunk, transform)) = traverser.next(tanh_distance) {\n        let Some(chunk) = chunk else {\n            // Ray reached chunk outside of graph\n            return Err(OutOfBounds);\n        };\n        let Chunk::Populated {\n            voxels: ref voxel_data,\n            ..\n        } = graph[chunk]\n        else {\n            // Ray reached unpopulated chunk\n            return Err(OutOfBounds);\n        };\n\n        hit = chunk_ray_cast(\n            voxel_data,\n            graph.layout(),\n            &(transform * ray),\n            tanh_distance,\n        )\n        .map_or(hit, |hit| {\n            tanh_distance = 
hit.tanh_distance;\n            Some(GraphCastHit {\n                tanh_distance: hit.tanh_distance,\n                chunk,\n                voxel_coords: hit.voxel_coords,\n                face_axis: hit.face_axis,\n                face_sign: hit.face_sign,\n            })\n        });\n    }\n\n    Ok(hit)\n}\n\n#[derive(Debug)]\npub struct OutOfBounds;\n\n/// Information about the intersection at the end of a ray segment.\n#[derive(Debug)]\npub struct GraphCastHit {\n    /// The tanh of the distance traveled along the ray to result in this hit.\n    pub tanh_distance: f32,\n\n    /// Which chunk in the graph the hit occurred in\n    pub chunk: ChunkId,\n\n    /// The coordinates of the block that was hit, including margins.\n    pub voxel_coords: Coords,\n\n    /// Which of the three axes is orthogonal to the face of the block that was hit.\n    pub face_axis: CoordAxis,\n\n    /// The direction along `face_axis` corresponding to the outside of the face that was hit.\n    pub face_sign: CoordSign,\n}\n"
  },
  {
    "path": "common/src/id.rs",
    "content": "#[macro_export]\nmacro_rules! mkid {\n    ($name:ident : $ty:ty) => {\n        #[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Copy, Clone, Hash)]\n        pub struct $name($ty);\n\n        impl $name {\n            #[inline]\n            pub fn to_bits(self) -> u64 {\n                self.0\n            }\n\n            pub fn from_bits(x: u64) -> Self {\n                Self(x)\n            }\n        }\n\n        impl From<$ty> for $name {\n            fn from(x: $ty) -> $name {\n                $name(x)\n            }\n        }\n\n        impl From<$name> for $ty {\n            fn from(x: $name) -> $ty {\n                x.0\n            }\n        }\n\n        impl std::str::FromStr for $name {\n            type Err = ::std::num::ParseIntError;\n            fn from_str(s: &str) -> Result<Self, Self::Err> {\n                Ok($name(<$ty>::from_str_radix(s, 16)?))\n            }\n        }\n\n        impl serde::Serialize for $name {\n            fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>\n            where\n                S: serde::Serializer,\n            {\n                if s.is_human_readable() {\n                    s.serialize_str(&self.to_string())\n                } else {\n                    self.0.serialize(s)\n                }\n            }\n        }\n\n        impl<'a> serde::Deserialize<'a> for $name {\n            fn deserialize<D>(d: D) -> Result<Self, D::Error>\n            where\n                D: serde::Deserializer<'a>,\n            {\n                use serde::de::Error;\n                if d.is_human_readable() {\n                    let x = <&'a str>::deserialize(d)?;\n                    x.parse().map_err(D::Error::custom)\n                } else {\n                    Ok($name(<$ty>::deserialize(d)?))\n                }\n            }\n        }\n    };\n}\n"
  },
  {
    "path": "common/src/lib.rs",
    "content": "#![allow(clippy::needless_borrowed_reference)]\n\nuse rand::{\n    Rng,\n    distr::{Distribution, StandardUniform},\n};\n\n#[macro_use]\nmod id;\n\nextern crate nalgebra as na;\npub mod character_controller;\npub mod chunk_collision;\nmod chunk_ray_casting;\nmod chunks;\npub mod codec;\npub mod collision_math;\npub mod cursor;\npub mod dodeca;\npub mod graph;\npub mod graph_collision;\nmod graph_entities;\npub mod graph_ray_casting;\nmod margins;\npub mod math;\npub mod node;\npub mod peer_traverser;\npub mod proto;\nmod sim_config;\npub mod traversal;\npub mod voxel_math;\npub mod world;\npub mod worldgen;\n\npub use chunks::Chunks;\npub use graph_entities::GraphEntities;\npub use sim_config::{SimConfig, SimConfigRaw};\n\n// Stable IDs made of 8 random bytes for easy persistent references\nmkid!(EntityId: u64);\n\nimpl std::fmt::Display for EntityId {\n    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {\n        write!(f, \"{:016x}\", self.0)\n    }\n}\n\nimpl Distribution<EntityId> for StandardUniform {\n    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> EntityId {\n        EntityId(rng.random())\n    }\n}\n\npub type Step = i32;\n\npub fn defer<F: FnOnce()>(f: F) -> Defer<F> {\n    Defer::new(f)\n}\n\npub struct Defer<F: FnOnce()>(Option<F>);\n\nimpl<F: FnOnce()> Defer<F> {\n    pub fn new(f: F) -> Self {\n        Self(Some(f))\n    }\n\n    pub fn invoke(self) {}\n\n    pub fn cancel(mut self) {\n        self.0 = None;\n    }\n}\n\nimpl<F: FnOnce()> Drop for Defer<F> {\n    fn drop(&mut self) {\n        if let Some(f) = self.0.take() {\n            f()\n        }\n    }\n}\n\n/// Clamp speed to to 1.0 and graceful NaN handling\npub fn sanitize_motion_input(v: na::Vector3<f32>) -> na::Vector3<f32> {\n    if !v.iter().all(|x| x.is_finite()) {\n        return na::Vector3::zeros();\n    }\n    v / v.norm().max(1.0)\n}\n\npub fn tracing_guard() -> tracing::dispatcher::DefaultGuard {\n    use 
tracing_subscriber::util::SubscriberInitExt;\n    tracing_subscriber().set_default()\n}\n\npub fn init_tracing() {\n    use tracing_subscriber::util::SubscriberInitExt;\n    tracing_subscriber().init();\n}\n\nfn tracing_subscriber() -> impl tracing::Subscriber {\n    use tracing_subscriber::{filter, fmt, layer::SubscriberExt, registry};\n\n    registry()\n        .with(\n            fmt::layer()\n                .with_target(false)\n                .with_ansi(cfg!(not(windows))),\n        )\n        .with(\n            filter::EnvFilter::from_default_env()\n                .add_directive(tracing_subscriber::filter::LevelFilter::INFO.into()),\n        )\n}\n\npub trait Anonymize {\n    type Output;\n    fn anonymize(&self) -> Self::Output;\n}\n\nimpl Anonymize for std::path::Path {\n    type Output = std::path::PathBuf;\n\n    fn anonymize(&self) -> Self::Output {\n        let Some(home) = std::env::home_dir() else {\n            // Give up if we don't know the home directory\n            return self.into();\n        };\n        let Ok(home_relative_path) = self.strip_prefix(home) else {\n            // If the path is not in the home directory, there is nothing to do.\n            return self.into();\n        };\n        let mut result = std::path::PathBuf::new();\n        // Write a best effort placeholder that should work as a substitute for the home directory in a file explorer\n        result.push(if cfg!(windows) {\n            \"%USERPROFILE%\"\n        } else {\n            \"$HOME\"\n        });\n        result.push(home_relative_path);\n        result\n    }\n}\n"
  },
  {
    "path": "common/src/margins.rs",
    "content": "use crate::{\n    dodeca::Vertex,\n    graph::Graph,\n    math::PermuteXYZ,\n    node::{Chunk, ChunkId, VoxelData},\n    voxel_math::{ChunkAxisPermutation, ChunkDirection, CoordAxis, CoordSign, Coords},\n    world::Material,\n};\n\n/// Updates the margins of both `voxels` and `neighbor_voxels` at the side they meet at.\n/// It is assumed that `voxels` corresponds to a chunk that lies at `vertex` and that\n/// `neighbor_voxels` is at direction `direction` from `voxels`.\npub fn fix_margins(\n    dimension: u8,\n    vertex: Vertex,\n    voxels: &mut VoxelData,\n    direction: ChunkDirection,\n    neighbor_voxels: &mut VoxelData,\n) {\n    let neighbor_axis_permutation = neighbor_axis_permutation(vertex, direction);\n\n    let margin_coord = CoordsWithMargins::margin_coord(dimension, direction.sign);\n    let boundary_coord = CoordsWithMargins::boundary_coord(dimension, direction.sign);\n\n    // If two solid chunks are both void or both non-void, do nothing.\n    if voxels.is_solid()\n        && neighbor_voxels.is_solid()\n        && (voxels.get(0) == Material::Void) == (neighbor_voxels.get(0) == Material::Void)\n    {\n        return;\n    }\n\n    // If either chunk is solid and consistent with the boundary of the other chunk, do nothing.\n    // Since this consists of two similar cases (which of the two chunks is solid), we use a loop\n    // here to make it clear how the logic of these two cases differ from each other.\n    for (dense_voxels, dense_to_solid_direction, solid_voxels) in [\n        (&*voxels, direction, &*neighbor_voxels),\n        (\n            &*neighbor_voxels,\n            neighbor_axis_permutation * direction,\n            &*voxels,\n        ),\n    ] {\n        // Check that dense_voxels is indeed dense and solid_voxels is indeed solid\n        if !dense_voxels.is_solid() && solid_voxels.is_solid() {\n            let solid_voxels_is_void = solid_voxels.get(0) == Material::Void;\n            // Check that the face of 
dense_voxels that meets solid_voxels matches. If it does,\n            // skip the margin reconciliation stage.\n            if all_voxels_at_face(dimension, dense_voxels, dense_to_solid_direction, |m| {\n                (m == Material::Void) == solid_voxels_is_void\n            }) {\n                return;\n            }\n        }\n    }\n\n    // Otherwise, both chunks need to be dense, and margins should be reconciled between them.\n    let voxel_data = voxels.data_mut(dimension);\n    let neighbor_voxel_data = neighbor_voxels.data_mut(dimension);\n    for j in 0..dimension {\n        for i in 0..dimension {\n            // Determine coordinates of the boundary voxel (to read from) and the margin voxel (to write to)\n            // in voxel_data's perspective. To convert to neighbor_voxel_data's perspective, left-multiply\n            // by neighbor_axis_permutation.\n            let coords_of_boundary_voxel = CoordsWithMargins(\n                [boundary_coord, i + 1, j + 1].tuv_to_xyz(direction.axis as usize),\n            );\n            let coords_of_margin_voxel =\n                CoordsWithMargins([margin_coord, i + 1, j + 1].tuv_to_xyz(direction.axis as usize));\n\n            // Use neighbor_voxel_data to set margins of voxel_data\n            voxel_data[coords_of_margin_voxel.to_index(dimension)] = neighbor_voxel_data\n                [(neighbor_axis_permutation * coords_of_boundary_voxel).to_index(dimension)];\n\n            // Use voxel_data to set margins of neighbor_voxel_data\n            neighbor_voxel_data\n                [(neighbor_axis_permutation * coords_of_margin_voxel).to_index(dimension)] =\n                voxel_data[coords_of_boundary_voxel.to_index(dimension)];\n        }\n    }\n}\n\n/// Check if the given predicate `f` holds true for any voxel at the given face of a chunk\nfn all_voxels_at_face(\n    dimension: u8,\n    voxels: &VoxelData,\n    direction: ChunkDirection,\n    f: impl Fn(Material) -> bool,\n) -> bool {\n    let 
boundary_coord = CoordsWithMargins::boundary_coord(dimension, direction.sign);\n    for j in 0..dimension {\n        for i in 0..dimension {\n            let coords_of_boundary_voxel = CoordsWithMargins(\n                [boundary_coord, i + 1, j + 1].tuv_to_xyz(direction.axis as usize),\n            );\n\n            if !f(voxels.get(coords_of_boundary_voxel.to_index(dimension))) {\n                return false;\n            }\n        }\n    }\n\n    true\n}\n\n/// Updates the margins of a given VoxelData to match the voxels they're next to. This is a good assumption to start\n/// with before taking into account neighboring chunks because it means that no surface will be present on the boundaries\n/// of the chunk, resulting in the least rendering. This is also generally accurate when the neighboring chunks are solid.\npub fn initialize_margins(dimension: u8, voxels: &mut VoxelData) {\n    // If voxels is solid, the margins are already set up the way they should be.\n    if voxels.is_solid() {\n        return;\n    }\n\n    for direction in ChunkDirection::iter() {\n        let margin_coord = CoordsWithMargins::margin_coord(dimension, direction.sign);\n        let boundary_coord = CoordsWithMargins::boundary_coord(dimension, direction.sign);\n        let chunk_data = voxels.data_mut(dimension);\n        for j in 0..dimension {\n            for i in 0..dimension {\n                // Determine coordinates of the boundary voxel (to read from) and the margin voxel (to write to).\n                let coords_of_boundary_voxel = CoordsWithMargins(\n                    [boundary_coord, i + 1, j + 1].tuv_to_xyz(direction.axis as usize),\n                );\n                let coords_of_margin_voxel = CoordsWithMargins(\n                    [margin_coord, i + 1, j + 1].tuv_to_xyz(direction.axis as usize),\n                );\n\n                chunk_data[coords_of_margin_voxel.to_index(dimension)] =\n                    
chunk_data[coords_of_boundary_voxel.to_index(dimension)];\n            }\n        }\n    }\n}\n\n/// Based on the given `coords` and the neighboring voxel at direction\n/// `direction` (if it's in a different chunk), updates both of their respective\n/// margins to match each others' materials.\npub fn reconcile_margin_voxels(\n    graph: &mut Graph,\n    chunk: ChunkId,\n    coords: Coords,\n    direction: ChunkDirection,\n) {\n    let coords_of_boundary_voxel: CoordsWithMargins = coords.into();\n    let dimension = graph.layout().dimension();\n\n    // There is nothing to do if we're not on a boundary voxel.\n    if coords_of_boundary_voxel[direction.axis]\n        != CoordsWithMargins::boundary_coord(dimension, direction.sign)\n    {\n        return;\n    }\n\n    let mut coords_of_margin_voxel = coords_of_boundary_voxel;\n    coords_of_margin_voxel[direction.axis] =\n        CoordsWithMargins::margin_coord(dimension, direction.sign);\n\n    let neighbor_axis_permutation = neighbor_axis_permutation(chunk.vertex, direction);\n    let Some(neighbor_chunk) = graph.get_chunk_neighbor(chunk, direction.axis, direction.sign)\n    else {\n        // If there's no neighbor chunk, there is nothing to do.\n        return;\n    };\n\n    // Gather information from the current chunk and the neighboring chunk. If either is unpopulated, there\n    // is nothing to do.\n    let material = if let Chunk::Populated { voxels, .. 
} = &graph[chunk] {\n        voxels.get(coords.to_index(dimension))\n    } else {\n        return;\n    };\n    let neighbor_material = if let Chunk::Populated {\n        voxels: neighbor_voxels,\n        ..\n    } = &graph[neighbor_chunk]\n    {\n        neighbor_voxels\n            .get((neighbor_axis_permutation * coords_of_boundary_voxel).to_index(dimension))\n    } else {\n        return;\n    };\n\n    // Update the neighbor chunk's margin to the current chunk's material.\n    let Chunk::Populated {\n        voxels: neighbor_voxels,\n        surface: neighbor_surface,\n        old_surface: neighbor_old_surface,\n    } = &mut graph[neighbor_chunk]\n    else {\n        unreachable!();\n    };\n    neighbor_voxels.data_mut(dimension)\n        [(neighbor_axis_permutation * coords_of_margin_voxel).to_index(dimension)] = material;\n    *neighbor_old_surface = neighbor_surface.take().or(*neighbor_old_surface);\n\n    // Update the current chunk's margin to the neighbor chunk's material.\n\n    // This can be necessary even if `neighbor_material` hasn't changed because\n    // margins are not guaranteed to have exactly the right material unless they\n    // need to be rendered. 
For instance a margin can sometimes have material\n    // \"Dirt\" even if the voxel it's based on has material \"Slate\" because\n    // changing the margin from \"Dirt\" to \"Slate\" earlier would have required\n    // turning a solid chunk into a dense chunk.\n    let Chunk::Populated {\n        voxels,\n        surface,\n        old_surface,\n    } = &mut graph[chunk]\n    else {\n        unreachable!();\n    };\n    voxels.data_mut(dimension)[coords_of_margin_voxel.to_index(dimension)] = neighbor_material;\n    *old_surface = surface.take().or(*old_surface);\n}\n\nfn neighbor_axis_permutation(vertex: Vertex, direction: ChunkDirection) -> ChunkAxisPermutation {\n    match direction.sign {\n        CoordSign::Plus => vertex.chunk_axis_permutations()[direction.axis as usize],\n        CoordSign::Minus => ChunkAxisPermutation::IDENTITY,\n    }\n}\n\n/// Coordinates for a discrete voxel within a chunk, including margins\n#[derive(Debug, Clone, Copy, PartialEq, Eq)]\nstruct CoordsWithMargins(pub [u8; 3]);\n\nimpl CoordsWithMargins {\n    /// Returns the array index in `VoxelData` corresponding to these coordinates\n    pub fn to_index(self, chunk_size: u8) -> usize {\n        let chunk_size_with_margin = chunk_size as usize + 2;\n        (self.0[0] as usize)\n            + (self.0[1] as usize) * chunk_size_with_margin\n            + (self.0[2] as usize) * chunk_size_with_margin.pow(2)\n    }\n\n    /// Returns the x, y, or z coordinate that would correspond to the margin in the direction of `sign`\n    pub fn margin_coord(chunk_size: u8, sign: CoordSign) -> u8 {\n        match sign {\n            CoordSign::Plus => chunk_size + 1,\n            CoordSign::Minus => 0,\n        }\n    }\n\n    /// Returns the x, y, or z coordinate that would correspond to the voxel meeting the chunk boundary in the direction of `sign`\n    pub fn boundary_coord(chunk_size: u8, sign: CoordSign) -> u8 {\n        match sign {\n            CoordSign::Plus => chunk_size,\n            
CoordSign::Minus => 1,\n        }\n    }\n}\n\nimpl From<Coords> for CoordsWithMargins {\n    #[inline]\n    fn from(value: Coords) -> Self {\n        CoordsWithMargins([value.0[0] + 1, value.0[1] + 1, value.0[2] + 1])\n    }\n}\n\nimpl std::ops::Index<CoordAxis> for CoordsWithMargins {\n    type Output = u8;\n\n    #[inline]\n    fn index(&self, coord_axis: CoordAxis) -> &u8 {\n        self.0.index(coord_axis as usize)\n    }\n}\n\nimpl std::ops::IndexMut<CoordAxis> for CoordsWithMargins {\n    #[inline]\n    fn index_mut(&mut self, coord_axis: CoordAxis) -> &mut u8 {\n        self.0.index_mut(coord_axis as usize)\n    }\n}\n\nimpl std::ops::Mul<CoordsWithMargins> for ChunkAxisPermutation {\n    type Output = CoordsWithMargins;\n\n    fn mul(self, rhs: CoordsWithMargins) -> Self::Output {\n        let mut result = CoordsWithMargins([0; 3]);\n        for axis in CoordAxis::iter() {\n            result[self[axis]] = rhs[axis];\n        }\n        result\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::{dodeca::Vertex, graph::NodeId, voxel_math::Coords, world::Material};\n\n    use super::*;\n\n    #[test]\n    fn test_fix_margins() {\n        // This test case can set up empirically by placing blocks and printing their coordinates to confirm which\n        // coordinates are adjacent to each other.\n\n        // `voxels` lives at vertex F\n        let mut voxels = VoxelData::Solid(Material::Void);\n        voxels.data_mut(12)[Coords([11, 2, 10]).to_index(12)] = Material::WoodPlanks;\n\n        // `neighbor_voxels` lives at vertex J\n        let mut neighbor_voxels = VoxelData::Solid(Material::Void);\n        neighbor_voxels.data_mut(12)[Coords([2, 10, 11]).to_index(12)] = Material::Grass;\n\n        // Sanity check that voxel adjacencies are as expected. 
If the test fails here, it's likely that \"dodeca.rs\" was\n        // redesigned, and the test itself will have to be fixed, rather than the code being tested.\n        assert_eq!(Vertex::F.adjacent_vertices()[0], Vertex::J);\n        assert_eq!(Vertex::J.adjacent_vertices()[2], Vertex::F);\n\n        // Sanity check that voxels are populated as expected, using `CoordsWithMargins` for consistency with the actual\n        // test case.\n        assert_eq!(\n            voxels.get(CoordsWithMargins([12, 3, 11]).to_index(12)),\n            Material::WoodPlanks\n        );\n        assert_eq!(\n            neighbor_voxels.get(CoordsWithMargins([3, 11, 12]).to_index(12)),\n            Material::Grass\n        );\n\n        fix_margins(\n            12,\n            Vertex::F,\n            &mut voxels,\n            ChunkDirection::PLUS_X,\n            &mut neighbor_voxels,\n        );\n\n        // Actual verification: Check that the margins were set correctly\n        assert_eq!(\n            voxels.get(CoordsWithMargins([13, 3, 11]).to_index(12)),\n            Material::Grass\n        );\n        assert_eq!(\n            neighbor_voxels.get(CoordsWithMargins([3, 11, 13]).to_index(12)),\n            Material::WoodPlanks\n        );\n    }\n\n    #[test]\n    fn test_initialize_margins() {\n        let mut voxels = VoxelData::Solid(Material::Void);\n        voxels.data_mut(12)[Coords([11, 2, 10]).to_index(12)] = Material::WoodPlanks;\n        assert_eq!(\n            voxels.get(CoordsWithMargins([12, 3, 11]).to_index(12)),\n            Material::WoodPlanks\n        );\n\n        initialize_margins(12, &mut voxels);\n\n        assert_eq!(\n            voxels.get(CoordsWithMargins([13, 3, 11]).to_index(12)),\n            Material::WoodPlanks\n        );\n    }\n\n    #[test]\n    fn test_reconcile_margin_voxels() {\n        let mut graph = Graph::new(12);\n        let current_vertex = Vertex::A;\n        let neighbor_vertex = current_vertex.adjacent_vertices()[1];\n       
 let neighbor_node =\n            graph.ensure_neighbor(NodeId::ROOT, current_vertex.canonical_sides()[0]);\n\n        // These are the chunks this test will work with.\n        let current_chunk = ChunkId::new(NodeId::ROOT, current_vertex);\n        let node_neighbor_chunk = ChunkId::new(neighbor_node, current_vertex);\n        let vertex_neighbor_chunk = ChunkId::new(NodeId::ROOT, neighbor_vertex);\n\n        // Populate relevant chunks\n        for chunk in [current_chunk, node_neighbor_chunk, vertex_neighbor_chunk] {\n            graph[chunk] = Chunk::Populated {\n                voxels: VoxelData::Solid(Material::Void),\n                surface: None,\n                old_surface: None,\n            };\n        }\n\n        // Fill current chunk with appropriate materials\n        {\n            let Chunk::Populated { voxels, .. } = &mut graph[current_chunk] else {\n                unreachable!()\n            };\n            voxels.data_mut(12)[Coords([0, 7, 9]).to_index(12)] = Material::WoodPlanks;\n            voxels.data_mut(12)[Coords([5, 11, 9]).to_index(12)] = Material::Grass;\n        }\n\n        // Fill vertex_neighbor chunk with appropriate material\n        {\n            let Chunk::Populated { voxels, .. 
} = &mut graph[vertex_neighbor_chunk] else {\n                unreachable!()\n            };\n            voxels.data_mut(12)[Coords([5, 9, 11]).to_index(12)] = Material::Slate;\n        }\n\n        // Reconcile margins\n        reconcile_margin_voxels(\n            &mut graph,\n            current_chunk,\n            Coords([0, 7, 9]),\n            ChunkDirection::MINUS_X,\n        );\n        reconcile_margin_voxels(\n            &mut graph,\n            current_chunk,\n            Coords([5, 11, 9]),\n            ChunkDirection::PLUS_Y,\n        );\n\n        // Check the margins of current_chunk\n        let Chunk::Populated {\n            voxels: current_voxels,\n            ..\n        } = &graph[current_chunk]\n        else {\n            unreachable!(\"node_neighbor_chunk should have been populated by this test\");\n        };\n        assert_eq!(\n            current_voxels.get(CoordsWithMargins([6, 13, 10]).to_index(12)),\n            Material::Slate\n        );\n\n        // Check the margins of node_neighbor_chunk\n        let Chunk::Populated {\n            voxels: node_neighbor_voxels,\n            ..\n        } = &graph[node_neighbor_chunk]\n        else {\n            unreachable!(\"node_neighbor_chunk should have been populated by this test\");\n        };\n        assert_eq!(\n            node_neighbor_voxels.get(CoordsWithMargins([0, 8, 10]).to_index(12)),\n            Material::WoodPlanks\n        );\n\n        // Check the margins of vertex_neighbor_chunk\n        let Chunk::Populated {\n            voxels: vertex_neighbor_voxels,\n            ..\n        } = &graph[vertex_neighbor_chunk]\n        else {\n            unreachable!(\"vertex_neighbor_chunk should have been populated by this test\");\n        };\n        assert_eq!(\n            vertex_neighbor_voxels.get(CoordsWithMargins([6, 10, 13]).to_index(12)),\n            Material::Grass\n        );\n    }\n}\n"
  },
  {
    "path": "common/src/math.rs",
    "content": "//! This module defines the a vector and matrix type for use in Minkowski space,\n//! allowing natural operations to be performed in hyperbolic space. To better\n//! understand the math involved, it is recommended to read \"Visualizing\n//! Hyperbolic Space: Unusual Uses of 4x4 Matrices.\" Phillips, Gunn.\n//!\n//! This module also defines a few other free helper functions that do not\n//! directly relate to hyperbolic space.\n\nuse na::{RealField, Scalar};\nuse serde::{Deserialize, Serialize};\nuse simba::scalar::SupersetOf;\n\n/// A stack-allocated 4-dimensional column-vector in Minkowski space. Such\n/// vectors are useful for computations in the hyperboloid model of hyperbolic\n/// space. Note that the last coordinate, not the first coordinate, is treated\n/// as the special \"time\" coordinate.\n///\n/// This vector type is versatile, being able to represent multiple things in\n/// Hyperbolic space. What it can represent is generally determined by the\n/// Minkowski inner product between the vector and itself.\n/// - If it's negative, it represents a point in hyperbolic space. The origin is\n///   represented with the unit w-vector.\n/// - If it's zero, it represents an _ideal_ point in hyperbolic space. Such a\n///   point can be associated with horospheres\n/// - If it's positive, it represents an _ultraideal_ point in hyperbolic space.\n///   Such points can be treated as oriented planes.\n///\n/// If the absolute value of this Minkowski inner product is 1, it is\n/// normalized, and equations involving such a vector tend to be simpler, much\n/// like with unit vectors. Two types, `MPoint` and `MDirection`, are available\n/// to facilitate the use of such vectors (for Minkowski inner product -1 and 1,\n/// respectively).\n///\n/// Note that the simplest way to represent directions/velocities/normals at a\n/// point in hyperbolic space is with a vector whose Minkowski inner product\n/// with that point is 0. 
Such a vector will be tangent to the hyperboloid model\n/// at that associated point, so it can naturally represent movement along the\n/// hyperboloid in that direction.\n///\n/// As a general rule, when working with such vectors, it is highly recommended\n/// to avoid dot products and related operations such as vector magnitude, as\n/// these operations are meaningless in Minkowski space and are not preserved by\n/// isometries.\n#[derive(Debug, Copy, Clone, Serialize, Deserialize, PartialEq)]\n#[repr(transparent)]\npub struct MVector<N: Scalar>(na::Vector4<N>);\n\nimpl<N: RealField + Copy> MVector<N> {\n    /// Normalizes the vector so that the Minkowski inner product between the\n    /// vector and itself is -1. It should be called on vectors with a negative\n    /// self-mip, generally representing points.\n    ///\n    /// Note that this function is numerically unstable for vectors representing\n    /// points far from the origin, so it is recommended to avoid this function\n    /// for such vectors.\n    pub fn normalized_point(&self) -> MPoint<N> {\n        let scale_factor_squared = -self.mip(self);\n        if scale_factor_squared <= na::zero() {\n            debug_assert!(\n                false,\n                \"Tried to normalize a non-point-like vector as a point.\"\n            );\n            return MPoint::origin();\n        }\n        let scale_factor = scale_factor_squared.sqrt();\n        MPoint(*self / scale_factor)\n    }\n\n    /// Normalizes the vector so that the Minkowski inner product between the\n    /// vector and itself is 1. 
It should be called on vectors with a positive\n    /// self-mip, generally representing directions.\n    ///\n    /// Note that this function is numerically unstable for vectors representing\n    /// directions from points far from the origin, so it is recommended to\n    /// avoid this function for such vectors.\n    pub fn normalized_direction(&self) -> MDirection<N> {\n        let scale_factor_squared = self.mip(self);\n        if scale_factor_squared <= na::zero() {\n            debug_assert!(\n                false,\n                \"Tried to normalize a non-direction-like vector as a direction.\"\n            );\n            return MDirection::x();\n        }\n        let scale_factor = scale_factor_squared.sqrt();\n        MDirection(*self / scale_factor)\n    }\n\n    /// Minkowski inner product, aka `<a, b>_h`. This is much like the dot\n    /// product, but the product of the w-components is negated. This is the\n    /// main operation that distinguishes Minkowski space from Euclidean\n    /// 4-space.\n    pub fn mip(&self, other: &impl AsRef<MVector<N>>) -> N {\n        let other = other.as_ref();\n        self.x * other.x + self.y * other.y + self.z * other.z - self.w * other.w\n    }\n\n    /// The Minkowski-space equivalent of the outer product of two vectors. 
This\n    /// produces a rank-one matrix that is a useful intermediate result when\n    /// computing other matrices, such as reflection or translation matrices.\n    fn minkowski_outer_product(self, other: &Self) -> na::Matrix4<N> {\n        self.0 * na::RowVector4::new(other.x, other.y, other.z, -other.w)\n    }\n\n    /// Cast the components of `self` to another type.\n    #[inline]\n    pub fn cast<N2: RealField + Copy + SupersetOf<N>>(self) -> MVector<N2> {\n        MVector(self.0.cast())\n    }\n\n    /// The column vector with components `[0, 0, 0, 0]`.\n    #[inline]\n    pub fn zero() -> Self {\n        Self(na::zero())\n    }\n\n    /// The vector representing the origin in hyperbolic space. Alias for `MVector::w()`.\n    #[inline]\n    pub fn origin() -> Self {\n        Self::w()\n    }\n\n    /// The column vector with components `[1, 0, 0, 0]`.\n    #[inline]\n    pub fn x() -> Self {\n        Self(na::Vector4::x())\n    }\n\n    /// The column vector with components `[0, 1, 0, 0]`.\n    #[inline]\n    pub fn y() -> Self {\n        Self(na::Vector4::y())\n    }\n\n    /// The column vector with components `[0, 0, 1, 0]`.\n    #[inline]\n    pub fn z() -> Self {\n        Self(na::Vector4::z())\n    }\n\n    /// The column vector with components `[0, 0, 0, 1]`.\n    #[inline]\n    pub fn w() -> Self {\n        Self(na::Vector4::w())\n    }\n\n    /// Creates an `MVector` with the given components.\n    #[inline]\n    pub fn new(x: N, y: N, z: N, w: N) -> Self {\n        MVector(na::Vector4::new(x, y, z, w))\n    }\n\n    /// The first three coordinates of the vector. 
When working with an\n    /// `MVector` representing a velocity/direction from the origin, the\n    /// w-coordinate should always be 0, so using this function to extract a 3D\n    /// vector can help make that assumption more explicit.\n    #[inline]\n    pub fn xyz(self) -> na::Vector3<N> {\n        self.0.xyz()\n    }\n}\n\nimpl<N: Scalar> std::ops::Index<usize> for MVector<N> {\n    type Output = N;\n    #[inline]\n    fn index(&self, i: usize) -> &Self::Output {\n        &self.0[i]\n    }\n}\n\nimpl<N: Scalar> std::ops::IndexMut<usize> for MVector<N> {\n    #[inline]\n    fn index_mut(&mut self, i: usize) -> &mut Self::Output {\n        &mut self.0[i]\n    }\n}\n\nimpl<N: Scalar> From<na::Vector4<N>> for MVector<N> {\n    /// Reinterprets the input as a vector in Minkowski space.\n    fn from(value: na::Vector4<N>) -> Self {\n        Self(value)\n    }\n}\n\nimpl<N: Scalar> From<MVector<N>> for na::Vector4<N> {\n    /// Unwraps the underlying vector. This effectively reinterprets the vector\n    /// as a vector in Euclidean 4-space, or, if interpreted as homogeneous\n    /// coordinates, a point within the 3D Beltrami-Klein model (as long as it's\n    /// inside the unit ball).\n    fn from(value: MVector<N>) -> na::Vector4<N> {\n        value.0\n    }\n}\n\nimpl<N: Scalar> std::ops::Deref for MVector<N> {\n    type Target = na::coordinates::XYZW<N>;\n    #[inline]\n    fn deref(&self) -> &Self::Target {\n        self.0.deref()\n    }\n}\n\nimpl<N: Scalar> std::ops::DerefMut for MVector<N> {\n    #[inline]\n    fn deref_mut(&mut self) -> &mut Self::Target {\n        self.0.deref_mut()\n    }\n}\n\nimpl<N: Scalar> AsRef<MVector<N>> for MVector<N> {\n    #[inline]\n    fn as_ref(&self) -> &MVector<N> {\n        self\n    }\n}\n\nimpl<N: RealField> std::ops::Add<MVector<N>> for MVector<N> {\n    type Output = MVector<N>;\n    #[inline]\n    fn add(self, other: MVector<N>) -> Self::Output {\n        MVector(self.0 + other.0)\n    }\n}\n\nimpl<N: RealField> 
std::ops::Add<&MVector<N>> for MVector<N> {\n    type Output = MVector<N>;\n    #[inline]\n    fn add(self, other: &MVector<N>) -> Self::Output {\n        MVector(self.0 + &other.0)\n    }\n}\n\nimpl<N: RealField> std::ops::Add<MVector<N>> for &MVector<N> {\n    type Output = MVector<N>;\n    #[inline]\n    fn add(self, other: MVector<N>) -> Self::Output {\n        MVector(&self.0 + other.0)\n    }\n}\n\nimpl<N: RealField> std::ops::Add<&MVector<N>> for &MVector<N> {\n    type Output = MVector<N>;\n    #[inline]\n    fn add(self, other: &MVector<N>) -> Self::Output {\n        MVector(&self.0 + &other.0)\n    }\n}\n\nimpl<N: RealField> std::ops::AddAssign<MVector<N>> for MVector<N> {\n    #[inline]\n    fn add_assign(&mut self, other: MVector<N>) {\n        self.0 += other.0;\n    }\n}\n\nimpl<N: RealField> std::ops::AddAssign<&MVector<N>> for MVector<N> {\n    #[inline]\n    fn add_assign(&mut self, other: &MVector<N>) {\n        self.0 += &other.0;\n    }\n}\n\nimpl<N: RealField> std::ops::Sub<MVector<N>> for MVector<N> {\n    type Output = MVector<N>;\n    #[inline]\n    fn sub(self, other: MVector<N>) -> Self::Output {\n        MVector(self.0 - other.0)\n    }\n}\n\nimpl<N: RealField> std::ops::Sub<&MVector<N>> for MVector<N> {\n    type Output = MVector<N>;\n    #[inline]\n    fn sub(self, other: &MVector<N>) -> Self::Output {\n        MVector(self.0 - &other.0)\n    }\n}\n\nimpl<N: RealField> std::ops::Sub<MVector<N>> for &MVector<N> {\n    type Output = MVector<N>;\n    #[inline]\n    fn sub(self, other: MVector<N>) -> Self::Output {\n        MVector(&self.0 - other.0)\n    }\n}\n\nimpl<N: RealField> std::ops::Sub<&MVector<N>> for &MVector<N> {\n    type Output = MVector<N>;\n    #[inline]\n    fn sub(self, other: &MVector<N>) -> Self::Output {\n        MVector(&self.0 - &other.0)\n    }\n}\n\nimpl<N: RealField> std::ops::SubAssign<MVector<N>> for MVector<N> {\n    #[inline]\n    fn sub_assign(&mut self, other: MVector<N>) {\n        self.0 -= other.0;\n    
}\n}\n\nimpl<N: RealField> std::ops::SubAssign<&MVector<N>> for MVector<N> {\n    #[inline]\n    fn sub_assign(&mut self, other: &MVector<N>) {\n        self.0 -= &other.0;\n    }\n}\n\nimpl<N: RealField> std::ops::Neg for MVector<N> {\n    type Output = MVector<N>;\n    #[inline]\n    fn neg(self) -> Self::Output {\n        MVector(-self.0)\n    }\n}\n\nimpl<N: RealField> std::ops::Neg for &MVector<N> {\n    type Output = MVector<N>;\n    #[inline]\n    fn neg(self) -> Self::Output {\n        MVector(-&self.0)\n    }\n}\n\nimpl<N: RealField> std::ops::Mul<N> for MVector<N> {\n    type Output = MVector<N>;\n    #[inline]\n    fn mul(self, rhs: N) -> Self::Output {\n        MVector(self.0 * rhs)\n    }\n}\n\nimpl<N: RealField> std::ops::Mul<N> for &MVector<N> {\n    type Output = MVector<N>;\n    #[inline]\n    fn mul(self, rhs: N) -> Self::Output {\n        MVector(&self.0 * rhs)\n    }\n}\n\nimpl<N: RealField> std::ops::MulAssign<N> for MVector<N> {\n    #[inline]\n    fn mul_assign(&mut self, rhs: N) {\n        self.0 *= rhs;\n    }\n}\n\nimpl<N: RealField> std::ops::Div<N> for MVector<N> {\n    type Output = MVector<N>;\n    #[inline]\n    fn div(self, rhs: N) -> Self::Output {\n        MVector(self.0 / rhs)\n    }\n}\n\nimpl<N: RealField> std::ops::Div<N> for &MVector<N> {\n    type Output = MVector<N>;\n    #[inline]\n    fn div(self, rhs: N) -> Self::Output {\n        MVector(&self.0 / rhs)\n    }\n}\n\nimpl<N: RealField> std::ops::DivAssign<N> for MVector<N> {\n    #[inline]\n    fn div_assign(&mut self, rhs: N) {\n        self.0 /= rhs;\n    }\n}\n\n/// An `MVector` with the constraint that the Minkowski inner product between\n/// the vector and itself is -1. 
Such a vector can be used to represent a point\n/// in hyperbolic space.\n#[derive(Debug, Copy, Clone, Serialize, Deserialize, PartialEq)]\n#[repr(transparent)]\npub struct MPoint<N: Scalar>(MVector<N>);\n\nimpl<N: RealField + Copy> MPoint<N> {\n    /// Returns the midpoint between this vector and the given vector.\n    pub fn midpoint(&self, other: &Self) -> MPoint<N> {\n        // The midpoint in the hyperboloid model is simply the midpoint in the\n        // underlying Euclidean 4-space normalized to land on the hyperboloid.\n        (self.as_ref() + other.as_ref()).normalized_point()\n    }\n\n    /// Returns the distance between this vector and the given vector.\n    pub fn distance(&self, other: &Self) -> N {\n        // The absolute value of the mip between two normalized point-like vectors is\n        // the cosh of their distance in hyperbolic space. This is analogous to\n        // the fact that the dot product between two unit vectors is the cos of\n        // their angle (or distance in spherical geometry).\n        (-self.mip(other)).acosh()\n    }\n\n    /// Minkowski inner product, aka `<a, b>_h`. This is much like the dot\n    /// product, but the product of the w-components is negated. This is the\n    /// main operation that distinguishes Minkowski space from Euclidean\n    /// 4-space.\n    #[inline]\n    pub fn mip(&self, other: &impl AsRef<MVector<N>>) -> N {\n        self.as_ref().mip(other)\n    }\n\n    /// The vector representing the origin in hyperbolic space. Alias for `MVector::w()`.\n    #[inline]\n    pub fn origin() -> Self {\n        Self::w()\n    }\n\n    /// The column vector with components `[0, 0, 0, 1]`.\n    #[inline]\n    pub fn w() -> Self {\n        Self(MVector::w())\n    }\n\n    /// Creates an `MPoint` with the given components. 
It is the caller's\n    /// responsibility to ensure that the `MPoint` invariant holds.\n    #[inline]\n    pub fn new_unchecked(x: N, y: N, z: N, w: N) -> Self {\n        Self(MVector::new(x, y, z, w))\n    }\n\n    /// Cast the components of `self` to another type.\n    #[inline]\n    pub fn cast<N2: RealField + Copy + SupersetOf<N>>(self) -> MPoint<N2> {\n        MPoint(self.0.cast())\n    }\n}\n\nimpl<N: Scalar> From<MPoint<N>> for MVector<N> {\n    /// Removes the constraint that makes the argument an `MPoint`\n    fn from(value: MPoint<N>) -> MVector<N> {\n        value.0\n    }\n}\n\nimpl<N: Scalar> From<MPoint<N>> for na::Vector4<N> {\n    /// Unwraps the underlying vector. This effectively reinterprets the vector\n    /// as a vector in Euclidean 4-space, or, if interpreted as homogeneous\n    /// coordinates, a point within the 3D Beltrami-Klein model (as long as it's\n    /// inside the unit ball).\n    fn from(value: MPoint<N>) -> na::Vector4<N> {\n        value.0.0\n    }\n}\n\nimpl<N: Scalar> std::ops::Deref for MPoint<N> {\n    type Target = na::coordinates::XYZW<N>;\n    #[inline]\n    fn deref(&self) -> &Self::Target {\n        self.0.deref()\n    }\n}\n\nimpl<N: Scalar> AsRef<MVector<N>> for MPoint<N> {\n    /// Unwraps the `MPoint` into its underlying `MVector`\n    #[inline]\n    fn as_ref(&self) -> &MVector<N> {\n        &self.0\n    }\n}\n\n/// An `MVector` with the constraint that the Minkowski inner product between\n/// the vector and itself is 1. 
Such a vector can be used to represent a\n/// direction in hyperbolic space.\n#[derive(Debug, Copy, Clone, Serialize, Deserialize, PartialEq)]\n#[repr(transparent)]\npub struct MDirection<N: Scalar>(MVector<N>);\n\nimpl<N: RealField + Copy> MDirection<N> {\n    /// The column vector with components `[1, 0, 0, 0]`.\n    #[inline]\n    pub fn x() -> Self {\n        Self(MVector::x())\n    }\n\n    /// The column vector with components `[0, 1, 0, 0]`.\n    #[inline]\n    pub fn y() -> Self {\n        Self(MVector::y())\n    }\n\n    /// The column vector with components `[0, 0, 1, 0]`.\n    #[inline]\n    pub fn z() -> Self {\n        Self(MVector::z())\n    }\n\n    /// Minkowski inner product, aka `<a, b>_h`. This is much like the dot\n    /// product, but the product of the w-components is negated. This is the\n    /// main operation that distinguishes Minkowski space from Euclidean\n    /// 4-space.\n    #[inline]\n    pub fn mip(&self, other: &impl AsRef<MVector<N>>) -> N {\n        self.as_ref().mip(other)\n    }\n\n    /// Creates an `MDirection` with the given components. It is the caller's\n    /// responsibility to ensure that the `MDirection` invariant holds.\n    #[inline]\n    pub fn new_unchecked(x: N, y: N, z: N, w: N) -> Self {\n        Self(MVector::new(x, y, z, w))\n    }\n\n    /// Cast the components of `self` to another type.\n    #[inline]\n    pub fn cast<N2: RealField + Copy + SupersetOf<N>>(self) -> MDirection<N2> {\n        MDirection(self.0.cast())\n    }\n}\n\nimpl<N: Scalar> From<MDirection<N>> for na::Vector4<N> {\n    /// Unwraps the underlying vector. 
This effectively reinterprets the vector\n    /// as a vector in Euclidean 4-space, or, if interpreted as homogeneous\n    /// coordinates, a point within the 3D Beltrami-Klein model (as long as it's\n    /// inside the unit ball).\n    fn from(value: MDirection<N>) -> na::Vector4<N> {\n        value.0.0\n    }\n}\n\nimpl<N: Scalar> From<MDirection<N>> for MVector<N> {\n    /// Removes the constraint that makes the argument an `MDirection`\n    fn from(value: MDirection<N>) -> MVector<N> {\n        value.0\n    }\n}\n\nimpl<N: RealField + Copy> From<na::UnitVector3<N>> for MDirection<N> {\n    /// Reinterprets the input as a vector in Minkowski space.\n    fn from(value: na::UnitVector3<N>) -> Self {\n        MDirection(MVector(value.to_homogeneous()))\n    }\n}\n\nimpl<N: Scalar> std::ops::Deref for MDirection<N> {\n    type Target = na::coordinates::XYZW<N>;\n    #[inline]\n    fn deref(&self) -> &Self::Target {\n        self.0.deref()\n    }\n}\n\nimpl<N: Scalar> AsRef<MVector<N>> for MDirection<N> {\n    /// Unwraps the `MDirection` into its underlying `MVector`\n    #[inline]\n    fn as_ref(&self) -> &MVector<N> {\n        &self.0\n    }\n}\n\nimpl<N: RealField> std::ops::Neg for MDirection<N> {\n    type Output = MDirection<N>;\n    #[inline]\n    fn neg(self) -> Self::Output {\n        MDirection(-self.0)\n    }\n}\n\nimpl<N: RealField> std::ops::Neg for &MDirection<N> {\n    type Output = MDirection<N>;\n    #[inline]\n    fn neg(self) -> Self::Output {\n        MDirection(-&self.0)\n    }\n}\n\n/// A stack-allocated, column-major, 4x4 square matrix in Minkowski space that\n/// preserves the Minkowski inner product. Such matrices are useful for\n/// computations in the hyperboloid model of hyperbolic space. 
Note that the\n/// last coordinate, not the first coordinate, is treated as the special \"time\"\n/// coordinate.\n///\n/// To ensure that this matrix indeed represents an isometry in Minkowski space,\n/// a few invariants are preserved:\n/// - The Minkowski inner product between any two distinct columns is 0.\n/// - The Minkowski inner product of a column with itself is 1 for the first\n///   three columns, and -1 for the last column.\n#[derive(Debug, Copy, Clone, Serialize, Deserialize, PartialEq)]\n#[repr(transparent)]\npub struct MIsometry<N: Scalar>(na::Matrix4<N>);\n\nimpl<N: RealField + Copy> MIsometry<N> {\n    /// Returns a view containing the i-th row of this matrix.\n    #[inline]\n    pub fn row(&self, i: usize) -> na::MatrixView1x4<'_, N, na::U1, na::U4> {\n        self.0.row(i)\n    }\n\n    /// Creates an identity matrix.\n    #[inline]\n    pub fn identity() -> Self {\n        Self(na::Matrix4::identity())\n    }\n\n    /// The reflection about the hyperbolic plane represented by the given\n    /// vector.\n    pub fn reflection(normal: &MDirection<N>) -> Self {\n        // The formula below is the equivalent of the formula for the\n        // Householder matrix, but using the minkowski outer product instead of the\n        // standard outer product to ensure that the reflection is done in\n        // Minkowski space. 
The resulting formula is\n        // `I - 2vv*`\n        Self(\n            na::Matrix4::<N>::identity()\n                - normal.as_ref().minkowski_outer_product(normal.as_ref())\n                    * na::convert::<_, N>(2.0),\n        )\n    }\n\n    /// The matrix that translates `a` to `b`.\n    pub fn translation(a: &MPoint<N>, b: &MPoint<N>) -> MIsometry<N> {\n        // A translation in hyperbolic space can be split into two\n        // point-reflections (reflections about a point, where the midpoint of\n        // the start and end points is that point)\n        // - A reflection about the point `a`\n        // - A reflection about the midpoint between `a` and `b`\n        // One can convince oneself of this by seeing where `a` goes and noting\n        // that no points will leave the line between `a` and `b`.\n\n        // The following notes below will use \"*\" as the adjoint operator,\n        // working in Minkowski space. All multiplication will be implied\n        // multiplication to avoid ambiguity with this operator. 
The matrix for\n        // a point-reflection can be derived in a similar manner to a\n        // Householder matrix and ends up being the same with a negated term:\n        // `-I - 2vv*`\n        // The midpoint of `a` and `b` is `a+b` normalized:\n        // `(a+b) / sqrt(-(a+b)*(a+b))`\n        // which simplifies to\n        // `(a+b) / sqrt(-(a*a + a*b + b*a + b*b))`\n        // `(a+b) / sqrt(-(a*a + 2a*b + b*b))`\n        // `(a+b) / sqrt(-(-1 + 2a*b + -1))`\n        // `(a+b) / sqrt(2-2a*b)`\n\n        // Therefore, the derivation of the translation formula is as follows:\n        // `reflect_about((a+b) / -(a*b)) * reflect_about(a)`\n        // `(-I - 2((a+b) / sqrt(2-2a*b))((a+b) / sqrt(2-2a*b))*) (-I - 2aa*)`\n        // `(-I - 2(a+b)(a+b)*/(2-2a*b)) (-I - 2aa*)`\n        // `(-I - (a+b)(a+b)*/(1-a*b)) (-I - 2aa*)`\n        // `I + (a+b)(a+b)*/(1-a*b) + 2aa* + 2(a+b)(a+b)*aa*/(1-a*b)`\n        // Using `(1-a*b) = (-a*a-a*b) = -a*(a+b) = -(a+b)*a`\n        // `I + (a+b)(a+b)*/(1-a*b) + 2aa* - 2(a+b)((a+b)*a)a*/((a+b)*a)`\n        // `I + (a+b)(a+b)*/(1-a*b) + 2aa* - 2(a+b)a*`\n        // `I + (a+b)(a+b)*/(1-a*b) + 2aa* - 2aa* - 2ba*`\n        // `I - 2ba* + (a+b)(a+b)*/(1-a*b)`\n        let a_plus_b = a.as_ref() + b.as_ref();\n        Self(\n            na::Matrix4::<N>::identity()\n                - b.as_ref().minkowski_outer_product(a.as_ref()) * na::convert::<_, N>(2.0)\n                + a_plus_b.minkowski_outer_product(&a_plus_b) / (N::one() - a.mip(b)),\n        )\n    }\n\n    /// The matrix that translates the origin in the direction of the given\n    /// vector with distance equal to its magnitude\n    pub fn translation_along(v: &na::Vector3<N>) -> MIsometry<N> {\n        // Translating x units along the x-axis takes the origin to `[sinh(x), 0, 0, cosh(x)]`.\n        // This is analogous to a rotation of `[0, 0, 1, 1]` along the xw-plane being `[sin(theta), 0, 0, cos(theta)]`.\n\n        // To find a general translation given this principle, we 
know that the\n        // origin moves to a location fitting the following constraints:\n        // - The first three coordinates must be in the same direction as `v`\n        // - The magnitude of the vector representing the first three coordinates is `sinh(||v||)`\n        // - The fourth component is `cosh(||v||)`\n\n        // Once we know where the origin goes, we use the `MIsometry::translation` function.\n        let norm = v.norm();\n        if norm == na::zero() {\n            return MIsometry::identity();\n        }\n        // `sinhc(x)` simply means `sinh(x)/x` but defined when `x` is 0. Using sinhc combines\n        // the normalization of `v` with its multiplication by `sinh(||v||)`.\n        MIsometry::translation(\n            &MPoint::origin(),\n            &MPoint(MVector((v * norm.sinhc()).insert_row(3, norm.cosh()))),\n        )\n    }\n\n    /// Creates an `MIsometry` with the given columns. It is the caller's\n    /// responsibility to ensure that the resulting matrix is a valid isometry\n    /// by ensuring that columns are mutually orthogonal.\n    #[inline]\n    pub fn from_columns_unchecked(\n        direction_columns: &[MDirection<N>; 3],\n        point_column: MPoint<N>,\n    ) -> Self {\n        Self(na::Matrix4::from_columns(&[\n            direction_columns[0].0.0,\n            direction_columns[1].0.0,\n            direction_columns[2].0.0,\n            point_column.0.0,\n        ]))\n    }\n\n    /// Creates an `MIsometry` with its elements filled with the components\n    /// provided by a slice in column-major order. It is the caller's\n    /// responsibility to ensure that the resulting matrix is a valid isometry.\n    #[inline]\n    pub fn from_column_slice_unchecked(data: &[N]) -> Self {\n        Self(na::Matrix4::from_column_slice(data))\n    }\n\n    /// Inverts the matrix. Note that this is an efficient operation because the\n    /// matrix is an isometry in Minkowski space. 
The operation actually\n    /// performed resembles a matrix transpose, but with some terms negated.\n    ///\n    /// Mathematically, the operation performed is the Hermitian adjoint, where\n    /// the inner product used is the Minkowski inner product.\n    #[rustfmt::skip]\n    pub fn inverse(&self) -> Self {\n        MIsometry(\n            na::Matrix4::new(\n                self.0.m11,  self.0.m21,  self.0.m31, -self.0.m41,\n                self.0.m12,  self.0.m22,  self.0.m32, -self.0.m42,\n                self.0.m13,  self.0.m23,  self.0.m33, -self.0.m43,\n                -self.0.m14, -self.0.m24, -self.0.m34,  self.0.m44,\n            )\n        )\n    }\n\n    /// Whether an isometry reverses winding with respect to the norm\n    pub fn parity(&self) -> bool {\n        self.0.fixed_view::<3, 3>(0, 0).determinant() < na::zero::<N>()\n    }\n\n    /// Corrects for any drift that may have occurred in the matrix entries due\n    /// to rounding that would violate the isometry constraints of the matrix.\n    /// If many operations are performed on a single matrix, it is recommended\n    /// to call this function to correct for this drift.\n    ///\n    /// Note that this function is numerically unstable for transformations that\n    /// have a large translation component, so it is recommended to avoid this\n    /// function for such matrices.\n    pub fn renormalized(&self) -> MIsometry<N> {\n        // There are multiple ways this matrix can be renormalized. 
This\n        // approach splits the translation and orientation components of the\n        // hyperbolic isometry, renormalizes them both, and recombines them.\n\n        // Since the last column of the matrix is where the origin gets\n        // translated, we extract the normalized translation component by\n        // recreating a hyperbolic translation matrix using that column.\n        let normalized_translation_component = MIsometry::translation(\n            &MPoint::origin(),\n            &MVector(self.0.column(3).into()).normalized_point(),\n        );\n\n        // Once we have the translation component, we use that component's\n        // inverse to remove the translation from the original matrix to extract\n        // the orientation component.\n        let orientation_component = normalized_translation_component.inverse() * self;\n\n        // Then, we use the QR decomposition to convert the orientation\n        // component into an orthogonal matrix, which renormalizes it.\n        let normalized_orientation_component = MIsometry(\n            na::QR::new(\n                (orientation_component.0)\n                    .fixed_view::<3, 3>(0, 0)\n                    .clone_owned(),\n            )\n            .q()\n            .to_homogeneous(),\n        );\n\n        // Finally, we recombine the newly-renormalized translation and\n        // orientation components.\n        normalized_translation_component * normalized_orientation_component\n    }\n\n    /// Cast the components of `self` to another type.\n    #[inline]\n    pub fn cast<N2: RealField + Copy + SupersetOf<N>>(self) -> MIsometry<N2> {\n        MIsometry(self.0.cast())\n    }\n}\n\nimpl<N: Scalar> std::ops::Index<(usize, usize)> for MIsometry<N> {\n    type Output = N;\n    #[inline]\n    fn index(&self, ij: (usize, usize)) -> &Self::Output {\n        &self.0[ij]\n    }\n}\n\nimpl<N: RealField + Copy> From<na::UnitQuaternion<N>> for MIsometry<N> {\n    /// Converts a quaternion into the 
matrix for the rotation it represents.\n    fn from(value: na::UnitQuaternion<N>) -> Self {\n        MIsometry(value.to_homogeneous())\n    }\n}\n\nimpl<N: RealField + Copy> From<na::Rotation3<N>> for MIsometry<N> {\n    /// Converts a rotation into the matrix representing that rotation.\n    fn from(value: na::Rotation3<N>) -> Self {\n        MIsometry(value.to_homogeneous())\n    }\n}\n\nimpl<N: Scalar> From<MIsometry<N>> for na::Matrix4<N> {\n    /// Unwraps the underlying matrix. This effectively reinterprets the matrix\n    /// as a matrix in Euclidean 4-space, or, if interpreted as homogeneous\n    /// coordinates, a transformation within the 3D Beltrami-Klein model.\n    fn from(value: MIsometry<N>) -> na::Matrix4<N> {\n        value.0\n    }\n}\n\nimpl<N: Scalar> std::ops::Deref for MIsometry<N> {\n    type Target = na::coordinates::M4x4<N>;\n    #[inline]\n    fn deref(&self) -> &Self::Target {\n        self.0.deref()\n    }\n}\n\nimpl<N: RealField> AsRef<[[N; 4]; 4]> for MIsometry<N> {\n    #[inline]\n    fn as_ref(&self) -> &[[N; 4]; 4] {\n        self.0.as_ref()\n    }\n}\n\nimpl<N: RealField> std::ops::Mul<MIsometry<N>> for MIsometry<N> {\n    type Output = MIsometry<N>;\n    #[inline]\n    fn mul(self, rhs: MIsometry<N>) -> Self::Output {\n        MIsometry(self.0 * rhs.0)\n    }\n}\n\nimpl<N: RealField> std::ops::Mul<&MIsometry<N>> for MIsometry<N> {\n    type Output = MIsometry<N>;\n    #[inline]\n    fn mul(self, rhs: &MIsometry<N>) -> Self::Output {\n        MIsometry(self.0 * &rhs.0)\n    }\n}\n\nimpl<N: RealField> std::ops::Mul<MIsometry<N>> for &MIsometry<N> {\n    type Output = MIsometry<N>;\n    #[inline]\n    fn mul(self, rhs: MIsometry<N>) -> Self::Output {\n        MIsometry(&self.0 * rhs.0)\n    }\n}\n\nimpl<N: RealField> std::ops::Mul<&MIsometry<N>> for &MIsometry<N> {\n    type Output = MIsometry<N>;\n    #[inline]\n    fn mul(self, rhs: &MIsometry<N>) -> Self::Output {\n        MIsometry(&self.0 * &rhs.0)\n    }\n}\n\nimpl<N: 
RealField> std::ops::MulAssign<MIsometry<N>> for MIsometry<N> {\n    #[inline]\n    fn mul_assign(&mut self, rhs: MIsometry<N>) {\n        self.0 *= rhs.0;\n    }\n}\n\nimpl<N: RealField> std::ops::MulAssign<&MIsometry<N>> for MIsometry<N> {\n    #[inline]\n    fn mul_assign(&mut self, rhs: &MIsometry<N>) {\n        self.0 *= &rhs.0;\n    }\n}\n\nimpl<N: RealField> std::ops::Mul<MVector<N>> for MIsometry<N> {\n    type Output = MVector<N>;\n    #[inline]\n    fn mul(self, rhs: MVector<N>) -> Self::Output {\n        MVector(self.0 * rhs.0)\n    }\n}\n\nimpl<N: RealField> std::ops::Mul<&MVector<N>> for MIsometry<N> {\n    type Output = MVector<N>;\n    #[inline]\n    fn mul(self, rhs: &MVector<N>) -> Self::Output {\n        MVector(self.0 * &rhs.0)\n    }\n}\n\nimpl<N: RealField> std::ops::Mul<MVector<N>> for &MIsometry<N> {\n    type Output = MVector<N>;\n    #[inline]\n    fn mul(self, rhs: MVector<N>) -> Self::Output {\n        MVector(&self.0 * rhs.0)\n    }\n}\n\nimpl<N: RealField> std::ops::Mul<&MVector<N>> for &MIsometry<N> {\n    type Output = MVector<N>;\n    #[inline]\n    fn mul(self, rhs: &MVector<N>) -> Self::Output {\n        MVector(&self.0 * &rhs.0)\n    }\n}\n\nimpl<N: RealField> std::ops::Mul<MPoint<N>> for MIsometry<N> {\n    type Output = MPoint<N>;\n    #[inline]\n    fn mul(self, rhs: MPoint<N>) -> Self::Output {\n        MPoint(self * rhs.0)\n    }\n}\n\nimpl<N: RealField> std::ops::Mul<&MPoint<N>> for MIsometry<N> {\n    type Output = MPoint<N>;\n    #[inline]\n    fn mul(self, rhs: &MPoint<N>) -> Self::Output {\n        MPoint(self * &rhs.0)\n    }\n}\n\nimpl<N: RealField> std::ops::Mul<MPoint<N>> for &MIsometry<N> {\n    type Output = MPoint<N>;\n    #[inline]\n    fn mul(self, rhs: MPoint<N>) -> Self::Output {\n        MPoint(self * rhs.0)\n    }\n}\n\nimpl<N: RealField> std::ops::Mul<&MPoint<N>> for &MIsometry<N> {\n    type Output = MPoint<N>;\n    #[inline]\n    fn mul(self, rhs: &MPoint<N>) -> Self::Output {\n        MPoint(self * 
&rhs.0)\n    }\n}\n\nimpl<N: RealField> std::ops::Mul<MDirection<N>> for MIsometry<N> {\n    type Output = MDirection<N>;\n    #[inline]\n    fn mul(self, rhs: MDirection<N>) -> Self::Output {\n        MDirection(self * rhs.0)\n    }\n}\n\nimpl<N: RealField> std::ops::Mul<&MDirection<N>> for MIsometry<N> {\n    type Output = MDirection<N>;\n    #[inline]\n    fn mul(self, rhs: &MDirection<N>) -> Self::Output {\n        MDirection(self * &rhs.0)\n    }\n}\n\nimpl<N: RealField> std::ops::Mul<MDirection<N>> for &MIsometry<N> {\n    type Output = MDirection<N>;\n    #[inline]\n    fn mul(self, rhs: MDirection<N>) -> Self::Output {\n        MDirection(self * rhs.0)\n    }\n}\n\nimpl<N: RealField> std::ops::Mul<&MDirection<N>> for &MIsometry<N> {\n    type Output = MDirection<N>;\n    #[inline]\n    fn mul(self, rhs: &MDirection<N>) -> Self::Output {\n        MDirection(self * &rhs.0)\n    }\n}\n\n/// Multiplies the argument by itself.\n#[inline]\npub fn sqr<N: RealField + Copy>(x: N) -> N {\n    x * x\n}\n\n/// Updates `subject` by moving it along the line determined by `projection_direction` so that\n/// its dot product with `normal` is `distance`. This effectively projects vectors onto the plane\n/// `distance` units away from the origin with normal `normal`. 
The projection is non-orthogonal in\n/// general, only orthogonal when `normal` is equal to `projection_direction`.\n///\n/// Precondition: For this to be possible, `projection_direction` cannot be orthogonal to `normal`.\npub fn project_to_plane<N: RealField + Copy>(\n    subject: &mut na::Vector3<N>,\n    normal: &na::UnitVector3<N>,\n    projection_direction: &na::UnitVector3<N>,\n    distance: N,\n) {\n    *subject += projection_direction.as_ref()\n        * ((distance - subject.dot(normal)) / projection_direction.dot(normal));\n}\n\n/// Returns the UnitQuaternion that rotates the `from` vector to the `to` vector, or `None` if\n/// `from` and `to` face opposite directions such that their sum has norm less than `epsilon`.\n/// This version is more numerically stable than nalgebra's equivalent function.\npub fn rotation_between_axis<N: RealField + Copy>(\n    from: &na::UnitVector3<N>,\n    to: &na::UnitVector3<N>,\n    epsilon: N,\n) -> Option<na::UnitQuaternion<N>> {\n    let angle_bisector = na::UnitVector3::try_new(from.into_inner() + to.into_inner(), epsilon)?;\n    Some(na::UnitQuaternion::new_unchecked(\n        na::Quaternion::from_parts(from.dot(&angle_bisector), from.cross(&angle_bisector)),\n    ))\n}\n\npub trait PermuteXYZ {\n    /// Converts from t-u-v coordinates to x-y-z coordinates. t-u-v coordinates\n    /// are a permuted version of x-y-z coordinates. `t_axis` determines which\n    /// of the three x-y-z coordinates corresponds to the t-coordinate. This\n    /// function works with any indexable entity with at least three entries.\n    /// Any entry after the third entry is ignored. 
As an extra guarantee, this\n    /// function only performs even permutations.\n    ///\n    /// Examples:\n    /// ```\n    /// # use common::math::PermuteXYZ;\n    /// assert_eq!([2, 4, 6].tuv_to_xyz(0), [2, 4, 6]);\n    /// assert_eq!([2, 4, 6].tuv_to_xyz(1), [6, 2, 4]);\n    /// assert_eq!([2, 4, 6].tuv_to_xyz(2), [4, 6, 2]);\n    /// assert_eq!([2, 4, 6, 8].tuv_to_xyz(1), [6, 2, 4, 8]);\n    /// ```\n    fn tuv_to_xyz(self, t_axis: usize) -> Self;\n}\n\nimpl<T: std::ops::IndexMut<usize, Output = N>, N: Copy> PermuteXYZ for T {\n    fn tuv_to_xyz(mut self, t_axis: usize) -> Self {\n        (self[t_axis], self[(t_axis + 1) % 3], self[(t_axis + 2) % 3]) =\n            (self[0], self[1], self[2]);\n        self\n    }\n}\n\nimpl<N: Scalar + Copy> PermuteXYZ for MPoint<N> {\n    fn tuv_to_xyz(self, t_axis: usize) -> Self {\n        MPoint(self.0.tuv_to_xyz(t_axis))\n    }\n}\n\nimpl<N: Scalar + Copy> PermuteXYZ for MDirection<N> {\n    fn tuv_to_xyz(self, t_axis: usize) -> Self {\n        MDirection(self.0.tuv_to_xyz(t_axis))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use approx::*;\n\n    impl<N: RealField> AbsDiffEq<MIsometry<N>> for MIsometry<N> {\n        type Epsilon = N;\n\n        #[inline]\n        fn default_epsilon() -> Self::Epsilon {\n            na::Matrix4::<N>::default_epsilon()\n        }\n\n        #[inline]\n        fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool {\n            self.0.abs_diff_eq(&other.0, epsilon)\n        }\n    }\n\n    impl<N: RealField> AbsDiffEq<MVector<N>> for MVector<N> {\n        type Epsilon = N;\n\n        #[inline]\n        fn default_epsilon() -> Self::Epsilon {\n            na::Vector4::<N>::default_epsilon()\n        }\n\n        #[inline]\n        fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool {\n            self.0.abs_diff_eq(&other.0, epsilon)\n        }\n    }\n\n    #[test]\n    #[rustfmt::skip]\n    fn reflect_example() {\n        
assert_abs_diff_eq!(\n            MIsometry::reflection(&MVector::new(1.0, 0.0, 0.0, 0.5).normalized_direction()),\n            MIsometry(\n                na::Matrix4::new(\n                    -1.666, 0.0, 0.0, 1.333,\n                     0.0  , 1.0, 0.0, 0.0,\n                     0.0  , 0.0, 1.0, 0.0,\n                    -1.333, 0.0, 0.0, 1.666\n                )\n            ),\n            epsilon = 1e-3\n        );\n    }\n\n    #[test]\n    #[rustfmt::skip]\n    fn translate_example() {\n        assert_abs_diff_eq!(\n            MIsometry::translation(\n                &MVector::new(-0.5, -0.5, 0.0, 1.0).normalized_point(),\n                &MVector::new(0.3, -0.7, 0.0, 1.0).normalized_point()\n            ),\n            MIsometry(\n                na::Matrix4::new(\n                    1.676, 0.814, 0.0,  1.572,\n                    -1.369, 0.636, 0.0, -1.130,\n                    0.0,   0.0,   1.0,  0.0,\n                    1.919, 0.257, 0.0,  2.179,\n                )\n            ),\n            epsilon = 1e-3\n        );\n    }\n\n    #[test]\n    fn translate_identity() {\n        let a = MVector::new(-0.5, -0.5, 0.0, 1.0).normalized_point();\n        let b = MVector::new(0.3, -0.7, 0.0, 1.0).normalized_point();\n        let o = MVector::new(0.0, 0.0, 0.0, 1.0).normalized_point();\n        assert_abs_diff_eq!(\n            MIsometry::translation(&a, &b),\n            MIsometry::translation(&o, &a)\n                * MIsometry::translation(&o, &(MIsometry::translation(&a, &o) * b))\n                * MIsometry::translation(&a, &o),\n            epsilon = 1e-5\n        );\n    }\n\n    #[test]\n    fn translate_equivalence() {\n        let a = MVector::new(-0.5, -0.5, 0.0, 1.0).normalized_point();\n        let o = MVector::new(0.0, 0.0, 0.0, 1.0).normalized_point();\n        let direction = a.0.xyz().normalize();\n        let distance = dbg!(o.distance(&a));\n        assert_abs_diff_eq!(\n            MIsometry::translation(&o, &a),\n            
MIsometry::translation_along(&(direction * distance)),\n            epsilon = 1e-5\n        );\n    }\n\n    #[test]\n    fn translate_distance() {\n        let dx = 2.3;\n        let xf = MIsometry::translation_along(&(na::Vector3::x() * dx));\n        assert_abs_diff_eq!(dx, MPoint::origin().distance(&(xf * MPoint::origin())));\n    }\n\n    #[test]\n    fn distance_example() {\n        let a = MVector::new(0.2, 0.0, 0.0, 1.0).normalized_point();\n        let b = MVector::new(-0.5, -0.5, 0.0, 1.0).normalized_point();\n        // Paper doubles distances for reasons unknown\n        assert_abs_diff_eq!(a.distance(&b), 2.074 / 2.0, epsilon = 1e-3);\n    }\n\n    #[test]\n    fn distance_commutative() {\n        let p = MPoint::new_unchecked(-1.0, -1.0, 0.0, 3.0f32.sqrt());\n        let q = MPoint::new_unchecked(1.0, -1.0, 0.0, 3.0f32.sqrt());\n        assert_abs_diff_eq!(p.distance(&q), q.distance(&p));\n    }\n\n    #[test]\n    fn midpoint_distance() {\n        let p = MPoint::new_unchecked(-1.0, -1.0, 0.0, 3.0f32.sqrt());\n        let q = MPoint::new_unchecked(1.0, -1.0, 0.0, 3.0f32.sqrt());\n        let m = p.midpoint(&q);\n        assert_abs_diff_eq!(p.distance(&m), m.distance(&q), epsilon = 1e-5);\n        assert_abs_diff_eq!(p.distance(&m) * 2.0, p.distance(&q), epsilon = 1e-5);\n    }\n\n    #[test]\n    fn renormalize_translation() {\n        let mat = MIsometry::translation(\n            &MVector::new(-0.5, -0.5, 0.0, 1.0).normalized_point(),\n            &MVector::new(0.3, -0.7, 0.0, 1.0).normalized_point(),\n        );\n        assert_abs_diff_eq!(mat.renormalized(), mat, epsilon = 1e-5);\n    }\n\n    #[test]\n    #[rustfmt::skip]\n    fn renormalize_reflection() {\n        let mat = MIsometry(na::Matrix4::new(\n            -1.0, 0.0, 0.0, 0.0,\n             0.0, 1.0, 0.0, 0.0,\n             0.0, 0.0, 1.0, 0.0,\n             0.0, 0.0, 0.0, 1.0));\n        assert_abs_diff_eq!(mat.renormalized(), mat, epsilon = 1e-5);\n    }\n\n    #[test]\n    
#[rustfmt::skip]\n    fn renormalize_normalizes_matrix() {\n        // Matrix chosen with random entries between -1 and 1\n        let error = MIsometry(na::Matrix4::new(\n            -0.77, -0.21,  0.57, -0.59,\n             0.49, -0.68,  0.36,  0.68,\n            -0.75, -0.54, -0.13, -0.59,\n            -0.57, -0.80,  0.00, -0.53));\n\n        // translation with some error\n        let mat = MIsometry(MIsometry::translation(\n            &MVector::new(-0.5, -0.5, 0.0, 1.0).normalized_point(),\n            &MVector::new(0.3, -0.7, 0.0, 1.0).normalized_point(),\n        ).0 + error.0 * 0.05);\n\n        let normalized_mat = mat.renormalized();\n\n        // Check that the matrix is actually normalized\n        assert_abs_diff_eq!(\n            normalized_mat.inverse() * normalized_mat,\n            MIsometry::identity(),\n            epsilon = 1e-5\n        );\n    }\n\n    #[test]\n    fn project_to_plane_example() {\n        let distance = 4.0;\n        let projection_direction: na::UnitVector3<f32> =\n            na::UnitVector3::new_normalize(na::Vector3::new(3.0, -2.0, 7.0));\n        let normal: na::UnitVector3<f32> =\n            na::UnitVector3::new_normalize(na::Vector3::new(3.0, -2.0, 7.0));\n        let mut subject = na::Vector3::new(-6.0, -3.0, 4.0);\n        project_to_plane(&mut subject, &normal, &projection_direction, distance);\n        assert_abs_diff_eq!(normal.dot(&subject), distance, epsilon = 1.0e-5);\n    }\n\n    #[test]\n    fn rotation_between_axis_example() {\n        let from = na::UnitVector3::new_normalize(na::Vector3::new(1.0, 1.0, 3.0));\n        let to = na::UnitVector3::new_normalize(na::Vector3::new(2.0, 3.0, 2.0));\n        let expected = na::UnitQuaternion::rotation_between_axis(&from, &to).unwrap();\n        let actual = rotation_between_axis(&from, &to, 1e-5).unwrap();\n        assert_abs_diff_eq!(expected, actual, epsilon = 1.0e-5);\n    }\n}\n"
  },
  {
    "path": "common/src/node.rs",
    "content": "/*the name of this module is pretty arbitrary at the moment*/\n\nuse std::ops::{Index, IndexMut};\n\nuse serde::{Deserialize, Serialize};\n\nuse crate::collision_math::Ray;\nuse crate::dodeca::Vertex;\nuse crate::graph::{Graph, NodeId};\nuse crate::proto::{BlockUpdate, Position, SerializedVoxelData};\nuse crate::voxel_math::{ChunkDirection, CoordAxis, CoordSign, Coords};\nuse crate::world::Material;\nuse crate::worldgen::{NodeState, PartialNodeState};\nuse crate::{Chunks, margins, peer_traverser};\n\n/// Unique identifier for a single chunk (1/20 of a dodecahedron) in the graph\n#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]\npub struct ChunkId {\n    pub node: NodeId,\n    pub vertex: Vertex,\n}\n\nimpl ChunkId {\n    pub fn new(node: NodeId, vertex: Vertex) -> Self {\n        ChunkId { node, vertex }\n    }\n}\n\nimpl Graph {\n    /// Returns the PartialNodeState for the given node, panicking if it isn't initialized.\n    #[inline]\n    pub fn partial_node_state(&self, node_id: NodeId) -> &PartialNodeState {\n        self[node_id].partial_state.as_ref().unwrap()\n    }\n\n    /// Initializes the PartialNodeState for the given node if not already initialized,\n    /// initializing other nodes' NodeState and PartialNodeState as necessary\n    pub fn ensure_partial_node_state(&mut self, node_id: NodeId) {\n        if self[node_id].partial_state.is_some() {\n            return;\n        }\n\n        for (_, parent) in self.parents(node_id) {\n            self.ensure_node_state(parent);\n        }\n\n        let partial_node_state = PartialNodeState::new(self, node_id);\n        self[node_id].partial_state = Some(partial_node_state);\n    }\n\n    /// Returns the NodeState for the given node, panicking if it isn't initialized.\n    #[inline]\n    pub fn node_state(&self, node_id: NodeId) -> &NodeState {\n        self[node_id].state.as_ref().unwrap()\n    }\n\n    /// Initializes the NodeState for the given node if not 
already initialized,\n    /// initializing other nodes' NodeState and PartialNodeState as necessary\n    pub fn ensure_node_state(&mut self, node_id: NodeId) {\n        if self[node_id].state.is_some() {\n            return;\n        }\n\n        self.ensure_partial_node_state(node_id);\n        for peer in peer_traverser::ensure_peer_nodes(self, node_id) {\n            self.ensure_partial_node_state(peer.node());\n        }\n\n        let node_state = NodeState::new(self, node_id);\n        self[node_id].state = Some(node_state);\n    }\n\n    /// Returns the up-direction relative to the given position, or `None` if the\n    /// position is in an unpopulated node.\n    pub fn get_relative_up(&self, position: &Position) -> Option<na::UnitVector3<f32>> {\n        let node = &self[position.node];\n        Some(na::UnitVector3::new_normalize(\n            (position.local.inverse() * node.state.as_ref()?.up_direction())\n                .as_ref()\n                .xyz(),\n        ))\n    }\n\n    /// Returns the ID of the chunk neighboring the given chunk on the specified\n    /// cube face side, or `None` if it's on a node the graph hasn't populated.\n    pub fn get_chunk_neighbor(\n        &self,\n        chunk: ChunkId,\n        coord_axis: CoordAxis,\n        coord_sign: CoordSign,\n    ) -> Option<ChunkId> {\n        match coord_sign {\n            CoordSign::Plus => Some(ChunkId::new(\n                chunk.node,\n                chunk.vertex.adjacent_vertices()[coord_axis as usize],\n            )),\n            CoordSign::Minus => Some(ChunkId::new(\n                self.neighbor(\n                    chunk.node,\n                    chunk.vertex.canonical_sides()[coord_axis as usize],\n                )?,\n                chunk.vertex,\n            )),\n        }\n    }\n\n    /// Returns the block (voxel) neighboring the given block on the specified\n    /// cube face side, or `None` if it's on a node the graph hasn't populated.\n    pub fn 
get_block_neighbor(\n        &self,\n        mut chunk: ChunkId,\n        mut coords: Coords,\n        coord_axis: CoordAxis,\n        coord_sign: CoordSign,\n    ) -> Option<(ChunkId, Coords)> {\n        if coords[coord_axis] == Coords::boundary_coord(self.layout().dimension, coord_sign) {\n            match coord_sign {\n                CoordSign::Plus => {\n                    coords = chunk.vertex.chunk_axis_permutations()[coord_axis as usize] * coords;\n                    chunk.vertex = chunk.vertex.adjacent_vertices()[coord_axis as usize];\n                }\n                CoordSign::Minus => {\n                    chunk.node = self.neighbor(\n                        chunk.node,\n                        chunk.vertex.canonical_sides()[coord_axis as usize],\n                    )?;\n                }\n            }\n        } else {\n            coords[coord_axis] = coords[coord_axis].wrapping_add_signed(coord_sign as i8);\n        }\n\n        Some((chunk, coords))\n    }\n\n    /// Populates a chunk with the given voxel data and ensures that margins are correctly fixed up.\n    pub fn populate_chunk(&mut self, chunk: ChunkId, mut voxels: VoxelData) {\n        let dimension = self.layout().dimension;\n        // Fix up margins for the chunk we're inserting along with any neighboring chunks\n        for chunk_direction in ChunkDirection::iter() {\n            let Some(Chunk::Populated {\n                voxels: neighbor_voxels,\n                surface: neighbor_surface,\n                old_surface: neighbor_old_surface,\n            }) = self\n                .get_chunk_neighbor(chunk, chunk_direction.axis, chunk_direction.sign)\n                .map(|chunk_id| &mut self[chunk_id])\n            else {\n                continue;\n            };\n            margins::fix_margins(\n                dimension,\n                chunk.vertex,\n                &mut voxels,\n                chunk_direction,\n                neighbor_voxels,\n            );\n        
    *neighbor_old_surface = neighbor_surface.take().or(*neighbor_old_surface);\n        }\n\n        // After clearing any margins we needed to clear, we can now insert the data into the graph\n        self[chunk] = Chunk::Populated {\n            voxels,\n            surface: None,\n            old_surface: None,\n        };\n    }\n\n    /// Returns the material at the specified coordinates of the specified chunk, if the chunk is generated\n    pub fn get_material(&self, chunk_id: ChunkId, coords: Coords) -> Option<Material> {\n        let dimension = self.layout().dimension;\n\n        let Chunk::Populated { voxels, .. } = &self[chunk_id] else {\n            return None;\n        };\n        Some(voxels.get(coords.to_index(dimension)))\n    }\n\n    /// Tries to update the block at the given position to the given material.\n    /// Fails and returns false if the chunk is not populated yet.\n    #[must_use]\n    pub fn update_block(&mut self, block_update: &BlockUpdate) -> bool {\n        let dimension = self.layout().dimension;\n\n        // Update the block\n        let Chunk::Populated {\n            voxels,\n            surface,\n            old_surface,\n        } = &mut self[block_update.chunk_id]\n        else {\n            return false;\n        };\n        let voxel = voxels\n            .data_mut(dimension)\n            .get_mut(block_update.coords.to_index(dimension))\n            .expect(\"coords are in-bounds\");\n\n        *voxel = block_update.new_material;\n        *old_surface = surface.take().or(*old_surface);\n\n        for chunk_direction in ChunkDirection::iter() {\n            margins::reconcile_margin_voxels(\n                self,\n                block_update.chunk_id,\n                block_update.coords,\n                chunk_direction,\n            )\n        }\n        true\n    }\n}\n\nimpl Index<ChunkId> for Graph {\n    type Output = Chunk;\n\n    #[inline]\n    fn index(&self, chunk: ChunkId) -> &Chunk {\n        
&self[chunk.node].chunks[chunk.vertex]\n    }\n}\n\nimpl IndexMut<ChunkId> for Graph {\n    #[inline]\n    fn index_mut(&mut self, chunk: ChunkId) -> &mut Chunk {\n        &mut self[chunk.node].chunks[chunk.vertex]\n    }\n}\n\n/// A single dodecahedral node in the graph. All information related to world\n/// generation and the blocks within the node, along with auxiliary information\n/// used for rendering, is stored here.\n#[derive(Default)]\npub struct Node {\n    pub partial_state: Option<PartialNodeState>,\n    pub state: Option<NodeState>,\n    /// We can only populate chunks which lie within a cube of populated nodes, so nodes on the edge\n    /// of the graph always have some `Fresh` chunks.\n    pub chunks: Chunks<Chunk>,\n}\n\n/// Stores the actual voxel data of the chunk, along with metadata used for\n/// rendering. This is an enum type to account for chunks that have not been\n/// fully generated yet.\n#[derive(Default)]\npub enum Chunk {\n    /// Worldgen has not started running on this chunk yet. This can be for\n    /// multiple reasons:\n    /// - It was just added to the graph and hasn't had time to be processed.\n    /// - All world generation threads are occupied, and it is not this chunk's\n    ///   turn yet.\n    /// - The chunk is not close enough to be worth generating. This might\n    ///   happen for chunks on the far side of a node.\n    #[default]\n    Fresh,\n\n    /// There is an active thread generating voxels for this chunk, but this\n    /// chunk has not yet received the results from this thread.\n    Generating,\n\n    /// This chunk's voxels are fully generated and ready for use.\n    Populated {\n        /// The voxels present in the chunk\n        voxels: VoxelData,\n\n        /// A reference to the \"mesh\" used to render the chunk. 
Set to `None` if\n        /// this mesh needs to be computed or recomputed.\n        surface: Option<u32>,\n\n        /// An outdated (but valid) reference to the \"mesh\" used to render the\n        /// chunk. This is used to allow the mesh to still be rendered while it\n        /// is being recomputed.\n        old_surface: Option<u32>,\n    },\n}\n\n/// The voxels present in a particular chunk, along with a margin.\n///\n/// The margin consists of voxels of adjacent chunks, which is a necessary extra\n/// piece of data needed to properly compute the rendered surface of the chunk.\npub enum VoxelData {\n    /// All voxels, including the margin, are the same material. This data type\n    /// is used to save storage space and processing, since such chunks do not\n    /// need to be rendered at all.\n    // TODO: This abstraction needs some work, as it doesn't account for areas\n    // underground with a mixed set of materials, such as dirt and stone.\n    Solid(Material),\n\n    /// This chunk (or its margins) may consist of multiple materials, which are\n    /// represented in the given boxed slice.\n    Dense(Box<[Material]>),\n}\n\nimpl VoxelData {\n    pub fn data_mut(&mut self, dimension: u8) -> &mut [Material] {\n        match *self {\n            VoxelData::Dense(ref mut d) => d,\n            VoxelData::Solid(mat) => {\n                *self = VoxelData::Dense(vec![mat; (usize::from(dimension) + 2).pow(3)].into());\n                self.data_mut(dimension)\n            }\n        }\n    }\n\n    pub fn get(&self, index: usize) -> Material {\n        match *self {\n            VoxelData::Dense(ref d) => d[index],\n            VoxelData::Solid(mat) => mat,\n        }\n    }\n\n    pub fn is_solid(&self) -> bool {\n        match *self {\n            VoxelData::Dense(_) => false,\n            VoxelData::Solid(_) => true,\n        }\n    }\n\n    /// Returns a `VoxelData` with void margins based on the given `SerializedVoxelData`, or `None` if\n    /// the 
`SerializedVoxelData` came from a `VoxelData` with the wrong dimension or an unknown material.\n    pub fn deserialize(serialized: &SerializedVoxelData, dimension: u8) -> Option<Self> {\n        if serialized.inner.len() != usize::from(dimension).pow(3) * 2 {\n            return None;\n        }\n\n        let mut materials = serialized\n            .inner\n            .chunks_exact(2)\n            .map(|chunk| u16::from_le_bytes([chunk[0], chunk[1]]));\n\n        let mut data = vec![Material::Void; (usize::from(dimension) + 2).pow(3)];\n        for z in 0..dimension {\n            for y in 0..dimension {\n                for x in 0..dimension {\n                    // We cannot use a linear copy here because `data` has margins, while `serialized.inner` does not.\n                    data[Coords([x, y, z]).to_index(dimension)] =\n                        materials.next().unwrap().try_into().ok()?;\n                }\n            }\n        }\n        Some(VoxelData::Dense(data.into_boxed_slice()))\n    }\n\n    /// Returns a `SerializedVoxelData` corresponding to `self`. 
Assumes that `self` is `Dense` and\n    /// has the right dimension, as it will panic or return incorrect data otherwise.\n    pub fn serialize(&self, dimension: u8) -> SerializedVoxelData {\n        let VoxelData::Dense(data) = self else {\n            panic!(\"Only dense chunks can be serialized.\");\n        };\n\n        let mut serialized: Vec<u8> = Vec::with_capacity(usize::from(dimension).pow(3) * 2);\n        for z in 0..dimension {\n            for y in 0..dimension {\n                for x in 0..dimension {\n                    // We cannot use a linear copy here because `data` has margins, while `serialized.inner` does not.\n                    serialized\n                        .extend((data[Coords([x, y, z]).to_index(dimension)] as u16).to_le_bytes());\n                }\n            }\n        }\n        SerializedVoxelData { inner: serialized }\n    }\n}\n\n/// Contains the context needed to know the locations of individual cubes within a chunk in the chunk's coordinate\n/// system. A given `ChunkLayout` is uniquely determined by its dimension.\npub struct ChunkLayout {\n    dimension: u8,\n    dual_to_grid_factor: f32,\n}\n\nimpl ChunkLayout {\n    pub fn new(dimension: u8) -> Self {\n        ChunkLayout {\n            dimension,\n            dual_to_grid_factor: Vertex::dual_to_chunk_factor() * dimension as f32,\n        }\n    }\n\n    /// Number of cubes on one axis of the chunk.\n    #[inline]\n    pub fn dimension(&self) -> u8 {\n        self.dimension\n    }\n\n    /// Scale by this to convert dual coordinates to homogeneous grid coordinates.\n    #[inline]\n    pub fn dual_to_grid_factor(&self) -> f32 {\n        self.dual_to_grid_factor\n    }\n\n    /// Converts a single coordinate from dual coordinates in the Klein-Beltrami model to an integer coordinate\n    /// suitable for voxel lookup. 
Returns `None` if the coordinate is outside the chunk.\n    #[inline]\n    pub fn dual_to_voxel(&self, dual_coord: f32) -> Option<u8> {\n        let floor_grid_coord = (dual_coord * self.dual_to_grid_factor).floor();\n\n        if !(floor_grid_coord >= 0.0 && floor_grid_coord < self.dimension as f32) {\n            None\n        } else {\n            Some(floor_grid_coord as u8)\n        }\n    }\n\n    /// Converts a single coordinate from grid coordinates to dual coordinates in the Klein-Beltrami model. This\n    /// can be used to find the positions of voxel gridlines.\n    #[inline]\n    pub fn grid_to_dual(&self, grid_coord: u8) -> f32 {\n        grid_coord as f32 / self.dual_to_grid_factor\n    }\n\n    /// Takes in a single grid coordinate and returns a range containing all voxel coordinates surrounding it.\n    #[inline]\n    pub fn neighboring_voxels(&self, grid_coord: u8) -> impl Iterator<Item = u8> + use<> {\n        grid_coord.saturating_sub(1)..grid_coord.saturating_add(1).min(self.dimension())\n    }\n}\n\n/// Represents a discretized region in the voxel grid contained by an axis-aligned bounding box.\npub struct VoxelAABB {\n    // The bounds are of the form [[x_min, x_max], [y_min, y_max], [z_min, z_max]], using voxel coordinates with one-block\n    // wide margins added on both sides. This helps make sure that we can detect if the AABB intersects the chunk's boundaries.\n    bounds: [[u8; 2]; 3],\n}\n\nimpl VoxelAABB {\n    /// Returns a bounding box that is guaranteed to cover a given radius around a ray segment. 
Returns None if the\n    /// bounding box lies entirely outside the chunk.\n    pub fn from_ray_segment_and_radius(\n        layout: &ChunkLayout,\n        ray: &Ray,\n        tanh_distance: f32,\n        radius: f32,\n    ) -> Option<VoxelAABB> {\n        // Convert the ray to grid coordinates\n        let grid_start = na::Point3::from_homogeneous(ray.position.into()).unwrap()\n            * layout.dual_to_grid_factor();\n        let grid_end = na::Point3::from_homogeneous(ray.ray_point(tanh_distance).into()).unwrap()\n            * layout.dual_to_grid_factor();\n        // Convert the radius to grid coordinates using a crude conservative estimate\n        let max_grid_radius = radius * layout.dual_to_grid_factor();\n        let mut bounds = [[0; 2]; 3];\n        for axis in 0..3 {\n            let grid_min = grid_start[axis].min(grid_end[axis]) - max_grid_radius;\n            let grid_max = grid_start[axis].max(grid_end[axis]) + max_grid_radius;\n            let voxel_min = (grid_min + 1.0).floor().max(0.0);\n            let voxel_max = (grid_max + 1.0)\n                .floor()\n                .min(layout.dimension() as f32 + 1.0);\n\n            // When voxel_min is greater than dimension or voxel_max is less than 1, the cube does not intersect\n            // the chunk.\n            if voxel_min > layout.dimension() as f32 || voxel_max < 1.0 {\n                return None;\n            }\n\n            // We convert to u8 here instead of earlier because out-of-range voxel coordinates can violate casting assumptions.\n            bounds[axis] = [voxel_min.floor() as u8, voxel_max.floor() as u8];\n        }\n\n        Some(VoxelAABB { bounds })\n    }\n\n    /// Iterator over grid points contained in the region, represented as ordered triples\n    pub fn grid_points(\n        &self,\n        axis0: usize,\n        axis1: usize,\n        axis2: usize,\n    ) -> impl Iterator<Item = (u8, u8, u8)> + use<> {\n        let bounds = self.bounds;\n        
(bounds[axis0][0]..bounds[axis0][1]).flat_map(move |i| {\n            (bounds[axis1][0]..bounds[axis1][1])\n                .flat_map(move |j| (bounds[axis2][0]..bounds[axis2][1]).map(move |k| (i, j, k)))\n        })\n    }\n\n    /// Iterator over grid lines intersecting the region, represented as ordered pairs determining the line's two fixed coordinates\n    pub fn grid_lines(&self, axis0: usize, axis1: usize) -> impl Iterator<Item = (u8, u8)> + use<> {\n        let bounds = self.bounds;\n        (bounds[axis0][0]..bounds[axis0][1])\n            .flat_map(move |i| (bounds[axis1][0]..bounds[axis1][1]).map(move |j| (i, j)))\n    }\n\n    /// Iterator over grid planes intersecting the region, represented as integers determining the plane's fixed coordinate\n    pub fn grid_planes(&self, axis: usize) -> impl Iterator<Item = u8> + use<> {\n        self.bounds[axis][0]..self.bounds[axis][1]\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::collections::HashSet;\n\n    use crate::math::{MDirection, MIsometry, MPoint, MVector};\n\n    use super::*;\n\n    /// Any voxel AABB should at least cover a capsule-shaped region consisting of all points\n    /// `radius` units away from the ray's line segment. This region consists of two spheres\n    /// and a cylinder. We only test planes because covered lines and points are a strict subset.\n    #[test]\n    fn voxel_aabb_coverage() {\n        let dimension = 12;\n        let layout = ChunkLayout::new(dimension);\n\n        // Pick an arbitrary ray by transforming the positive-x-axis ray.\n        let ray = MIsometry::from(na::Rotation3::from_axis_angle(&na::Vector3::z_axis(), 0.4))\n            * MIsometry::translation_along(&na::Vector3::new(0.2, 0.3, 0.1))\n            * &Ray::new(MPoint::w(), MDirection::x());\n\n        let tanh_distance = 0.2;\n        let radius = 0.1;\n\n        // We want to test that the whole capsule-shaped region around the ray segment is covered by\n        // the AABB. 
However, the math to test for this is complicated, so we instead check a bunch of\n        // spheres along this ray segment.\n        let num_ray_test_points = 20;\n        let ray_test_points: Vec<_> = (0..num_ray_test_points)\n            .map(|i| {\n                ray.ray_point(tanh_distance * (i as f32 / (num_ray_test_points - 1) as f32))\n                    .normalized_point()\n            })\n            .collect();\n\n        let aabb =\n            VoxelAABB::from_ray_segment_and_radius(&layout, &ray, tanh_distance, radius).unwrap();\n\n        // For variable names and further comments, we use a tuv coordinate system, which\n        // is a permuted xyz coordinate system.\n\n        // Test planes in all 3 axes.\n        for t_axis in 0..3 {\n            let covered_planes: HashSet<_> = aabb.grid_planes(t_axis).collect();\n\n            // Check that all uv-aligned planes that should be covered are covered\n            for t in 0..=dimension {\n                if covered_planes.contains(&t) {\n                    continue;\n                }\n\n                let mut plane_normal = MVector::zero();\n                plane_normal[t_axis] = 1.0;\n                plane_normal[3] = layout.grid_to_dual(t);\n                let plane_normal = plane_normal.normalized_direction();\n\n                for test_point in &ray_test_points {\n                    assert!(\n                        test_point.mip(&plane_normal).abs() > radius.sinh(),\n                        \"Plane not covered: t_axis={t_axis}, t={t}, test_point={test_point:?}\",\n                    );\n                }\n            }\n        }\n\n        // Test lines in all 3 axes\n        for t_axis in 0..3 {\n            let u_axis = (t_axis + 1) % 3;\n            let v_axis = (u_axis + 1) % 3;\n            let covered_lines: HashSet<_> = aabb.grid_lines(u_axis, v_axis).collect();\n\n            // For a given axis, all lines have the same direction, so set up the appropriate vector\n            
// in advance.\n            let mut line_direction = MVector::zero();\n            line_direction[t_axis] = 1.0;\n            let line_direction = line_direction;\n\n            // Check that all t-aligned lines that should be covered are covered\n            for u in 0..=dimension {\n                for v in 0..=dimension {\n                    if covered_lines.contains(&(u, v)) {\n                        continue;\n                    }\n\n                    let mut line_position = MVector::zero();\n                    line_position[u_axis] = layout.grid_to_dual(u);\n                    line_position[v_axis] = layout.grid_to_dual(v);\n                    line_position[3] = 1.0;\n                    let line_position = line_position.normalized_point();\n\n                    for test_point in &ray_test_points {\n                        assert!(\n                            (test_point.mip(&line_position).powi(2)\n                                - test_point.mip(&line_direction).powi(2))\n                            .sqrt()\n                                > radius.cosh(),\n                            \"Line not covered: t_axis={t_axis}, u={u}, v={v}, test_point={test_point:?}\",\n                        );\n                    }\n                }\n            }\n        }\n\n        // Test points\n        let covered_points: HashSet<_> = aabb.grid_points(0, 1, 2).collect();\n\n        // Check that all points that should be covered are covered\n        for x in 0..=dimension {\n            for y in 0..=dimension {\n                for z in 0..=dimension {\n                    if covered_points.contains(&(x, y, z)) {\n                        continue;\n                    }\n\n                    let point_position = MVector::new(\n                        layout.grid_to_dual(x),\n                        layout.grid_to_dual(y),\n                        layout.grid_to_dual(z),\n                        1.0,\n                    )\n                    
.normalized_point();\n\n                    for test_point in &ray_test_points {\n                        assert!(\n                            -test_point.mip(&point_position) > radius.cosh(),\n                            \"Point not covered: x={x}, y={y}, z={z}, test_point={test_point:?}\",\n                        );\n                    }\n                }\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "common/src/peer_traverser.rs",
    "content": "use std::sync::LazyLock;\n\nuse arrayvec::ArrayVec;\n\nuse crate::{\n    dodeca::{SIDE_COUNT, Side},\n    graph::{Graph, NodeId},\n};\n\n/// Assumes the graph is expanded enough to traverse peer nodes and returns all peer nodes\n/// for the given base node. Panics if this assumption is false. See documentation of `PeerNode`\n/// for a definition of what a \"peer node\" is.\npub fn expect_peer_nodes(graph: &Graph, base_node: NodeId) -> Vec<PeerNode> {\n    peer_nodes_impl(AssertingGraphRef { graph }, base_node)\n}\n\n/// Returns all peer nodes for the given base node, expanding the graph if necessary.  See\n/// documentation of `PeerNode` for a definition of what a \"peer node\" is.\npub fn ensure_peer_nodes(graph: &mut Graph, base_node: NodeId) -> Vec<PeerNode> {\n    peer_nodes_impl(ExpandingGraphRef { graph }, base_node)\n}\n\n/// Internal implementation of peer node traversal, using a `GraphRef` to be generic over\n/// whether a mutable or immutable graph reference is available\nfn peer_nodes_impl(mut graph: impl GraphRef, base_node: NodeId) -> Vec<PeerNode> {\n    let mut nodes = Vec::new();\n\n    // Depth 1 paths\n    for (parent_side, parent_node) in graph.parents(base_node) {\n        for &child_side in &DEPTH1_CHILD_PATHS[parent_side as usize] {\n            let Some(peer_node) = graph.child(parent_node, child_side) else {\n                continue;\n            };\n            nodes.push(PeerNode {\n                node_id: peer_node,\n                parent_path: ArrayVec::from_iter([parent_side]),\n                child_path: ArrayVec::from_iter([child_side]),\n            });\n        }\n    }\n\n    // Depth 2 paths\n    for (parent_side0, parent_node0) in graph.parents(base_node) {\n        for (parent_side1, parent_node1) in graph.parents(parent_node0) {\n            // Avoid redundancies by enforcing shortlex order\n            if parent_side1.adjacent_to(parent_side0)\n                && (parent_side1 as usize) < (parent_side0 as 
usize)\n            {\n                continue;\n            }\n            for &child_sides in &DEPTH2_CHILD_PATHS[parent_side0 as usize][parent_side1 as usize] {\n                let Some(peer_node_parent) = graph.child(parent_node1, child_sides[0]) else {\n                    continue;\n                };\n                let Some(peer_node) = graph.child(peer_node_parent, child_sides[1]) else {\n                    continue;\n                };\n                nodes.push(PeerNode {\n                    node_id: peer_node,\n                    parent_path: ArrayVec::from_iter([parent_side0, parent_side1]),\n                    child_path: ArrayVec::from_iter(child_sides),\n                });\n            }\n        }\n    }\n\n    nodes\n}\n\n/// Details relating to a specific peer node. For a given base node, a peer node is\n/// any node of the same depth as the base node where it is possible to reach the\n/// same node from both the base and the peer node without \"going backwards\". Going backwards\n/// in this sense means going from a node with a higher depth to a node with a lower depth.\n///\n/// Peer nodes are important because if worldgen produces a structure at a given base node\n/// and another structure at a given peer node, those two structures could potentially intersect\n/// if care is not taken. 
Checking the peer nodes in advance will prevent this.\npub struct PeerNode {\n    node_id: NodeId,\n    parent_path: ArrayVec<Side, 2>,\n    child_path: ArrayVec<Side, 2>,\n}\n\nimpl PeerNode {\n    /// The ID of the peer node\n    #[inline]\n    pub fn node(&self) -> NodeId {\n        self.node_id\n    }\n\n    /// The sequence of sides that takes you from the peer node to the shared child node\n    #[inline]\n    pub fn peer_to_shared(&self) -> impl ExactSizeIterator<Item = Side> + Clone + use<> {\n        self.parent_path.clone().into_iter().rev()\n    }\n\n    /// The sequence of sides that takes you from the base node to the shared child node\n    #[inline]\n    pub fn base_to_shared(&self) -> impl ExactSizeIterator<Item = Side> + Clone + use<> {\n        self.child_path.clone().into_iter()\n    }\n}\n\n/// All paths that can potentially lead to a peer node after following the given parent path of length 1\nstatic DEPTH1_CHILD_PATHS: LazyLock<[ArrayVec<Side, 5>; SIDE_COUNT]> = LazyLock::new(|| {\n    Side::VALUES.map(|parent_side| {\n        // The main constraint is that all parent sides need to be adjacent to all child sides.\n        let mut path_list: ArrayVec<Side, 5> = ArrayVec::new();\n        for child_side in Side::iter() {\n            if !child_side.adjacent_to(parent_side) {\n                continue;\n            }\n            path_list.push(child_side);\n        }\n        path_list\n    })\n});\n\n/// All paths that can potentially lead to a peer node after following the given parent path of length 2\nstatic DEPTH2_CHILD_PATHS: LazyLock<[[ArrayVec<[Side; 2], 2>; SIDE_COUNT]; SIDE_COUNT]> =\n    LazyLock::new(|| {\n        Side::VALUES.map(|parent_side0| {\n            Side::VALUES.map(|parent_side1| {\n                let mut path_list: ArrayVec<[Side; 2], 2> = ArrayVec::new();\n                if parent_side0 == parent_side1 {\n                    // Backtracking parent paths are irrelevant and may result in more child paths than\n             
       // can fit in the ArrayVec, so skip these.\n                    return path_list;\n                }\n                // The main constraint is that all parent sides need to be adjacent to all child sides.\n                for child_side0 in Side::iter() {\n                    if !child_side0.adjacent_to(parent_side0)\n                        || !child_side0.adjacent_to(parent_side1)\n                    {\n                        // Child paths need to have both parts adjacent to parent paths.\n                        continue;\n                    }\n                    for child_side1 in Side::iter() {\n                        // To avoid redundancies, only look at child paths that obey shortlex rules.\n                        if child_side0 == child_side1 {\n                            // Child path backtracks and should be discounted.\n                            continue;\n                        }\n                        if child_side0.adjacent_to(child_side1)\n                            && (child_side0 as usize) > (child_side1 as usize)\n                        {\n                            // There is a lexicographically earlier child path, so this should be discounted.\n                            continue;\n                        }\n                        if !child_side1.adjacent_to(parent_side0)\n                            || !child_side1.adjacent_to(parent_side1)\n                        {\n                            // Child paths need to have both parts adjacent to parent paths.\n                            continue;\n                        }\n                        path_list.push([child_side0, child_side1]);\n                    }\n                }\n                path_list\n            })\n        })\n    });\n\n/// A reference to the graph used by `PeerTraverser` to decide how to handle not-yet-created nodes\ntrait GraphRef: AsRef<Graph> {\n    fn depth(&self, node: NodeId) -> u32;\n    fn neighbor(&mut self, node: NodeId, side: 
Side) -> NodeId;\n    fn parents(&self, node: NodeId) -> impl ExactSizeIterator<Item = (Side, NodeId)> + use<Self>;\n\n    /// A helper function that returns the node at the particular side if it's a child, or `None` if it's a parent.\n    fn child(&mut self, node: NodeId, side: Side) -> Option<NodeId> {\n        let candidate_child = self.neighbor(node, side);\n        if self.depth(candidate_child) > self.depth(node) {\n            Some(candidate_child)\n        } else {\n            None\n        }\n    }\n}\n\n/// A `GraphRef` that asserts that all the nodes it needs already exist\nstruct AssertingGraphRef<'a> {\n    graph: &'a Graph,\n}\n\nimpl AsRef<Graph> for AssertingGraphRef<'_> {\n    fn as_ref(&self) -> &Graph {\n        self.graph\n    }\n}\n\nimpl<'a> GraphRef for AssertingGraphRef<'a> {\n    fn depth(&self, node: NodeId) -> u32 {\n        self.graph.depth(node)\n    }\n\n    fn neighbor(&mut self, node: NodeId, side: Side) -> NodeId {\n        self.graph.neighbor(node, side).unwrap()\n    }\n\n    fn parents(&self, node: NodeId) -> impl ExactSizeIterator<Item = (Side, NodeId)> + use<'a> {\n        self.graph.parents(node)\n    }\n}\n\n/// A `GraphRef` that expands the graph as necessary\nstruct ExpandingGraphRef<'a> {\n    graph: &'a mut Graph,\n}\n\nimpl<'a> GraphRef for ExpandingGraphRef<'a> {\n    fn depth(&self, node: NodeId) -> u32 {\n        self.graph.depth(node)\n    }\n\n    fn neighbor(&mut self, node: NodeId, side: Side) -> NodeId {\n        self.graph.ensure_neighbor(node, side)\n    }\n\n    fn parents(&self, node: NodeId) -> impl ExactSizeIterator<Item = (Side, NodeId)> + use<'a> {\n        self.graph.parents(node)\n    }\n}\n\nimpl AsRef<Graph> for ExpandingGraphRef<'_> {\n    fn as_ref(&self) -> &Graph {\n        self.graph\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use fxhash::FxHashSet;\n\n    use super::*;\n\n    // Returns the `NodeId` corresponding to the given path\n    fn node_from_path(\n        graph: &mut Graph,\n        
start_node: NodeId,\n        path: impl IntoIterator<Item = Side>,\n    ) -> NodeId {\n        let mut current_node = start_node;\n        for side in path {\n            current_node = graph.ensure_neighbor(current_node, side);\n        }\n        current_node\n    }\n\n    #[test]\n    fn peer_traverser_example() {\n        let mut graph = Graph::new(1);\n        let base_node_path = [Side::B, Side::D, Side::C, Side::A];\n        let base_node = node_from_path(&mut graph, NodeId::ROOT, base_node_path);\n\n        let expected_paths: &[(&[Side], &[Side])] = &[\n            (&[Side::A], &[Side::B]),\n            (&[Side::A], &[Side::E]),\n            (&[Side::A], &[Side::I]),\n            (&[Side::C], &[Side::B]),\n            (&[Side::C], &[Side::F]),\n            (&[Side::C], &[Side::H]),\n            (&[Side::D], &[Side::H]),\n            (&[Side::D], &[Side::I]),\n            (&[Side::D], &[Side::K]),\n            (&[Side::C, Side::A], &[Side::B, Side::D]),\n            (&[Side::D, Side::A], &[Side::I, Side::C]),\n            (&[Side::D, Side::C], &[Side::H, Side::A]),\n        ];\n\n        let peers = ensure_peer_nodes(&mut graph, base_node);\n        assert_eq!(peers.len(), expected_paths.len());\n        for (peer, expected_path) in peers.iter().zip(expected_paths) {\n            assert_eq!(\n                peer.peer_to_shared().collect::<Vec<_>>(),\n                expected_path.0.to_vec(),\n            );\n            assert_eq!(\n                peer.base_to_shared().collect::<Vec<_>>(),\n                expected_path.1.to_vec(),\n            );\n        }\n\n        // Assert that the graph isn't expanded any more than necessary by generating\n        // a reference graph with the same base node and peer nodes manually.\n        let mut reference_graph = Graph::new(1);\n        assert_eq!(\n            base_node,\n            node_from_path(&mut reference_graph, NodeId::ROOT, base_node_path),\n            \"Sanity check for reusing base_node\"\n        
);\n        for peer in &peers {\n            // Generate the peer node by taking the path from the base node to the peer node via their shared parent.\n            node_from_path(\n                &mut reference_graph,\n                base_node,\n                peer.parent_path\n                    .iter()\n                    .cloned()\n                    .chain(peer.child_path.iter().cloned()),\n            );\n        }\n\n        assert_eq!(graph.len(), reference_graph.len());\n    }\n\n    #[test]\n    fn peer_definition_holds() {\n        let mut graph = Graph::new(1);\n        let base_node = node_from_path(\n            &mut graph,\n            NodeId::ROOT,\n            [Side::B, Side::D, Side::C, Side::A],\n        );\n        let mut found_peer_nodes = FxHashSet::default();\n        for peer in ensure_peer_nodes(&mut graph, base_node) {\n            let peer_node = peer.node();\n\n            assert!(\n                found_peer_nodes.insert(peer_node),\n                \"The same peer node must not be returned more than once.\"\n            );\n\n            let destination_from_base =\n                node_from_path(&mut graph, base_node, peer.base_to_shared());\n            let destination_from_peer =\n                node_from_path(&mut graph, peer_node, peer.peer_to_shared());\n\n            assert_eq!(\n                graph.depth(base_node),\n                graph.depth(peer_node),\n                \"The base and peer nodes must have the same depth in the graph.\"\n            );\n            assert_eq!(\n                graph.depth(base_node) + peer.base_to_shared().len() as u32,\n                graph.depth(destination_from_base),\n                \"path_from_base must not backtrack to a parent node.\"\n            );\n            assert_eq!(\n                graph.depth(peer_node) + peer.peer_to_shared().len() as u32,\n                graph.depth(destination_from_peer),\n                \"path_from_peer must not backtrack to a parent 
node.\"\n            );\n            assert_eq!(\n                destination_from_base, destination_from_peer,\n                \"path_from_base and path_from_peer must lead to the same node.\"\n            );\n        }\n    }\n}\n"
  },
  {
    "path": "common/src/proto.rs",
    "content": "use serde::{Deserialize, Serialize};\n\nuse crate::{\n    EntityId, SimConfig, Step, dodeca, graph::NodeId, math::MIsometry, node::ChunkId,\n    voxel_math::Coords, world::Material,\n};\n\n#[derive(Debug, Serialize, Deserialize)]\npub struct ClientHello {\n    pub name: String,\n}\n\n#[derive(Debug, Serialize, Deserialize)]\npub struct ServerHello {\n    pub character: EntityId,\n    pub sim_config: SimConfig,\n}\n\n#[derive(Debug, Serialize, Deserialize, Copy, Clone)]\npub struct Position {\n    pub node: NodeId,\n    pub local: MIsometry<f32>,\n}\n\nimpl Position {\n    pub fn origin() -> Self {\n        Self {\n            node: NodeId::ROOT,\n            local: MIsometry::identity(),\n        }\n    }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct StateDelta {\n    pub step: Step,\n    /// Highest input generation received prior to `step`\n    pub latest_input: u16,\n    pub positions: Vec<(EntityId, Position)>,\n    pub character_states: Vec<(EntityId, CharacterState)>,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct CharacterState {\n    pub velocity: na::Vector3<f32>,\n    pub on_ground: bool,\n    pub orientation: na::UnitQuaternion<f32>,\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone)]\npub struct Spawns {\n    pub step: Step,\n    pub spawns: Vec<(EntityId, Vec<Component>)>,\n    pub despawns: Vec<EntityId>,\n    pub nodes: Vec<FreshNode>,\n    pub block_updates: Vec<BlockUpdate>,\n    pub voxel_data: Vec<(ChunkId, SerializedVoxelData)>,\n    pub inventory_additions: Vec<(EntityId, EntityId)>,\n    pub inventory_removals: Vec<(EntityId, EntityId)>,\n}\n\n#[derive(Debug, Serialize, Deserialize)]\npub struct Command {\n    pub generation: u16,\n    pub character_input: CharacterInput,\n    pub orientation: na::UnitQuaternion<f32>,\n}\n\n#[derive(Debug, Clone, Default, Serialize, Deserialize)]\npub struct CharacterInput {\n    /// Relative to the character's current position, excluding orientation\n   
 pub movement: na::Vector3<f32>,\n    pub jump: bool,\n    pub no_clip: bool,\n    pub block_update: Option<BlockUpdate>,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct BlockUpdate {\n    pub chunk_id: ChunkId,\n    pub coords: Coords,\n    pub new_material: Material,\n    pub consumed_entity: Option<EntityId>,\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone)]\npub struct SerializedVoxelData {\n    /// Dense 3D array of 16-bit material tags for all voxels in this chunk\n    pub inner: Vec<u8>,\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone)]\npub enum Component {\n    Character(Character),\n    Position(Position),\n    Material(Material),\n    Inventory(Inventory),\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone)]\npub struct FreshNode {\n    /// The side joining the new node to `parent`\n    pub side: dodeca::Side,\n    pub parent: NodeId,\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone)]\npub struct Character {\n    pub name: String,\n    pub state: CharacterState,\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone)]\npub struct Inventory {\n    pub contents: Vec<EntityId>,\n}\n\npub mod connection_error_codes {\n    use quinn::VarInt;\n\n    pub const CONNECTION_LOST: VarInt = VarInt::from_u32(0);\n    pub const STREAM_ERROR: VarInt = VarInt::from_u32(1);\n    pub const BAD_CLIENT_COMMAND: VarInt = VarInt::from_u32(2);\n    pub const NAME_CONFLICT: VarInt = VarInt::from_u32(3);\n    pub const CLIENT_CLOSED_CONNECTION: VarInt = VarInt::from_u32(4);\n}\n"
  },
  {
    "path": "common/src/sim_config.rs",
    "content": "use std::time::Duration;\n\nuse serde::{Deserialize, Serialize};\n\nuse crate::{dodeca, math::MVector};\n\n/// Manually specified simulation config parameters\n#[derive(Serialize, Deserialize, Default)]\n#[serde(deny_unknown_fields)]\npub struct SimConfigRaw {\n    /// Number of steps per second\n    pub rate: Option<u16>,\n    /// Maximum distance at which nodes will be rendered in meters\n    pub view_distance: Option<f32>,\n    /// Maximum distance at which new chunks will be generated in meters\n    pub chunk_generation_distance: Option<f32>,\n    /// Distance at which fog becomes completely opaque in meters\n    pub fog_distance: Option<f32>,\n    pub input_queue_size_ms: Option<u16>,\n    /// Whether gameplay-like restrictions exist, such as limited inventory\n    pub gameplay_enabled: Option<bool>,\n    /// Number of voxels along the edge of a chunk\n    pub chunk_size: Option<u8>,\n    /// Approximate length of the edge of a voxel in meters\n    ///\n    /// Curved spaces have a notion of absolute distance, defined with respect to the curvature. We\n    /// mostly work in those units, but the conversion from meters, used to scale the player and assets\n    /// and so forth, is configurable. This effectively allows for configurable curvature.\n    ///\n    /// Note that exact voxel size varies within each chunk. We reference the mean width of the voxels\n    /// along the X axis through the center of a chunk.\n    pub voxel_size: Option<f32>,\n    /// Static configuration information relevant to character physics\n    #[serde(default)]\n    pub character: CharacterConfigRaw,\n}\n\n/// Complete simulation config parameters\n#[derive(Clone, Debug, Serialize, Deserialize)]\npub struct SimConfig {\n    /// Amount of time between each step. 
Inverse of the rate\n    pub step_interval: Duration,\n    /// Maximum distance at which nodes will be rendered in absolute units\n    pub view_distance: f32,\n    /// Maximum distance at which new chunks will be generated in absolute units\n    pub chunk_generation_distance: f32,\n    /// Distance at which fog becomes completely opaque in absolute units\n    pub fog_distance: f32,\n    pub input_queue_size: Duration,\n    /// Whether gameplay-like restrictions exist, such as limited inventory\n    pub gameplay_enabled: bool,\n    /// Number of voxels along the edge of a chunk\n    pub chunk_size: u8,\n    /// Static configuration information relevant to character physics\n    pub character: CharacterConfig,\n    /// Scaling factor converting meters to absolute units\n    pub meters_to_absolute: f32,\n}\n\nimpl SimConfig {\n    pub fn from_raw(x: &SimConfigRaw) -> Self {\n        let chunk_size = x.chunk_size.unwrap_or(12);\n        let voxel_size = x.voxel_size.unwrap_or(1.0);\n        let meters_to_absolute = meters_to_absolute(chunk_size, voxel_size);\n        SimConfig {\n            step_interval: Duration::from_secs(1) / x.rate.unwrap_or(30) as u32,\n            view_distance: x.view_distance.unwrap_or(75.0) * meters_to_absolute,\n            chunk_generation_distance: x.chunk_generation_distance.unwrap_or(60.0)\n                * meters_to_absolute,\n            fog_distance: x.fog_distance.unwrap_or(90.0) * meters_to_absolute,\n            input_queue_size: Duration::from_millis(x.input_queue_size_ms.unwrap_or(50).into()),\n            gameplay_enabled: x.gameplay_enabled.unwrap_or(false),\n            chunk_size,\n            character: CharacterConfig::from_raw(&x.character, meters_to_absolute),\n            meters_to_absolute,\n        }\n    }\n}\n\n/// Compute the scaling factor from meters to absolute units, given the number of voxels in a chunk\n/// and the approximate size of a voxel in meters.\nfn meters_to_absolute(chunk_size: u8, voxel_size: f32) 
-> f32 {\n    let a = MVector::from(dodeca::Vertex::A.chunk_to_node() * na::Vector4::new(1.0, 0.5, 0.5, 1.0))\n        .normalized_point();\n    let b = MVector::from(dodeca::Vertex::A.chunk_to_node() * na::Vector4::new(0.0, 0.5, 0.5, 1.0))\n        .normalized_point();\n    let minimum_chunk_face_separation = a.distance(&b);\n    let absolute_voxel_size = minimum_chunk_face_separation / f32::from(chunk_size);\n    absolute_voxel_size / voxel_size\n}\n\n/// Static configuration information relevant to character physics as provided in configuration files\n#[derive(Default, Clone, Debug, Serialize, Deserialize)]\npub struct CharacterConfigRaw {\n    /// Character movement speed in m/s during no-clip\n    pub no_clip_movement_speed: Option<f32>,\n    /// Character maximum movement speed while on the ground in m/s\n    pub max_ground_speed: Option<f32>,\n    /// Character artificial speed cap to avoid overloading the server in m/s\n    pub speed_cap: Option<f32>,\n    /// Maximum ground slope (0=horizontal, 1=45 degrees)\n    pub max_ground_slope: Option<f32>,\n    /// Character acceleration while on the ground in m/s^2\n    pub ground_acceleration: Option<f32>,\n    /// Character acceleration while in the air in m/s^2\n    pub air_acceleration: Option<f32>,\n    /// Acceleration of gravity in m/s^2\n    pub gravity_acceleration: Option<f32>,\n    /// Air resistance in (m/s^2) per (m/s); scales linearly with respect to speed\n    pub air_resistance: Option<f32>,\n    /// How fast the player jumps off the ground in m/s\n    pub jump_speed: Option<f32>,\n    /// How far away the player needs to be from the ground in meters to be considered in the air\n    pub ground_distance_tolerance: Option<f32>,\n    /// Radius of the character in meters\n    pub character_radius: Option<f32>,\n    /// How far a character can reach when placing blocks in meters\n    pub block_reach: Option<f32>,\n}\n\n/// Static configuration information relevant to character physics. 
Most fields are based on\n/// absolute units and seconds.\n#[derive(Clone, Debug, Serialize, Deserialize)]\npub struct CharacterConfig {\n    /// Character movement speed in units/s during no-clip\n    pub no_clip_movement_speed: f32,\n    /// Character maximum movement speed while on the ground in units/s\n    pub max_ground_speed: f32,\n    /// Character artificial speed cap to avoid overloading the server in units/s\n    pub speed_cap: f32,\n    /// Maximum ground slope (0=horizontal, 1=45 degrees)\n    pub max_ground_slope: f32,\n    /// Character acceleration while on the ground in units/s^2\n    pub ground_acceleration: f32,\n    /// Character acceleration while in the air in units/s^2\n    pub air_acceleration: f32,\n    /// Acceleration of gravity in units/s^2\n    pub gravity_acceleration: f32,\n    /// Air resistance in (units/s^2) per (units/s); scales linearly with respect to speed\n    pub air_resistance: f32,\n    /// How fast the player jumps off the ground in units/s\n    pub jump_speed: f32,\n    /// How far away the player needs to be from the ground to be considered in the air in absolute units\n    pub ground_distance_tolerance: f32,\n    /// Radius of the character in absolute units\n    pub character_radius: f32,\n    /// How far a character can reach when placing blocks in absolute units\n    pub block_reach: f32,\n}\n\nimpl CharacterConfig {\n    pub fn from_raw(x: &CharacterConfigRaw, meters_to_absolute: f32) -> Self {\n        CharacterConfig {\n            no_clip_movement_speed: x.no_clip_movement_speed.unwrap_or(12.0) * meters_to_absolute,\n            max_ground_speed: x.max_ground_speed.unwrap_or(4.0) * meters_to_absolute,\n            speed_cap: x.speed_cap.unwrap_or(30.0) * meters_to_absolute,\n            max_ground_slope: x.max_ground_slope.unwrap_or(1.73), // 60 degrees\n            ground_acceleration: x.ground_acceleration.unwrap_or(20.0) * meters_to_absolute,\n            air_acceleration: 
x.air_acceleration.unwrap_or(2.0) * meters_to_absolute,\n            gravity_acceleration: x.gravity_acceleration.unwrap_or(20.0) * meters_to_absolute,\n            air_resistance: x.air_resistance.unwrap_or(0.2),\n            jump_speed: x.jump_speed.unwrap_or(8.0) * meters_to_absolute,\n            ground_distance_tolerance: x.ground_distance_tolerance.unwrap_or(0.2)\n                * meters_to_absolute,\n            character_radius: x.character_radius.unwrap_or(0.4) * meters_to_absolute,\n            block_reach: x.block_reach.unwrap_or(10.0) * meters_to_absolute,\n        }\n    }\n}\n"
  },
  {
    "path": "common/src/traversal.rs",
    "content": "use std::collections::VecDeque;\n\nuse fxhash::FxHashSet;\n\nuse crate::{\n    collision_math::Ray,\n    dodeca::{self, Side, Vertex},\n    graph::{Graph, NodeId},\n    math::{MIsometry, MPoint},\n    node::ChunkId,\n    proto::Position,\n};\n\n/// Ensure all nodes exist whose bounding spheres are within `distance` of `start`\npub fn ensure_nearby(graph: &mut Graph, start: &Position, distance: f32) {\n    let max_node_center_distance = distance + dodeca::BOUNDING_SPHERE_RADIUS;\n\n    // We do a breadth-first instead of a depth-first traversal here to ensure that we take the\n    // minimal path to each node. This greatly helps prevent error from accumulating due to\n    // hundreds of transformations being composed.\n    let mut pending = VecDeque::<(NodeId, MIsometry<f32>)>::new();\n    let mut visited = FxHashSet::<NodeId>::default();\n\n    pending.push_back((start.node, MIsometry::identity()));\n    visited.insert(start.node);\n    let start_p = start.local * MPoint::origin();\n\n    while let Some((node, current_transform)) = pending.pop_front() {\n        for side in Side::iter() {\n            let neighbor_transform = current_transform * side.reflection();\n            let neighbor_p = neighbor_transform * MPoint::origin();\n            if -start_p.mip(&neighbor_p) > max_node_center_distance.cosh() {\n                continue;\n            }\n            let neighbor = graph.ensure_neighbor(node, side);\n            if visited.contains(&neighbor) {\n                continue;\n            }\n            visited.insert(neighbor);\n            pending.push_back((neighbor, neighbor_transform));\n        }\n    }\n}\n\n/// Compute `start.node`-relative transforms of all nodes whose bounding spheres lie within `distance` of\n/// `start`\npub fn nearby_nodes(\n    graph: &Graph,\n    start: &Position,\n    distance: f32,\n) -> Vec<(NodeId, MIsometry<f32>)> {\n    let max_node_center_distance = distance + dodeca::BOUNDING_SPHERE_RADIUS;\n\n    
struct PendingNode {\n        id: NodeId,\n        transform: MIsometry<f32>,\n    }\n\n    let mut result = Vec::new();\n\n    // We do a breadth-first instead of a depth-first traversal here to ensure that we take the\n    // minimal path to each node. This greatly helps prevent error from accumulating due to\n    // hundreds of transformations being composed.\n    let mut pending = VecDeque::<PendingNode>::new();\n    let mut visited = FxHashSet::<NodeId>::default();\n    let start_p = start.local * MPoint::origin();\n\n    pending.push_back(PendingNode {\n        id: start.node,\n        transform: MIsometry::identity(),\n    });\n    visited.insert(start.node);\n\n    while let Some(current) = pending.pop_front() {\n        let current_p = current.transform * MPoint::origin();\n        if -start_p.mip(&current_p) > max_node_center_distance.cosh() {\n            continue;\n        }\n        result.push((current.id, current.transform));\n\n        for side in Side::iter() {\n            let neighbor = match graph.neighbor(current.id, side) {\n                None => continue,\n                Some(x) => x,\n            };\n            if visited.contains(&neighbor) {\n                continue;\n            }\n            pending.push_back(PendingNode {\n                id: neighbor,\n                transform: current.transform * side.reflection(),\n            });\n            visited.insert(neighbor);\n        }\n    }\n\n    result\n}\n\npub struct RayTraverser<'a> {\n    graph: &'a Graph,\n    ray: &'a Ray,\n    radius: f32,\n    /// Chunks that have already been added to `iterator_queue` and shouldn't be added again\n    visited_chunks: FxHashSet<ChunkId>,\n    /// Chunks that should be returned by `next` in the future\n    iterator_queue: VecDeque<(Option<NodeId>, Vertex, MIsometry<f32>)>,\n    /// Chunks whose neighbors should be queried in the future\n    search_queue: VecDeque<(Option<NodeId>, Vertex, MIsometry<f32>)>,\n    klein_lower_boundary: f32,\n 
   klein_upper_boundary: f32,\n}\n\nimpl<'a> RayTraverser<'a> {\n    pub fn new(graph: &'a Graph, position: Position, ray: &'a Ray, radius: f32) -> Self {\n        // Pick the vertex closest to position.local as the vertex of the chunk to use to start collision checking\n        let mut closest_vertex = Vertex::A;\n        let mut closest_vertex_cosh_distance = f32::INFINITY;\n        for vertex in Vertex::iter() {\n            let vertex_cosh_distance =\n                (vertex.node_to_dual() * position.local * MPoint::origin()).w;\n            if vertex_cosh_distance < closest_vertex_cosh_distance {\n                closest_vertex = vertex;\n                closest_vertex_cosh_distance = vertex_cosh_distance;\n            }\n        }\n        let start_vertex = closest_vertex;\n\n        let mut visited_chunks = FxHashSet::<ChunkId>::default();\n        visited_chunks.insert(ChunkId::new(position.node, start_vertex));\n        let mut iterator_queue = VecDeque::new();\n        iterator_queue.push_back((Some(position.node), start_vertex, position.local));\n\n        // Precalculate the chunk boundaries for collision purposes. 
If the collider goes outside these bounds,\n        // the corresponding neighboring chunk will also be used for collision checking.\n        let klein_lower_boundary = radius.tanh();\n        let klein_upper_boundary = (Vertex::chunk_to_dual_factor().atanh() - radius).tanh();\n\n        Self {\n            graph,\n            radius,\n            ray,\n            visited_chunks,\n            iterator_queue,\n            search_queue: VecDeque::new(),\n            klein_lower_boundary,\n            klein_upper_boundary,\n        }\n    }\n\n    pub fn next(&mut self, tanh_distance: f32) -> Option<(Option<ChunkId>, MIsometry<f32>)> {\n        loop {\n            // Return the next entry that's queued up\n            if let Some(entry @ (node, vertex, node_transform)) = self.iterator_queue.pop_front() {\n                self.search_queue.push_back(entry);\n                // Combine node and vertex, and convert node transform to chunk transform\n                return Some((\n                    node.map(|node| ChunkId::new(node, vertex)),\n                    *vertex.node_to_dual() * node_transform,\n                ));\n            }\n\n            // If no entries are queued up, continue the breadth-first search to queue up new entries.\n            let (node, vertex, node_transform) = self.search_queue.pop_front()?;\n            let Some(node) = node else {\n                // Cannot branch from chunks that are outside the graph\n                continue;\n            };\n\n            let local_ray = vertex.node_to_dual() * node_transform * self.ray;\n\n            // Compute the Klein-Beltrami coordinates of the ray segment's endpoints. 
To check whether neighboring chunks\n            // are needed, we need to check whether the endpoints of the line segments lie outside the boundaries of the square\n            // bounded by `klein_lower_boundary` and `klein_upper_boundary`.\n            let klein_ray_start = na::Point3::from_homogeneous(local_ray.position.into()).unwrap();\n            let klein_ray_end =\n                na::Point3::from_homogeneous(local_ray.ray_point(tanh_distance).into()).unwrap();\n\n            // Add neighboring chunks as necessary based on a conservative AABB check, using one coordinate at a time.\n            for axis in 0..3 {\n                // Check for neighboring nodes\n                if klein_ray_start[axis] <= self.klein_lower_boundary\n                    || klein_ray_end[axis] <= self.klein_lower_boundary\n                {\n                    let side = vertex.canonical_sides()[axis];\n                    let next_node_transform = side.reflection() * node_transform;\n                    // Crude check to ensure that the neighboring chunk's node can be in the path of the ray. For simplicity, this\n                    // check treats each node as a sphere and assumes the ray is pointed directly towards its center. 
The check is\n                    // needed because chunk generation uses this approximation, and this check is not guaranteed to pass near corners\n                    // because the AABB check can have false positives.\n                    let ray_node_distance = (next_node_transform * self.ray.position).w.acosh();\n                    let ray_length = tanh_distance.atanh();\n                    if ray_node_distance - ray_length - self.radius > dodeca::BOUNDING_SPHERE_RADIUS\n                    {\n                        // Ray cannot intersect node\n                        continue;\n                    }\n                    // Add the new chunk to the queue.\n                    if let Some(neighbor) = self.graph.neighbor(node, side) {\n                        if self.visited_chunks.insert(ChunkId::new(neighbor, vertex)) {\n                            self.iterator_queue.push_back((\n                                Some(neighbor),\n                                vertex,\n                                next_node_transform,\n                            ));\n                        }\n                    } else {\n                        // There's `NodeId` for the requested chunk, so substitute `None`.\n                        self.iterator_queue\n                            .push_back((None, vertex, next_node_transform));\n                    }\n                }\n\n                // Check for neighboring chunks within the same node\n                if klein_ray_start[axis] >= self.klein_upper_boundary\n                    || klein_ray_end[axis] >= self.klein_upper_boundary\n                {\n                    let next_vertex = vertex.adjacent_vertices()[axis];\n                    if self.visited_chunks.insert(ChunkId::new(node, next_vertex)) {\n                        self.iterator_queue\n                            .push_back((Some(node), next_vertex, node_transform));\n                    }\n                }\n            }\n        }\n    
}\n}\n\n#[cfg(test)]\nmod tests {\n    use approx::assert_abs_diff_eq;\n\n    use super::*;\n\n    // Make sure that ensure_nearby and nearby_nodes finish even for a relatively large radius\n    // and traverse the expected number of nodes\n    #[test]\n    fn traversal_functions_example() {\n        let mut graph = Graph::new(1);\n        ensure_nearby(&mut graph, &Position::origin(), 6.0);\n        assert_abs_diff_eq!(graph.len(), 687959, epsilon = 50);\n\n        let nodes = nearby_nodes(&graph, &Position::origin(), 6.0);\n        assert_abs_diff_eq!(nodes.len(), 687959, epsilon = 50);\n    }\n}\n"
  },
  {
    "path": "common/src/voxel_math.rs",
    "content": "use std::ops::{Index, IndexMut};\n\nuse serde::{Deserialize, Serialize};\n\nuse crate::dodeca::Side;\n\n/// Represents a particular axis in a voxel grid\n#[derive(Debug, Clone, Copy, PartialEq, Eq)]\npub enum CoordAxis {\n    X = 0,\n    Y = 1,\n    Z = 2,\n}\n\n/// Trying to convert a `usize` to a `CoordAxis` returns this struct if the provided\n/// `usize` is out-of-bounds\n#[derive(Debug, Clone, Copy)]\npub struct CoordAxisOutOfBounds;\n\nimpl CoordAxis {\n    /// Iterates through the the axes in ascending order\n    pub fn iter() -> impl ExactSizeIterator<Item = Self> {\n        [Self::X, Self::Y, Self::Z].into_iter()\n    }\n\n    /// Returns the pair axes orthogonal to the current axis\n    pub fn other_axes(self) -> [Self; 2] {\n        match self {\n            Self::X => [Self::Y, Self::Z],\n            Self::Y => [Self::Z, Self::X],\n            Self::Z => [Self::X, Self::Y],\n        }\n    }\n}\n\nimpl TryFrom<usize> for CoordAxis {\n    type Error = CoordAxisOutOfBounds;\n\n    fn try_from(value: usize) -> Result<Self, Self::Error> {\n        match value {\n            0 => Ok(Self::X),\n            1 => Ok(Self::Y),\n            2 => Ok(Self::Z),\n            _ => Err(CoordAxisOutOfBounds),\n        }\n    }\n}\n\n/// Represents a direction in a particular axis. 
This struct is meant to be used with a coordinate axis,\n/// so when paired with the X-axis, it represents the postitive X-direction when set to Plus and the\n/// negative X-direction when set to Minus.\n#[derive(Debug, Clone, Copy, PartialEq, Eq)]\npub enum CoordSign {\n    Plus = 1,\n    Minus = -1,\n}\n\nimpl CoordSign {\n    /// Iterates through the two possible coordinate directions\n    pub fn iter() -> impl ExactSizeIterator<Item = Self> {\n        [CoordSign::Plus, CoordSign::Minus].into_iter()\n    }\n}\n\nimpl std::ops::Mul for CoordSign {\n    type Output = CoordSign;\n\n    fn mul(self, rhs: Self) -> Self::Output {\n        match self == rhs {\n            true => CoordSign::Plus,\n            false => CoordSign::Minus,\n        }\n    }\n}\n\nimpl std::ops::MulAssign for CoordSign {\n    fn mul_assign(&mut self, rhs: Self) {\n        *self = *self * rhs;\n    }\n}\n\n/// Coordinates for a discrete voxel within a chunk, not including margins\n#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]\npub struct Coords(pub [u8; 3]);\n\nimpl Coords {\n    /// Returns the array index in `VoxelData` corresponding to these coordinates\n    pub fn to_index(self, chunk_size: u8) -> usize {\n        let chunk_size_with_margin = chunk_size as usize + 2;\n        (self.0[0] as usize + 1)\n            + (self.0[1] as usize + 1) * chunk_size_with_margin\n            + (self.0[2] as usize + 1) * chunk_size_with_margin.pow(2)\n    }\n\n    /// Returns the x, y, or z coordinate that would correspond to the voxel meeting the chunk boundary in the direction of `sign`\n    pub fn boundary_coord(chunk_size: u8, sign: CoordSign) -> u8 {\n        match sign {\n            CoordSign::Plus => chunk_size - 1,\n            CoordSign::Minus => 0,\n        }\n    }\n}\n\nimpl Index<CoordAxis> for Coords {\n    type Output = u8;\n\n    #[inline]\n    fn index(&self, coord_axis: CoordAxis) -> &u8 {\n        self.0.index(coord_axis as usize)\n    }\n}\n\nimpl 
IndexMut<CoordAxis> for Coords {\n    #[inline]\n    fn index_mut(&mut self, coord_axis: CoordAxis) -> &mut u8 {\n        self.0.index_mut(coord_axis as usize)\n    }\n}\n\n/// Represents one of the six main directions within a chunk: positive or negative x, y, and z.\n#[derive(Debug, Clone, Copy, PartialEq, Eq)]\npub struct ChunkDirection {\n    pub axis: CoordAxis,\n    pub sign: CoordSign,\n}\n\nimpl ChunkDirection {\n    pub const PLUS_X: Self = ChunkDirection {\n        axis: CoordAxis::X,\n        sign: CoordSign::Plus,\n    };\n    pub const PLUS_Y: Self = ChunkDirection {\n        axis: CoordAxis::Y,\n        sign: CoordSign::Plus,\n    };\n    pub const PLUS_Z: Self = ChunkDirection {\n        axis: CoordAxis::Z,\n        sign: CoordSign::Plus,\n    };\n    pub const MINUS_X: Self = ChunkDirection {\n        axis: CoordAxis::X,\n        sign: CoordSign::Minus,\n    };\n    pub const MINUS_Y: Self = ChunkDirection {\n        axis: CoordAxis::Y,\n        sign: CoordSign::Minus,\n    };\n    pub const MINUS_Z: Self = ChunkDirection {\n        axis: CoordAxis::Z,\n        sign: CoordSign::Minus,\n    };\n\n    pub fn iter() -> impl ExactSizeIterator<Item = ChunkDirection> {\n        [\n            Self::PLUS_X,\n            Self::PLUS_Y,\n            Self::PLUS_Z,\n            Self::MINUS_X,\n            Self::MINUS_Y,\n            Self::MINUS_Z,\n        ]\n        .into_iter()\n    }\n}\n\n/// Represents one of the 6 possible permutations a chunk's axes can have, useful for comparing the canonical sides of one chunk to an adjacent chunk.\n/// This is analogous to a 3x3 rotation/reflection matrix with a restricted domain.\n/// Note that it may make sense to define a more general `ChunkOrientation` class that takes three `ChunkDirection`s, to represent\n/// any cube rotation/reflection, but no use exists for it yet, so it has not yet been implemented.\n#[derive(Debug, Clone, Copy, PartialEq, Eq)]\npub struct ChunkAxisPermutation {\n    axes: [CoordAxis; 
3],\n}\n\nimpl ChunkAxisPermutation {\n    pub const IDENTITY: Self = ChunkAxisPermutation {\n        axes: [CoordAxis::X, CoordAxis::Y, CoordAxis::Z],\n    };\n\n    /// Constructs a `ChunkAxisPermutation` that, when left-multiplying a set of coordinates, moves from `from`'s reference\n    /// frame to `to`'s reference frame, where `from` and `to` are represented as three dodeca sides incident to a vertex\n    /// that determine the orientation of a chunk.\n    pub fn from_permutation(from: [Side; 3], to: [Side; 3]) -> Self {\n        assert!(from[0] != from[1] && from[0] != from[2] && from[1] != from[2]);\n        assert!(to[0] != to[1] && to[0] != to[2] && to[1] != to[2]);\n        ChunkAxisPermutation {\n            axes: from.map(|f| {\n                CoordAxis::try_from(\n                    to.iter()\n                        .position(|&t| f == t)\n                        .expect(\"from and to must have same set of sides\"),\n                )\n                .unwrap()\n            }),\n        }\n    }\n}\n\nimpl Index<CoordAxis> for ChunkAxisPermutation {\n    type Output = CoordAxis;\n\n    fn index(&self, index: CoordAxis) -> &Self::Output {\n        &self.axes[index as usize]\n    }\n}\n\nimpl std::ops::Mul<Coords> for ChunkAxisPermutation {\n    type Output = Coords;\n\n    fn mul(self, rhs: Coords) -> Self::Output {\n        let mut result = Coords([0; 3]);\n        for axis in CoordAxis::iter() {\n            result[self[axis]] = rhs[axis];\n        }\n        result\n    }\n}\n\nimpl std::ops::Mul<ChunkDirection> for ChunkAxisPermutation {\n    type Output = ChunkDirection;\n\n    fn mul(self, rhs: ChunkDirection) -> Self::Output {\n        ChunkDirection {\n            axis: self[rhs.axis],\n            sign: rhs.sign,\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::dodeca::Vertex;\n\n    use super::*;\n\n    fn coords_to_vector3(coords: Coords) -> na::Vector3<i32> {\n        na::Vector3::new(\n            coords[CoordAxis::X] 
as i32,\n            coords[CoordAxis::Y] as i32,\n            coords[CoordAxis::Z] as i32,\n        )\n    }\n\n    fn coord_axis_to_vector3(coord_axis: CoordAxis) -> na::Vector3<i32> {\n        let mut vector = na::Vector3::new(0, 0, 0);\n        vector[coord_axis as usize] = 1;\n        vector\n    }\n\n    fn chunk_direction_to_vector3(chunk_direction: ChunkDirection) -> na::Vector3<i32> {\n        let mut vector = na::Vector3::new(0, 0, 0);\n        vector[chunk_direction.axis as usize] = chunk_direction.sign as i32;\n        vector\n    }\n\n    fn chunk_axis_permutation_to_matrix3(\n        chunk_axis_permutation: ChunkAxisPermutation,\n    ) -> na::Matrix3<i32> {\n        na::Matrix::from_columns(&chunk_axis_permutation.axes.map(coord_axis_to_vector3))\n    }\n\n    // Helper function to return all permutations as a list of ordered triples\n    fn get_all_permutations() -> Vec<(usize, usize, usize)> {\n        let mut permutations = vec![];\n        for i in 0..3 {\n            for j in 0..3 {\n                if j == i {\n                    continue;\n                }\n                for k in 0..3 {\n                    if k == i || k == j {\n                        continue;\n                    }\n                    permutations.push((i, j, k));\n                }\n            }\n        }\n        permutations\n    }\n\n    #[test]\n    fn test_chunk_axis_permutation() {\n        let sides = Vertex::A.canonical_sides();\n\n        let example_coords = Coords([3, 5, 9]);\n\n        for (i, j, k) in get_all_permutations() {\n            let permutation = ChunkAxisPermutation::from_permutation(\n                [sides[0], sides[1], sides[2]],\n                [sides[i], sides[j], sides[k]],\n            );\n\n            // Test that the permutation goes in the expected direction\n            assert_eq!(\n                permutation * example_coords,\n                Coords([\n                    example_coords.0[i],\n                    
example_coords.0[j],\n                    example_coords.0[k]\n                ])\n            );\n\n            // Test that the multiplication operations are consistent with matrix multiplication\n            assert_eq!(\n                coords_to_vector3(permutation * example_coords),\n                chunk_axis_permutation_to_matrix3(permutation) * coords_to_vector3(example_coords)\n            );\n            for chunk_direction in ChunkDirection::iter() {\n                assert_eq!(\n                    chunk_direction_to_vector3(permutation * chunk_direction),\n                    chunk_axis_permutation_to_matrix3(permutation)\n                        * chunk_direction_to_vector3(chunk_direction)\n                )\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "common/src/world.rs",
    "content": "use serde::{Deserialize, Serialize};\n\n#[derive(\n    Debug, Copy, Clone, Default, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize,\n)]\n#[repr(u16)]\npub enum Material {\n    #[default]\n    Void = 0,\n    Dirt = 1,\n    Sand = 2,\n    Silt = 3,\n    Clay = 4,\n    Mud = 5,\n    SandyLoam = 6,\n    SiltyLoam = 7,\n    ClayLoam = 8,\n    RedSand = 9,\n    Limestone = 10,\n    Shale = 11,\n    Dolomite = 12,\n    Sandstone = 13,\n    RedSandstone = 14,\n    Marble = 15,\n    Slate = 16,\n    Granite = 17,\n    Diorite = 18,\n    Andesite = 19,\n    Gabbro = 20,\n    Basalt = 21,\n    Olivine = 22,\n    Water = 23,\n    Lava = 24,\n    Wood = 25,\n    Leaves = 26,\n    WoodPlanks = 27,\n    GreyBrick = 28,\n    WhiteBrick = 29,\n    Ice = 30,\n    IceSlush = 31,\n    Gravel = 32,\n    Snow = 33,\n    CoarseGrass = 34,\n    TanGrass = 35,\n    LushGrass = 36,\n    MudGrass = 37,\n    Grass = 38,\n    CaveGrass = 39,\n}\n\nimpl Material {\n    pub const COUNT: usize = 40;\n\n    pub const VALUES: [Self; Self::COUNT] = [\n        Material::Void,\n        Material::Dirt,\n        Material::Sand,\n        Material::Silt,\n        Material::Clay,\n        Material::Mud,\n        Material::SandyLoam,\n        Material::SiltyLoam,\n        Material::ClayLoam,\n        Material::RedSand,\n        Material::Limestone,\n        Material::Shale,\n        Material::Dolomite,\n        Material::Sandstone,\n        Material::RedSandstone,\n        Material::Marble,\n        Material::Slate,\n        Material::Granite,\n        Material::Diorite,\n        Material::Andesite,\n        Material::Gabbro,\n        Material::Basalt,\n        Material::Olivine,\n        Material::Water,\n        Material::Lava,\n        Material::Wood,\n        Material::Leaves,\n        Material::WoodPlanks,\n        Material::GreyBrick,\n        Material::WhiteBrick,\n        Material::Ice,\n        Material::IceSlush,\n        Material::Gravel,\n        Material::Snow,\n   
     Material::CoarseGrass,\n        Material::TanGrass,\n        Material::LushGrass,\n        Material::MudGrass,\n        Material::Grass,\n        Material::CaveGrass,\n    ];\n}\n\n#[derive(Debug, Clone, Copy)]\npub struct MaterialOutOfBounds;\n\nimpl std::fmt::Display for MaterialOutOfBounds {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        write!(f, \"Integer input does not represent a valid material\")\n    }\n}\n\nimpl std::error::Error for MaterialOutOfBounds {}\n\nimpl TryFrom<u16> for Material {\n    type Error = MaterialOutOfBounds;\n\n    fn try_from(value: u16) -> Result<Self, Self::Error> {\n        Material::VALUES\n            .get(value as usize)\n            .ok_or(MaterialOutOfBounds)\n            .copied()\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::Material;\n\n    #[test]\n    fn u16_to_material_consistency_check() {\n        for i in 0..Material::COUNT {\n            let index = u16::try_from(i).unwrap();\n            let material =\n                Material::try_from(index).expect(\"no missing entries in try_from match statement\");\n            assert_eq!(index, material as u16);\n        }\n    }\n}\n"
  },
  {
    "path": "common/src/worldgen/horosphere.rs",
    "content": "use libm::{cosf, sinf, sqrtf};\nuse rand::{Rng, SeedableRng};\nuse rand_distr::Poisson;\nuse rand_pcg::Pcg64Mcg;\n\nuse crate::{\n    dodeca::{Side, Vertex},\n    graph::{Graph, NodeId},\n    math::{MDirection, MIsometry, MPoint, MVector},\n    node::VoxelData,\n    peer_traverser,\n    voxel_math::Coords,\n    world::Material,\n    worldgen::hash,\n};\n\n/// Whether an assortment of random horospheres should be added to world generation. This is a temporary\n/// option until large structures that fit with the theme of the world are introduced.\n/// For code simplicity, this is made into a constant instead of a configuration option.\nconst HOROSPHERES_ENABLED: bool = true;\n\n/// Value to mix into the node's spice for generating horospheres. Chosen randomly.\nconst HOROSPHERE_SEED: u64 = 6046133366614030452;\n\n/// Represents a node's reference to a particular horosphere. As a general rule, for any give horosphere,\n/// every node in the convex hull of nodes containing the horosphere will have a `HorosphereNode`\n/// referencing it. The unique node in this convex hull with the smallest depth in the graph is the owner\n/// of the horosphere, where it is originally generated.\n#[derive(Copy, Clone)]\npub struct HorosphereNode {\n    /// The node that originally created the horosphere. All parts of the horosphere will\n    /// be in a node with this as an ancestor, and all HorosphereNodes with the same `owner` correspond\n    /// to the same horosphere.\n    owner: NodeId,\n\n    /// The horosphere's location relative to the node containing this `HorosphereNode`\n    horosphere: Horosphere,\n\n    /// A region that bounds the `HorosphereNode`'s descendents. A `HorosphereNode` will never propagate beyond\n    /// this region, and the region's bounds will be as tight as possible. 
Note that this region does note necessarily\n    /// contain the whole horosphere because parts of the horosphere that require backtracking towards the origin\n    /// are ignored.\n    // Note: All public constructors generate a `HorosphereNode` with tight bounds, but some `HorosphereNode`s\n    // might not have tight bounds because a `HorosphereNode` is used in an intermediate calculations, averaged\n    // together with other `HorosphereNode`s before the bounds are tightened.\n    region: NodeBoundedRegion,\n}\n\nimpl HorosphereNode {\n    /// Returns the `HorosphereNode` for the given node, either by propagating an existing parent\n    /// `HorosphereNode` or by randomly generating a new one.\n    pub fn new(graph: &Graph, node_id: NodeId) -> Option<HorosphereNode> {\n        if !HOROSPHERES_ENABLED {\n            return None;\n        }\n        HorosphereNode::create_from_parents(graph, node_id)\n            .or_else(|| HorosphereNode::maybe_create_fresh(graph, node_id))\n    }\n\n    /// Propagates `HorosphereNode` information from the given parent nodes to this child node. Returns\n    /// `None` if there's no horosphere to propagate, either because none of the parent nodes have a\n    /// horosphere associated with them, or because any existing horosphere is outside the range\n    /// of this node.\n    fn create_from_parents(graph: &Graph, node_id: NodeId) -> Option<HorosphereNode> {\n        // Rather than selecting an arbitrary parent HorosphereNode, we average all of them. This\n        // is important because otherwise, the propagation of floating point precision errors could\n        // create a seam. 
This ensures that all errors average out, keeping the horosphere smooth.\n        let mut horospheres_to_average_iter =\n            graph\n                .parents(node_id)\n                .filter_map(|(parent_side, parent_id)| {\n                    graph\n                        .node_state(parent_id)\n                        .horosphere\n                        .as_ref()\n                        .and_then(|h| h.propagate(parent_side))\n                });\n\n        let mut horosphere_node = horospheres_to_average_iter.next()?;\n        let mut count = 1;\n        for other in horospheres_to_average_iter {\n            // Take an average of all HorosphereNodes in this iterator, giving each of them equal weight\n            // by keeping track of a moving average with a weight that changes over time to make the\n            // numbers work out the same way.\n            count += 1;\n            horosphere_node.average_with(other, 1.0 / count as f32);\n        }\n\n        horosphere_node.horosphere.renormalize();\n        horosphere_node.tighten_region_bounds();\n        Some(horosphere_node)\n    }\n\n    /// Create a `HorosphereNode` corresponding to a freshly created horosphere with the given node as its owner,\n    /// if one should be created. 
This function is called on every node that doesn't already have a horosphere\n    /// associated with it, so this function has control over how frequent the horospheres should be.\n    fn maybe_create_fresh(graph: &Graph, node_id: NodeId) -> Option<HorosphereNode> {\n        const HOROSPHERE_DENSITY: f32 = 6.0;\n\n        let spice = graph.hash_of(node_id) as u64;\n        let mut rng = rand_pcg::Pcg64Mcg::seed_from_u64(hash(spice, HOROSPHERE_SEED));\n        for _ in 0..rng.sample(Poisson::new(HOROSPHERE_DENSITY).unwrap()) as u32 {\n            // This logic is designed to create an average of \"HOROSPHERE_DENSITY\" horosphere candiates\n            // in the region determined by `random_horosphere_pos` and then filters the resulting\n            // list of candiates to only ones where the current node is the suitable owner for them.\n            // Filtering instead of rejection sampling ensures a uniform distribution of horosphere\n            // even though different nodes have different-sized regions for valid horospheres.\n\n            // However, we do return early to ensure that after filtering, we only take the first\n            // horosphere if there is one, since a node can have at most one horosphere.\n            let horosphere = Horosphere::new_random(&mut rng, MAX_OWNED_HOROSPHERE_W);\n            if is_horosphere_valid(graph, node_id, &horosphere) {\n                let mut horosphere_node = HorosphereNode {\n                    owner: node_id,\n                    horosphere,\n                    region: NodeBoundedRegion::node_and_descendents(graph, node_id),\n                };\n                horosphere_node.tighten_region_bounds();\n                return Some(horosphere_node);\n            }\n        }\n        None\n    }\n\n    /// Updates the region associated with the `HorosphereNode` to have bounds that are as tight as possible.\n    fn tighten_region_bounds(&mut self) {\n        for side in Side::iter() {\n            if 
!self.region.is_bounded_by(side) && self.can_tighten_region_bound(side) {\n                self.region.add_bound(side);\n            }\n        }\n    }\n\n    /// Computes whether propagation can stop at a particular side due to no part of the horosphere\n    /// being behind it. This function is used to tighten region bounds.\n    fn can_tighten_region_bound(&self, side: Side) -> bool {\n        !self.horosphere.intersects_half_space(side.normal())\n    }\n\n    /// Returns an estimate of the `HorosphereNode` corresponding to the node adjacent to the current node\n    /// at the given side, or `None` if the horosphere is no longer relevant after crossing the given side.\n    /// The estimates given by multiple nodes may be used to produce the actual `HorosphereNode`.\n    fn propagate(&self, side: Side) -> Option<HorosphereNode> {\n        // Don't propagate beyond the already-computed bounds of the `HorosphereNode`.\n        if self.region.is_bounded_by(side) {\n            return None;\n        }\n\n        Some(HorosphereNode {\n            owner: self.owner,\n            horosphere: side.reflection() * self.horosphere,\n            region: self.region.neighbor(side),\n        })\n    }\n\n    /// Takes the weighted average of the coordinates of this horosphere with the coordinates of the other horosphere.\n    fn average_with(&mut self, other: HorosphereNode, other_weight: f32) {\n        if self.owner != other.owner {\n            // If this panic is triggered, it may mean that two horospheres were generated that interfere\n            // with each other. 
The logic in `should_generate` should prevent this, so this would be a sign\n            // of a bug in that function's implementation.\n            panic!(\"Tried to average two unrelated HorosphereNodes\");\n        }\n        self.horosphere.pos =\n            self.horosphere.pos * (1.0 - other_weight) + other.horosphere.pos * other_weight;\n        self.region = self.region.intersect(other.region);\n    }\n\n    /// Returns whether the horosphere is freshly created, instead of a\n    /// reference to a horosphere created earlier on in the node graph.\n    fn is_fresh(&self, node_id: NodeId) -> bool {\n        self.owner == node_id\n    }\n\n    /// If `self` and `other` would propagate to the same node, to avoid interference, only one of these\n    /// two horospheres can generate. This function determines whether `self` should be the one to generate.\n    fn has_priority(&self, other: &HorosphereNode, node_id: NodeId) -> bool {\n        // If both horospheres are fresh, use the owner's NodeId as an arbitrary\n        // tie-breaker to decide which horosphere should win.\n        !self.is_fresh(node_id) || (other.is_fresh(node_id) && self.owner < other.owner)\n    }\n\n    /// Based on other nodes in the graph, determines whether the horosphere\n    /// should generate. 
If false, it means that another horosphere elsewhere\n    /// would interfere, and generation should not proceed.\n    pub fn should_generate(&self, graph: &Graph, node_id: NodeId) -> bool {\n        if !self.is_fresh(node_id) {\n            // The horosphere is propagated and so is already proven to exist.\n            return true;\n        }\n\n        for peer in peer_traverser::expect_peer_nodes(graph, node_id) {\n            let Some(peer_horosphere) = graph\n                .partial_node_state(peer.node())\n                .candidate_horosphere\n                .as_ref()\n            else {\n                continue;\n            };\n            if !self.has_priority(peer_horosphere, node_id)\n                // Check that these horospheres can interfere by seeing if their regions share a node in common.\n                && peer_horosphere.region.contains_node(peer.peer_to_shared())\n                && self.region.contains_node(peer.base_to_shared())\n            {\n                return false;\n            }\n        }\n        true\n    }\n}\n\n/// Returns whether the given horosphere position could represent a horosphere generated by the\n/// given node. The requirement is that a horosphere must be bounded by all of the node's parent sides\n/// (as otherwise, a parent node would own the horosphere), and the horosphere must not be fully\n/// behind any of the other dodeca sides (as otherwise, a child node would own the horosphere). 
Note\n/// that the horosphere does not necessarily need to intersect the dodeca to be valid.\nfn is_horosphere_valid(graph: &Graph, node_id: NodeId, horosphere: &Horosphere) -> bool {\n    Side::iter().all(|s| !horosphere.is_inside_half_space(s.normal()))\n        && (graph.parents(node_id)).all(|(s, _)| !horosphere.intersects_half_space(s.normal()))\n}\n\n/// The maximum node-centric w-coordinate a horosphere can have such that the node in question\n/// is still the owner of the horosphere.\n// See `test_max_owned_horosphere_w()` for how this is computed.\nconst MAX_OWNED_HOROSPHERE_W: f32 = 5.9047837;\n\n/// Represents a chunks's reference to a particular horosphere.\npub struct HorosphereChunk {\n    /// The horosphere's location relative to the chunk containing this `HorosphereChunk`.\n    pub horosphere: Horosphere,\n}\n\nimpl HorosphereChunk {\n    /// Creates a `HorosphereChunk` based on a `HorosphereNode`\n    pub fn new(horosphere_node: &HorosphereNode, vertex: Vertex) -> Self {\n        HorosphereChunk {\n            horosphere: vertex.node_to_dual() * horosphere_node.horosphere,\n        }\n    }\n\n    /// Rasterizes the horosphere chunk into the given `VoxelData`\n    pub fn generate(&self, voxels: &mut VoxelData, chunk_size: u8) {\n        for z in 0..chunk_size {\n            for y in 0..chunk_size {\n                for x in 0..chunk_size {\n                    let pos = MVector::new(\n                        x as f32 + 0.5,\n                        y as f32 + 0.5,\n                        z as f32 + 0.5,\n                        chunk_size as f32 * Vertex::dual_to_chunk_factor(),\n                    )\n                    .normalized_point();\n                    if self.horosphere.contains_point(&pos) {\n                        voxels.data_mut(chunk_size)[Coords([x, y, z]).to_index(chunk_size)] =\n                            Material::RedSandstone;\n                    }\n                }\n            }\n        }\n    }\n}\n\n/// A horosphere 
in hyperbolic space. Contains helper functions for common operations\n#[derive(Copy, Clone)]\npub struct Horosphere {\n    /// This vector, `pos`, entirely represents the horosphere using the following rule: A vector\n    /// `point` is in this horosphere exactly when `point.mip(&self.pos) == -1`. This vector should\n    /// always have the invariant `self.pos.mip(&self.pos) == 0`, behaving much like a \"light-like\"\n    /// vector in Minkowski space.\n    ///\n    /// One recommended way to gain an intuition of this vector is to consider its direction separately.\n    /// The vector points in the direction of an ideal point in the hyperboloid model because it is on\n    /// the cone that the hyperboloid approaches. This ideal point is the center of the horosphere.\n    /// This determines `self.pos` up to a scalar multiple, and the remaining degree of freedom can\n    /// be pinned down by analyzing the w-coordinate.\n    ///\n    /// The w-coordinate determines which of the concentric horospheres with that ideal point is represented.\n    /// A larger w-coordinate represents a horosphere that is farther away from the origin.\n    /// If the w-coordinate is 1, the origin is on the horosphere's surface.\n    /// If it's less than 1, the origin is in the horosphere's interior, and if it's greater than 1, the origin\n    /// is outside the horosphere.\n    ///\n    /// TODO: If a player traverses too far inside a horosphere, this vector will underflow, preventing\n    /// the horosphere from generating properly. 
Fixing this requires using logic similar to `Plane` to\n    /// increase the range of magnitudes the vector can take.\n    pos: MVector<f32>,\n}\n\nimpl Horosphere {\n    /// Returns whether the point is inside the horosphere\n    pub fn contains_point(&self, point: &MPoint<f32>) -> bool {\n        self.pos.mip(point) >= -1.0\n    }\n\n    /// Returns whether the horosphere is entirely inside the half space in front of the plane defined by `normal`\n    pub fn is_inside_half_space(&self, normal: &MDirection<f32>) -> bool {\n        self.pos.mip(normal) >= 1.0\n    }\n\n    /// Returns whether any part of the horosphere intersects the half space in front of the plane defined by `normal`\n    pub fn intersects_half_space(&self, normal: &MDirection<f32>) -> bool {\n        self.pos.mip(normal) >= -1.0\n    }\n\n    /// Ensures that the horosphere invariant holds (`pos.mip(&pos) == 0`), as numerical error can otherwise propagate,\n    /// potentially making the surface behave more like a sphere or an equidistant surface.\n    pub fn renormalize(&mut self) {\n        self.pos.w = self.pos.xyz().norm();\n    }\n\n    /// Returns a uniformly random horosphere, restricted so that the w-coordinate of its representing vector\n    /// must be at most `max_w`.\n    pub fn new_random(rng: &mut Pcg64Mcg, max_w: f32) -> Self {\n        // Pick a w-coordinate whose probability density function is `p(w) = w`. By trial and error,\n        // this seems to produce horospheres with a uniform and isotropic distribution.\n        // TODO: Find a rigorous explanation for this. 
We would want to show that the probability density is unchanged\n        // when an isometry is applied.\n        let w = sqrtf(rng.random::<f32>()) * max_w;\n\n        // Uniformly pick spherical coordinates from a unit sphere\n        let cos_phi = rng.random::<f32>() * 2.0 - 1.0;\n        let sin_phi = sqrtf(1.0 - cos_phi * cos_phi);\n        let theta = rng.random::<f32>() * std::f32::consts::TAU;\n\n        // Construct the resulting vector.\n        Horosphere {\n            pos: MVector::new(\n                w * sin_phi * cosf(theta),\n                w * sin_phi * sinf(theta),\n                w * cos_phi,\n                w,\n            ),\n        }\n    }\n}\n\nimpl std::ops::Mul<Horosphere> for &MIsometry<f32> {\n    type Output = Horosphere;\n\n    fn mul(self, rhs: Horosphere) -> Self::Output {\n        Horosphere {\n            pos: self * rhs.pos,\n        }\n    }\n}\n\n/// Represents a region of space bounded by planes corresponding to a subset of a node's sides in that node's perspective.\n#[derive(Clone, Copy)]\nstruct NodeBoundedRegion {\n    /// A bit-array with 12 elements, one for each side. 
A 1 means that that side is a bound, and a 0 means it is not.\n    bounded_sides: u16,\n}\n\nimpl NodeBoundedRegion {\n    /// Creates a region with no bounds\n    #[cfg(test)]\n    fn unbounded() -> Self {\n        NodeBoundedRegion { bounded_sides: 0 }\n    }\n\n    /// Creates a region that contains the given node and all its descendents\n    fn node_and_descendents(graph: &Graph, node_id: NodeId) -> Self {\n        let mut bounded_sides = 0;\n        for (parent_side, _) in graph.parents(node_id) {\n            bounded_sides |= 1 << (parent_side as u8);\n        }\n        NodeBoundedRegion { bounded_sides }\n    }\n\n    /// Produces the set intersection of the `self` and `other` regions\n    fn intersect(self, other: NodeBoundedRegion) -> NodeBoundedRegion {\n        NodeBoundedRegion {\n            bounded_sides: self.bounded_sides | other.bounded_sides,\n        }\n    }\n\n    /// Returns the sub-region consisting of everything beyond the plane containing\n    /// the given side (in the perspective of the corresponding neighboring node).\n    /// As a precondition, the given side cannot be an existing bound, as that would\n    /// make the sub-region empty (which is non-representable in `NodeBoundedRegion`).\n    fn neighbor(self, neighbor_side: Side) -> NodeBoundedRegion {\n        debug_assert!(!self.is_bounded_by(neighbor_side));\n\n        let mut bounded_sides = self.bounded_sides;\n\n        // Don't allow backtracking\n        bounded_sides |= 1 << (neighbor_side as u8);\n\n        // As we're shifting perspective to a neighboring node, most sides now refer\n        // to different planes and no longer bound the region. 
The only exceptions to this\n        // are the shared side and its neighbors.\n        for side in Side::iter() {\n            if !side.adjacent_to(neighbor_side) && side != neighbor_side {\n                bounded_sides &= !(1 << (side as u8));\n            }\n        }\n        NodeBoundedRegion { bounded_sides }\n    }\n\n    /// Returns whether the node reachable via the given path is within the region.\n    /// Note that this path is required to be one of the shortest paths that can reach\n    /// that node.\n    fn contains_node(self, path: impl Iterator<Item = Side>) -> bool {\n        let mut current_region = self;\n        for side in path {\n            if current_region.is_bounded_by(side) {\n                return false;\n            }\n            current_region = current_region.neighbor(side);\n        }\n        true\n    }\n\n    /// Returns whether the given side bounds the region\n    fn is_bounded_by(self, side: Side) -> bool {\n        self.bounded_sides & (1 << (side as u8)) != 0\n    }\n\n    /// Adds the given side as a bound for the region\n    fn add_bound(&mut self, side: Side) {\n        self.bounded_sides |= 1 << (side as u8);\n    }\n}\n\n#[cfg(test)]\nmod test {\n    use super::*;\n    use crate::math::MPoint;\n    use approx::assert_abs_diff_eq;\n\n    #[test]\n    fn test_max_owned_horosphere_w() {\n        // This tests that `MAX_OWNED_HOROSPHERE_W` is set to the correct value.\n\n        // The worst case scenario would be a horosphere located directly in the direction of a dodeca's vertex.\n        // This is because the horosphere can be outside the dodeca, tangent to each of the planes that extend the\n        // dodeca's sides adjancent to that vertex. 
If that horosphere were brought any closer, it would intersect\n        // all three of those planes, making it impossible for any child node to own the dodeca and forcing the node\n        // in focus to own it.\n\n        // First, find an arbitrary horosphere in the direction of a vertex.\n        let example_vertex = Vertex::A;\n        let example_vertex_pos = example_vertex.dual_to_node() * MPoint::origin();\n        let mut horosphere_pos = MVector::from(example_vertex_pos);\n        horosphere_pos.w = horosphere_pos.xyz().norm();\n\n        // Then, scale the horosphere so that it's mip with each of the sides of the vertex is 1, making it tangent.\n        horosphere_pos /= horosphere_pos.mip(example_vertex.canonical_sides()[0].normal());\n        for side in example_vertex.canonical_sides() {\n            assert_abs_diff_eq!(horosphere_pos.mip(side.normal()), 1.0, epsilon = 1.0e-6);\n        }\n\n        // Finally, compare that horosphere's w-coordinate to `MAX_OWNED_HOROSPHERE_W`\n        assert_abs_diff_eq!(horosphere_pos.w, MAX_OWNED_HOROSPHERE_W, epsilon = 1.0e-6);\n    }\n\n    #[test]\n    fn node_bounded_region_intersect_example() {\n        let mut region0 = NodeBoundedRegion::unbounded();\n        region0.add_bound(Side::A);\n        region0.add_bound(Side::B);\n\n        let mut region1 = NodeBoundedRegion::unbounded();\n        region1.add_bound(Side::B);\n        region1.add_bound(Side::C);\n\n        let intersection = region0.intersect(region1);\n        for side in Side::iter() {\n            assert_eq!(\n                intersection.is_bounded_by(side),\n                [Side::A, Side::B, Side::C].contains(&side),\n                \"testing side {side:?}\",\n            );\n        }\n    }\n\n    #[test]\n    fn node_bounded_region_neighbor_example() {\n        let mut region = NodeBoundedRegion::unbounded();\n        // Sides adjacent to A\n        region.add_bound(Side::B);\n        region.add_bound(Side::C);\n        
region.add_bound(Side::D);\n\n        // Sides not adjacent to A\n        region.add_bound(Side::F);\n        region.add_bound(Side::G);\n        region.add_bound(Side::J);\n\n        let neighbor = region.neighbor(Side::A);\n\n        for side in Side::iter() {\n            assert_eq!(\n                neighbor.is_bounded_by(side),\n                [Side::A, Side::B, Side::C, Side::D].contains(&side),\n                \"testing side {side:?}\",\n            );\n        }\n    }\n\n    #[test]\n    fn node_bounded_region_node_and_descendents_example() {\n        let mut graph = Graph::new(1);\n        let node_id = graph.ensure_neighbor(NodeId::ROOT, Side::A);\n        let region = NodeBoundedRegion::node_and_descendents(&graph, node_id);\n\n        for side in Side::iter() {\n            assert_eq!(\n                region.is_bounded_by(side),\n                [Side::A].contains(&side),\n                \"testing side {side:?}\",\n            );\n        }\n    }\n\n    #[test]\n    fn node_bounded_region_contains_node_example() {\n        let mut region = NodeBoundedRegion::unbounded();\n        // Sides adjacent to A\n        region.add_bound(Side::B);\n        region.add_bound(Side::C);\n\n        // Sides not adjacent to A\n        region.add_bound(Side::F);\n        region.add_bound(Side::G);\n        region.add_bound(Side::J);\n\n        assert!(region.contains_node([Side::A].into_iter()));\n        assert!(!region.contains_node([Side::B].into_iter()));\n        assert!(region.contains_node([Side::A, Side::F].into_iter()));\n        assert!(!region.contains_node([Side::A, Side::B].into_iter()));\n    }\n}\n"
  },
  {
    "path": "common/src/worldgen/mod.rs",
    "content": "use horosphere::{HorosphereChunk, HorosphereNode};\nuse plane::Plane;\nuse rand::{Rng, SeedableRng, distr::Uniform};\nuse rand_distr::Normal;\nuse terraingen::VoronoiInfo;\n\nuse crate::{\n    dodeca::{Side, Vertex},\n    graph::{Graph, NodeId},\n    margins,\n    math::{self, MVector},\n    node::{ChunkId, VoxelData},\n    world::Material,\n};\n\nmod horosphere;\nmod plane;\nmod terraingen;\n\n#[derive(Clone, Copy, PartialEq, Debug)]\nenum NodeStateKind {\n    Sky,\n    DeepSky,\n    Land,\n    DeepLand,\n}\nuse NodeStateKind::*;\n\nimpl NodeStateKind {\n    const ROOT: Self = Land;\n\n    /// What state comes after this state, from a given side?\n    fn child(self, side: Side) -> Self {\n        match (self, side) {\n            (Sky, Side::A) => Land,\n            (Land, Side::A) => Sky,\n            (Sky, _) if !side.adjacent_to(Side::A) => DeepSky,\n            (Land, _) if !side.adjacent_to(Side::A) => DeepLand,\n            _ => self,\n        }\n    }\n}\n\n#[derive(Clone, Copy, PartialEq, Debug)]\nenum NodeStateRoad {\n    East,\n    DeepEast,\n    West,\n    DeepWest,\n}\nuse NodeStateRoad::*;\n\nuse rand_pcg::Pcg64Mcg;\n\nimpl NodeStateRoad {\n    const ROOT: Self = West;\n\n    /// What state comes after this state, from a given side?\n    fn child(self, side: Side) -> Self {\n        match (self, side) {\n            (East, Side::B) => West,\n            (West, Side::B) => East,\n            (East, _) if !side.adjacent_to(Side::B) => DeepEast,\n            (West, _) if !side.adjacent_to(Side::B) => DeepWest,\n            _ => self,\n        }\n    }\n}\n\n/// Contains a minimal amount of information about a node that can be deduced entirely from\n/// the NodeState of its parents.\npub struct PartialNodeState {\n    /// This becomes a real horosphere only if it doesn't interfere with another higher-priority horosphere.\n    /// See `HorosphereNode::has_priority` for the definition of priority.\n    candidate_horosphere: 
Option<HorosphereNode>,\n}\n\nimpl PartialNodeState {\n    pub fn new(graph: &Graph, node: NodeId) -> Self {\n        Self {\n            candidate_horosphere: HorosphereNode::new(graph, node),\n        }\n    }\n}\n\n/// Contains all information about a node used for world generation. Most world\n/// generation logic uses this information as a starting point. The `NodeState` is deduced\n/// from the `NodeState` of the node's parents, along with the `PartialNodeState` of the node\n/// itself and its \"peer\" nodes (See `peer_traverser`).\npub struct NodeState {\n    kind: NodeStateKind,\n    surface: Plane,\n    road_state: NodeStateRoad,\n    enviro: EnviroFactors,\n    horosphere: Option<HorosphereNode>,\n}\nimpl NodeState {\n    pub fn new(graph: &Graph, node: NodeId) -> Self {\n        let mut parents = graph\n            .parents(node)\n            .map(|(s, n)| ParentInfo {\n                node_id: n,\n                side: s,\n                node_state: graph.node_state(n),\n            })\n            .fuse();\n        let parents = [parents.next(), parents.next(), parents.next()];\n\n        let enviro = match (parents[0], parents[1]) {\n            (None, None) => EnviroFactors {\n                max_elevation: 0.0,\n                temperature: 0.0,\n                rainfall: 0.0,\n                blockiness: 0.0,\n            },\n            (Some(parent), None) => {\n                let spice = graph.hash_of(node) as u64;\n                EnviroFactors::varied_from(parent.node_state.enviro, spice)\n            }\n            (Some(parent_a), Some(parent_b)) => {\n                let ab_node = graph.neighbor(parent_a.node_id, parent_b.side).unwrap();\n                let ab_state = &graph.node_state(ab_node);\n                EnviroFactors::continue_from(\n                    parent_a.node_state.enviro,\n                    parent_b.node_state.enviro,\n                    ab_state.enviro,\n                )\n            }\n            _ => 
unreachable!(),\n        };\n\n        let kind = parents[0].map_or(NodeStateKind::ROOT, |p| p.node_state.kind.child(p.side));\n        let road_state = parents[0].map_or(NodeStateRoad::ROOT, |p| {\n            p.node_state.road_state.child(p.side)\n        });\n\n        let horosphere = graph\n            .partial_node_state(node)\n            .candidate_horosphere\n            .filter(|h| h.should_generate(graph, node));\n\n        Self {\n            kind,\n            surface: match kind {\n                Land => Plane::from(Side::A),\n                Sky => -Plane::from(Side::A),\n                _ => parents[0].map(|p| p.side * p.node_state.surface).unwrap(),\n            },\n            road_state,\n            enviro,\n            horosphere,\n        }\n    }\n\n    pub fn up_direction(&self) -> MVector<f32> {\n        *self.surface.scaled_normal()\n    }\n}\n\n#[derive(Clone, Copy)]\nstruct ParentInfo<'a> {\n    node_id: NodeId,\n    side: Side,\n    node_state: &'a NodeState,\n}\n\nstruct VoxelCoords {\n    counter: u32,\n    dimension: u8,\n}\n\nimpl VoxelCoords {\n    fn new(dimension: u8) -> Self {\n        VoxelCoords {\n            counter: 0,\n            dimension,\n        }\n    }\n}\n\nimpl Iterator for VoxelCoords {\n    type Item = (u8, u8, u8);\n\n    fn next(&mut self) -> Option<Self::Item> {\n        let dim = u32::from(self.dimension);\n\n        if self.counter == dim.pow(3) {\n            return None;\n        }\n\n        let result = (\n            (self.counter / dim.pow(2)) as u8,\n            ((self.counter / dim) % dim) as u8,\n            (self.counter % dim) as u8,\n        );\n\n        self.counter += 1;\n        Some(result)\n    }\n}\n\n/// Data needed to generate a chunk\npub struct ChunkParams {\n    /// Number of voxels along an edge\n    dimension: u8,\n    /// Which vertex of the containing node this chunk lies against\n    chunk: Vertex,\n    /// Random quantities stored at the eight adjacent nodes, used for terrain 
generation\n    env: ChunkIncidentEnviroFactors,\n    /// Reference plane for the terrain surface\n    surface: Plane,\n    /// Whether this chunk contains a segment of the road\n    is_road: bool,\n    /// Whether this chunk contains a section of the road's supports\n    is_road_support: bool,\n    /// Random quantity used to seed terrain gen\n    node_spice: u64,\n    /// Horosphere to place in the chunk\n    horosphere: Option<HorosphereChunk>,\n}\n\nimpl ChunkParams {\n    /// Extract data necessary to generate a chunk, generating new graph nodes if necessary\n    pub fn new(graph: &mut Graph, chunk: ChunkId) -> Self {\n        graph.ensure_node_state(chunk.node);\n        let env = chunk_incident_enviro_factors(graph, chunk);\n        let state = graph.node_state(chunk.node);\n        Self {\n            dimension: graph.layout().dimension(),\n            chunk: chunk.vertex,\n            env,\n            surface: state.surface,\n            is_road: state.kind == Sky\n                && ((state.road_state == East) || (state.road_state == West)),\n            is_road_support: ((state.kind == Land) || (state.kind == DeepLand))\n                && ((state.road_state == East) || (state.road_state == West)),\n            node_spice: graph.hash_of(chunk.node) as u64,\n            horosphere: state\n                .horosphere\n                .as_ref()\n                .map(|h| HorosphereChunk::new(h, chunk.vertex)),\n        }\n    }\n\n    pub fn chunk(&self) -> Vertex {\n        self.chunk\n    }\n\n    /// Generate voxels making up the chunk\n    pub fn generate_voxels(&self) -> VoxelData {\n        let mut voxels = VoxelData::Solid(Material::Void);\n        let mut rng = rand_pcg::Pcg64Mcg::seed_from_u64(hash(self.node_spice, self.chunk as u64));\n\n        self.generate_terrain(&mut voxels, &mut rng);\n\n        if let Some(horosphere) = &self.horosphere {\n            horosphere.generate(&mut voxels, self.dimension);\n        }\n\n        if self.is_road 
{\n            self.generate_road(&mut voxels);\n        } else if self.is_road_support {\n            self.generate_road_support(&mut voxels);\n        }\n\n        // TODO: Don't generate detailed data for solid chunks with no neighboring voids\n\n        self.generate_trees(&mut voxels, &mut rng);\n\n        margins::initialize_margins(self.dimension, &mut voxels);\n        voxels\n    }\n\n    /// Performs all terrain generation that can be done one voxel at a time and with\n    /// only the containing chunk's surrounding nodes' envirofactors.\n    fn generate_terrain(&self, voxels: &mut VoxelData, rng: &mut Pcg64Mcg) {\n        // Determine whether this chunk might contain a boundary between solid and void\n        let mut me_min = self.env.max_elevations[0];\n        let mut me_max = self.env.max_elevations[0];\n        for &me in &self.env.max_elevations[1..] {\n            me_min = me_min.min(me);\n            me_max = me_max.max(me);\n        }\n        // Maximum difference between elevations at the center of a chunk and any other point in the chunk\n        // TODO: Compute what this actually is, current value is a guess! 
Real one must be > 0.6\n        // empirically.\n        const ELEVATION_MARGIN: f32 = 0.7;\n        let center_elevation = self\n            .surface\n            .distance_to_chunk(self.chunk, &na::Vector3::repeat(0.5));\n        if center_elevation - ELEVATION_MARGIN > me_max / TERRAIN_SMOOTHNESS {\n            // The whole chunk is above ground\n            *voxels = VoxelData::Solid(Material::Void);\n            return;\n        }\n        if center_elevation + ELEVATION_MARGIN < me_min / TERRAIN_SMOOTHNESS && !self.is_road {\n            // The whole chunk is underground\n            *voxels = VoxelData::Solid(Material::Dirt);\n            return;\n        }\n\n        // Otherwise, the chunk might contain a solid/void boundary, so the full terrain generation\n        // code should run.\n        let normal = Normal::new(0.0, 0.03).unwrap();\n\n        for (x, y, z) in VoxelCoords::new(self.dimension) {\n            let coords = na::Vector3::new(x, y, z);\n            let center = voxel_center(self.dimension, coords);\n            let trilerp_coords = center.map(|x| (1.0 - x) * 0.5);\n\n            let rain = trilerp(&self.env.rainfalls, trilerp_coords) + rng.sample(normal);\n            let temp = trilerp(&self.env.temperatures, trilerp_coords) + rng.sample(normal);\n\n            // elev is calculated in multiple steps. 
The initial value elev_pre_terracing\n            // is used to calculate elev_pre_noise which is used to calculate elev.\n            let elev_pre_terracing = trilerp(&self.env.max_elevations, trilerp_coords);\n            let block = trilerp(&self.env.blockinesses, trilerp_coords);\n            let voxel_elevation = self.surface.distance_to_chunk(self.chunk, &center);\n            let strength = 0.4 / (1.0 + math::sqr(voxel_elevation));\n            let terracing_small = terracing_diff(elev_pre_terracing, block, 5.0, strength, 2.0);\n            let terracing_big = terracing_diff(elev_pre_terracing, block, 15.0, strength, -1.0);\n            // Small and big terracing effects must not sum to more than 1,\n            // otherwise the terracing fails to be (nonstrictly) monotonic\n            // and the terrain gets trenches ringing around its cliffs.\n            let elev_pre_noise = elev_pre_terracing + 0.6 * terracing_small + 0.4 * terracing_big;\n\n            // initial value dist_pre_noise is the difference between the voxel's distance\n            // from the guiding plane and the voxel's calculated elev value. 
It represents\n            // how far from the terrain surface a voxel is.\n            let dist_pre_noise = elev_pre_noise / TERRAIN_SMOOTHNESS - voxel_elevation;\n\n            // adding noise allows interfaces between strata to be rough\n            let elev = elev_pre_noise + TERRAIN_SMOOTHNESS * rng.sample(normal);\n\n            // Final value of dist is calculated in this roundabout way for greater control\n            // over how noise in elev affects dist.\n            let dist = if dist_pre_noise > 0.0 {\n                // The .max(0.0) keeps the top of the ground smooth\n                // while still allowing the surface/general terrain interface to be rough\n                (elev / TERRAIN_SMOOTHNESS - voxel_elevation).max(0.0)\n            } else {\n                // Distance not updated for updated elevation if distance was originally\n                // negative. This ensures that no voxels that would have otherwise\n                // been void are changed to a material---so no floating dirt blocks.\n                dist_pre_noise\n            };\n\n            if dist >= 0.0 {\n                let voxel_mat = VoronoiInfo::terraingen_voronoi(elev, rain, temp, dist);\n                voxels.data_mut(self.dimension)[index(self.dimension, coords)] = voxel_mat;\n            }\n        }\n    }\n\n    /// Places a road along the guiding plane.\n    fn generate_road(&self, voxels: &mut VoxelData) {\n        let plane = -Plane::from(Side::B);\n\n        for (x, y, z) in VoxelCoords::new(self.dimension) {\n            let coords = na::Vector3::new(x, y, z);\n            let center = voxel_center(self.dimension, coords);\n            let horizontal_distance = plane.distance_to_chunk(self.chunk, &center);\n            let elevation = self.surface.distance_to_chunk(self.chunk, &center);\n\n            if horizontal_distance > 0.3 || elevation > 0.9 {\n                continue;\n            }\n\n            let mut mat: Material = Material::Void;\n\n         
   if elevation < 0.075 {\n                if horizontal_distance < 0.15 {\n                    // Inner\n                    mat = Material::WhiteBrick;\n                } else {\n                    // Outer\n                    mat = Material::GreyBrick;\n                }\n            }\n\n            voxels.data_mut(self.dimension)[index(self.dimension, coords)] = mat;\n        }\n    }\n\n    /// Fills the half-plane below the road with wooden supports.\n    fn generate_road_support(&self, voxels: &mut VoxelData) {\n        if voxels.is_solid() && voxels.get(0) != Material::Void {\n            // There is guaranteed no void to fill with the road supports, so\n            // nothing to do here.\n            return;\n        }\n\n        let plane = -Plane::from(Side::B);\n\n        for (x, y, z) in VoxelCoords::new(self.dimension) {\n            let coords = na::Vector3::new(x, y, z);\n            let center = voxel_center(self.dimension, coords);\n            let horizontal_distance = plane.distance_to_chunk(self.chunk, &center);\n\n            if horizontal_distance > 0.3 {\n                continue;\n            }\n\n            let mat = if self.trussing_at(coords) {\n                Material::WoodPlanks\n            } else {\n                Material::Void\n            };\n\n            if mat != Material::Void {\n                voxels.data_mut(self.dimension)[index(self.dimension, coords)] = mat;\n            }\n        }\n    }\n\n    /// Make a truss-shaped template\n    fn trussing_at(&self, coords: na::Vector3<u8>) -> bool {\n        // Generates planar diagonals, but corner is offset\n        let mut criteria_met = 0_u32;\n        let x = coords[0];\n        let y = coords[1];\n        let z = coords[2];\n        let offset = self.dimension / 3;\n\n        // straight lines.\n        criteria_met += u32::from(x == offset);\n        criteria_met += u32::from(y == offset);\n        criteria_met += u32::from(z == offset);\n\n        // main diagonal\n 
       criteria_met += u32::from(x == y);\n        criteria_met += u32::from(y == z);\n        criteria_met += u32::from(x == z);\n\n        criteria_met >= 2\n    }\n\n    /// Plants trees on dirt and grass. Trees consist of a block of wood\n    /// and a block of leaves. The leaf block is on the opposite face of the\n    /// wood block as the ground block.\n    fn generate_trees(&self, voxels: &mut VoxelData, rng: &mut Pcg64Mcg) {\n        if voxels.is_solid() {\n            // No trees can be generated unless there's both land and air.\n            return;\n        }\n\n        if self.dimension <= 4 {\n            // The tree generation algorithm can crash when the chunk size is too small.\n            return;\n        }\n\n        // margins are added to keep voxels outside the chunk from being read/written\n        let random_position = Uniform::new(1, self.dimension - 1).unwrap();\n\n        let rain = self.env.rainfalls[0];\n        let tree_candidate_count =\n            (u32::from(self.dimension - 2).pow(3) as f32 * (rain / 100.0).clamp(0.0, 0.5)) as usize;\n        for _ in 0..tree_candidate_count {\n            let loc = na::Vector3::from_fn(|_, _| rng.sample(random_position));\n            let voxel_of_interest_index = index(self.dimension, loc);\n            let neighbor_data = self.voxel_neighbors(loc, voxels);\n\n            let num_void_neighbors = neighbor_data\n                .iter()\n                .filter(|n| n.material == Material::Void)\n                .count();\n\n            // Only plant a tree if there is exactly one adjacent block of dirt or grass\n            if num_void_neighbors == 5 {\n                for i in neighbor_data.iter() {\n                    if (i.material == Material::Dirt)\n                        || (i.material == Material::Grass)\n                        || (i.material == Material::MudGrass)\n                        || (i.material == Material::LushGrass)\n                        || (i.material == 
Material::TanGrass)\n                        || (i.material == Material::CoarseGrass)\n                    {\n                        voxels.data_mut(self.dimension)[voxel_of_interest_index] = Material::Wood;\n                        let leaf_location = index(self.dimension, i.coords_opposing);\n                        voxels.data_mut(self.dimension)[leaf_location] = Material::Leaves;\n                    }\n                }\n            }\n        }\n    }\n\n    /// Provides information on the type of material in a voxel's six neighbours\n    fn voxel_neighbors(&self, coords: na::Vector3<u8>, voxels: &VoxelData) -> [NeighborData; 6] {\n        [\n            self.neighbor(coords, -1, 0, 0, voxels),\n            self.neighbor(coords, 1, 0, 0, voxels),\n            self.neighbor(coords, 0, -1, 0, voxels),\n            self.neighbor(coords, 0, 1, 0, voxels),\n            self.neighbor(coords, 0, 0, -1, voxels),\n            self.neighbor(coords, 0, 0, 1, voxels),\n        ]\n    }\n\n    fn neighbor(\n        &self,\n        w: na::Vector3<u8>,\n        x: i8,\n        y: i8,\n        z: i8,\n        voxels: &VoxelData,\n    ) -> NeighborData {\n        let coords = na::Vector3::new(\n            (w.x as i8 + x) as u8,\n            (w.y as i8 + y) as u8,\n            (w.z as i8 + z) as u8,\n        );\n        let coords_opposing = na::Vector3::new(\n            (w.x as i8 - x) as u8,\n            (w.y as i8 - y) as u8,\n            (w.z as i8 - z) as u8,\n        );\n        let material = voxels.get(index(self.dimension, coords));\n\n        NeighborData {\n            coords_opposing,\n            material,\n        }\n    }\n}\n\nconst TERRAIN_SMOOTHNESS: f32 = 10.0;\n\nstruct NeighborData {\n    coords_opposing: na::Vector3<u8>,\n    material: Material,\n}\n\n#[derive(Copy, Clone)]\nstruct EnviroFactors {\n    max_elevation: f32,\n    temperature: f32,\n    rainfall: f32,\n    blockiness: f32,\n}\nimpl EnviroFactors {\n    fn varied_from(parent: Self, spice: 
u64) -> Self {\n        let mut rng = rand_pcg::Pcg64Mcg::seed_from_u64(spice);\n        let unif = Uniform::new_inclusive(-1.0, 1.0).unwrap();\n        let max_elevation = parent.max_elevation + rng.sample(Normal::new(0.0, 4.0).unwrap());\n\n        Self {\n            max_elevation,\n            temperature: parent.temperature + rng.sample(unif),\n            rainfall: parent.rainfall + rng.sample(unif),\n            blockiness: parent.blockiness + rng.sample(unif),\n        }\n    }\n    fn continue_from(a: Self, b: Self, ab: Self) -> Self {\n        Self {\n            max_elevation: a.max_elevation + (b.max_elevation - ab.max_elevation),\n            temperature: a.temperature + (b.temperature - ab.temperature),\n            rainfall: a.rainfall + (b.rainfall - ab.rainfall),\n            blockiness: a.blockiness + (b.blockiness - ab.blockiness),\n        }\n    }\n}\nimpl From<EnviroFactors> for (f32, f32, f32, f32) {\n    fn from(envirofactors: EnviroFactors) -> Self {\n        (\n            envirofactors.max_elevation,\n            envirofactors.temperature,\n            envirofactors.rainfall,\n            envirofactors.blockiness,\n        )\n    }\n}\nstruct ChunkIncidentEnviroFactors {\n    max_elevations: [f32; 8],\n    temperatures: [f32; 8],\n    rainfalls: [f32; 8],\n    blockinesses: [f32; 8],\n}\n\n/// Returns the max_elevation values for the nodes that are incident to this chunk,\n/// sorted and converted to f32 for use in functions like trilerp.\n///\n/// Returns `None` if not all incident nodes are populated.\nfn chunk_incident_enviro_factors(graph: &mut Graph, chunk: ChunkId) -> ChunkIncidentEnviroFactors {\n    let mut i = chunk.vertex.dual_vertices().map(|(_, path)| {\n        let node = path.fold(chunk.node, |node, side| graph.ensure_neighbor(node, side));\n        graph.ensure_node_state(node);\n        graph.node_state(node).enviro\n    });\n\n    // this is a bit cursed, but I don't want to collect into a vec because perf,\n    // and I 
can't just return an iterator because then something still references graph.\n    let (e1, t1, r1, b1) = i.next().unwrap().into();\n    let (e2, t2, r2, b2) = i.next().unwrap().into();\n    let (e3, t3, r3, b3) = i.next().unwrap().into();\n    let (e4, t4, r4, b4) = i.next().unwrap().into();\n    let (e5, t5, r5, b5) = i.next().unwrap().into();\n    let (e6, t6, r6, b6) = i.next().unwrap().into();\n    let (e7, t7, r7, b7) = i.next().unwrap().into();\n    let (e8, t8, r8, b8) = i.next().unwrap().into();\n\n    ChunkIncidentEnviroFactors {\n        max_elevations: [e1, e2, e3, e4, e5, e6, e7, e8],\n        temperatures: [t1, t2, t3, t4, t5, t6, t7, t8],\n        rainfalls: [r1, r2, r3, r4, r5, r6, r7, r8],\n        blockinesses: [b1, b2, b3, b4, b5, b6, b7, b8],\n    }\n}\n\n/// Linearly interpolate at interior and boundary of a cube given values at the eight corners.\nfn trilerp<N: na::RealField + Copy>(\n    &[v000, v001, v010, v011, v100, v101, v110, v111]: &[N; 8],\n    t: na::Vector3<N>,\n) -> N {\n    fn lerp<N: na::RealField + Copy>(v0: N, v1: N, t: N) -> N {\n        v0 * (N::one() - t) + v1 * t\n    }\n    fn bilerp<N: na::RealField + Copy>(v00: N, v01: N, v10: N, v11: N, t: na::Vector2<N>) -> N {\n        lerp(lerp(v00, v01, t.x), lerp(v10, v11, t.x), t.y)\n    }\n\n    lerp(\n        bilerp(v000, v100, v010, v110, t.xy()),\n        bilerp(v001, v101, v011, v111, t.xy()),\n        t.z,\n    )\n}\n\n/// serp interpolates between two values v0 and v1 over the interval [0, 1] by yielding\n/// v0 for [0, threshold], v1 for [1-threshold, 1], and linear interpolation in between\n/// such that the overall shape is an S-shaped piecewise function.\n/// threshold should be between 0 and 0.5.\nfn serp<N: na::RealField + Copy>(v0: N, v1: N, t: N, threshold: N) -> N {\n    if t < threshold {\n        v0\n    } else if t < (N::one() - threshold) {\n        let s = (t - threshold) / ((N::one() - threshold) - threshold);\n        v0 * (N::one() - s) + v1 * s\n    } else 
{\n        v1\n    }\n}\n\n/// Intended to produce a number that is added to elev_raw.\n/// block is a real number, threshold is in (0, strength) via a logistic function\n/// scale controls wavelength and amplitude. It is not 1:1 to the number of blocks in a period.\n/// strength represents extremity of terracing effect. Sensible values are in (0, 0.5).\n/// The greater the value of limiter, the stronger the bias of threshold towards 0.\nfn terracing_diff(elev_raw: f32, block: f32, scale: f32, strength: f32, limiter: f32) -> f32 {\n    let threshold: f32 = strength / (1.0 + libm::powf(2.0, limiter - block));\n    let elev_floor = libm::floorf(elev_raw / scale);\n    let elev_rem = elev_raw / scale - elev_floor;\n    scale * elev_floor + serp(0.0, scale, elev_rem, threshold) - elev_raw\n}\n\n/// Location of the center of a voxel in a unit chunk\nfn voxel_center(dimension: u8, voxel: na::Vector3<u8>) -> na::Vector3<f32> {\n    voxel.map(|x| f32::from(x) + 0.5) / f32::from(dimension)\n}\n\nfn index(dimension: u8, v: na::Vector3<u8>) -> usize {\n    let v = v.map(|x| usize::from(x) + 1);\n\n    // LWM = Length (of cube sides) With Margins\n    let lwm = usize::from(dimension) + 2;\n    v.x + v.y * lwm + v.z * lwm.pow(2)\n}\n\nfn hash(a: u64, b: u64) -> u64 {\n    use std::ops::BitXor;\n    a.rotate_left(5)\n        .bitxor(b)\n        .wrapping_mul(0x517c_c1b7_2722_0a95)\n}\n\n#[cfg(test)]\nmod test {\n    use super::*;\n    use approx::*;\n\n    const CHUNK_SIZE: u8 = 12;\n\n    #[test]\n    fn chunk_indexing_origin() {\n        // (0, 0, 0) in localized coords\n        let origin_index = 1 + (usize::from(CHUNK_SIZE) + 2) + (usize::from(CHUNK_SIZE) + 2).pow(2);\n\n        // simple sanity check\n        assert_eq!(index(CHUNK_SIZE, na::Vector3::repeat(0)), origin_index);\n    }\n\n    #[test]\n    fn chunk_indexing_absolute() {\n        let origin_index = 1 + (usize::from(CHUNK_SIZE) + 2) + (usize::from(CHUNK_SIZE) + 2).pow(2);\n        // (0.5, 0.5, 0.5) in localized 
coords\n        let center_index = index(CHUNK_SIZE, na::Vector3::repeat(CHUNK_SIZE / 2));\n        // the point farthest from the origin, (1, 1, 1) in localized coords\n        let anti_index = index(CHUNK_SIZE, na::Vector3::repeat(CHUNK_SIZE));\n\n        assert_eq!(index(CHUNK_SIZE, na::Vector3::new(0, 0, 0)), origin_index);\n\n        // biggest possible index in subchunk closest to origin still isn't the center\n        assert!(\n            index(\n                CHUNK_SIZE,\n                na::Vector3::new(CHUNK_SIZE / 2 - 1, CHUNK_SIZE / 2 - 1, CHUNK_SIZE / 2 - 1,)\n            ) < center_index\n        );\n        // but the first chunk in the subchunk across from that is\n        assert_eq!(\n            index(\n                CHUNK_SIZE,\n                na::Vector3::new(CHUNK_SIZE / 2, CHUNK_SIZE / 2, CHUNK_SIZE / 2)\n            ),\n            center_index\n        );\n\n        // biggest possible index in subchunk closest to anti_origin is still not quite\n        // the anti_origin\n        assert!(\n            index(\n                CHUNK_SIZE,\n                na::Vector3::new(CHUNK_SIZE - 1, CHUNK_SIZE - 1, CHUNK_SIZE - 1,)\n            ) < anti_index\n        );\n\n        // one is added in the chunk indexing so this works out fine, the\n        // domain is still CHUNK_SIZE because 0 is included.\n        assert_eq!(\n            index(\n                CHUNK_SIZE,\n                na::Vector3::new(CHUNK_SIZE - 1, CHUNK_SIZE - 1, CHUNK_SIZE - 1,)\n            ),\n            index(CHUNK_SIZE, na::Vector3::repeat(CHUNK_SIZE - 1))\n        );\n    }\n\n    #[test]\n    fn check_chunk_incident_max_elevations() {\n        let mut g = Graph::new(1);\n        for (i, path) in Vertex::A.dual_vertices().map(|(_, p)| p).enumerate() {\n            let new_node = path.fold(NodeId::ROOT, |node, side| g.ensure_neighbor(node, side));\n\n            // assigning state\n            g.ensure_node_state(new_node);\n            
g[new_node].state.as_mut().unwrap().enviro.max_elevation = i as f32 + 1.0;\n        }\n\n        let enviros = chunk_incident_enviro_factors(&mut g, ChunkId::new(NodeId::ROOT, Vertex::A));\n        for (i, max_elevation) in enviros.max_elevations.into_iter().enumerate() {\n            println!(\"{i}, {max_elevation}\");\n            assert_abs_diff_eq!(max_elevation, (i + 1) as f32, epsilon = 1e-8);\n        }\n\n        // see corresponding test for trilerp\n        let center_max_elevation = trilerp(&enviros.max_elevations, na::Vector3::repeat(0.5));\n        assert_abs_diff_eq!(center_max_elevation, 4.5, epsilon = 1e-8);\n\n        let mut checked_center = false;\n        let center = na::Vector3::repeat(CHUNK_SIZE / 2);\n        'top: for z in 0..CHUNK_SIZE {\n            for y in 0..CHUNK_SIZE {\n                for x in 0..CHUNK_SIZE {\n                    let a = na::Vector3::new(x, y, z);\n                    if a == center {\n                        checked_center = true;\n                        let c = center.map(|x| x as f32) / CHUNK_SIZE as f32;\n                        let center_max_elevation = trilerp(&enviros.max_elevations, c);\n                        assert_abs_diff_eq!(center_max_elevation, 4.5, epsilon = 1e-8);\n                        break 'top;\n                    }\n                }\n            }\n        }\n\n        if !checked_center {\n            panic!(\"Never checked trilerping center max_elevation!\");\n        }\n    }\n\n    #[test]\n    fn check_trilerp() {\n        assert_abs_diff_eq!(\n            1.0,\n            trilerp(\n                &[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n                na::Vector3::new(0.0, 0.0, 0.0),\n            ),\n            epsilon = 1e-8,\n        );\n        assert_abs_diff_eq!(\n            1.0,\n            trilerp(\n                &[0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],\n                na::Vector3::new(1.0, 0.0, 0.0),\n            ),\n            epsilon = 1e-8,\n        );\n   
     assert_abs_diff_eq!(\n            1.0,\n            trilerp(\n                &[0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n                na::Vector3::new(0.0, 1.0, 0.0),\n            ),\n            epsilon = 1e-8,\n        );\n        assert_abs_diff_eq!(\n            1.0,\n            trilerp(\n                &[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],\n                na::Vector3::new(1.0, 1.0, 0.0),\n            ),\n            epsilon = 1e-8,\n        );\n        assert_abs_diff_eq!(\n            1.0,\n            trilerp(\n                &[0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n                na::Vector3::new(0.0, 0.0, 1.0),\n            ),\n            epsilon = 1e-8,\n        );\n        assert_abs_diff_eq!(\n            1.0,\n            trilerp(\n                &[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],\n                na::Vector3::new(1.0, 0.0, 1.0),\n            ),\n            epsilon = 1e-8,\n        );\n        assert_abs_diff_eq!(\n            1.0,\n            trilerp(\n                &[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],\n                na::Vector3::new(0.0, 1.0, 1.0),\n            ),\n            epsilon = 1e-8,\n        );\n        assert_abs_diff_eq!(\n            1.0,\n            trilerp(\n                &[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],\n                na::Vector3::new(1.0, 1.0, 1.0),\n            ),\n            epsilon = 1e-8,\n        );\n\n        assert_abs_diff_eq!(\n            0.5,\n            trilerp(\n                &[0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0],\n                na::Vector3::new(0.5, 0.5, 0.5),\n            ),\n            epsilon = 1e-8,\n        );\n        assert_abs_diff_eq!(\n            0.5,\n            trilerp(\n                &[0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],\n                na::Vector3::new(0.5, 0.5, 0.5),\n            ),\n            epsilon = 1e-8,\n        );\n\n        assert_abs_diff_eq!(\n            4.5,\n            trilerp(\n                &[1.0, 5.0, 3.0, 
7.0, 2.0, 6.0, 4.0, 8.0],\n                na::Vector3::new(0.5, 0.5, 0.5),\n            ),\n            epsilon = 1e-8,\n        );\n    }\n\n    #[test]\n    fn check_voxel_iterable() {\n        let dimension = 12;\n\n        for (counter, (x, y, z)) in (VoxelCoords::new(dimension as u8)).enumerate() {\n            let index = z as usize + y as usize * dimension + x as usize * dimension.pow(2);\n            assert!(counter == index);\n        }\n    }\n}\n"
  },
  {
    "path": "common/src/worldgen/plane.rs",
    "content": "use std::ops::{Mul, Neg};\n\nuse crate::{\n    dodeca::{Side, Vertex},\n    math::{MDirection, MIsometry, MPoint, MVector},\n};\n\n/// A hyperbolic plane. This data structure uses a separate \"exponent\" field to\n/// allow for planes very far from the origin. This struct is meant to be used\n/// with world generation.\n#[derive(Debug, Copy, Clone)]\npub struct Plane {\n    scaled_normal: MVector<f32>,\n    exponent: f32, // Multiply \"normal\" by e^exponent to get the actual normal vector\n}\n\nimpl From<Side> for Plane {\n    /// A surface overlapping with a particular dodecahedron side\n    fn from(side: Side) -> Self {\n        Plane::from(*side.normal())\n    }\n}\n\nimpl From<MDirection<f32>> for Plane {\n    fn from(normal: MDirection<f32>) -> Self {\n        Plane {\n            scaled_normal: normal.into(),\n            exponent: 0.0,\n        }\n    }\n}\n\nimpl From<na::Unit<na::Vector3<f32>>> for Plane {\n    /// A plane passing through the origin\n    fn from(x: na::UnitVector3<f32>) -> Self {\n        Self::from(MDirection::from(x))\n    }\n}\n\nimpl Neg for Plane {\n    type Output = Self;\n    fn neg(self) -> Self {\n        Self {\n            scaled_normal: -self.scaled_normal,\n            exponent: self.exponent,\n        }\n    }\n}\n\nimpl Mul<Plane> for Side {\n    type Output = Plane;\n    /// Reflect a plane across the side\n    fn mul(self, rhs: Plane) -> Plane {\n        self.reflection() * rhs\n    }\n}\n\nimpl Mul<Plane> for &MIsometry<f32> {\n    type Output = Plane;\n    fn mul(self, rhs: Plane) -> Plane {\n        Plane {\n            scaled_normal: self * rhs.scaled_normal,\n            exponent: rhs.exponent,\n        }\n        .update_exponent()\n    }\n}\n\nimpl Plane {\n    /// Hyperbolic normal vector identifying the plane, possibly scaled to avoid\n    /// being too large to represent in an f32.\n    pub fn scaled_normal(&self) -> &MVector<f32> {\n        &self.scaled_normal\n    }\n\n    /// Shortest distance 
between the plane and a point\n    pub fn distance_to(&self, point: &MPoint<f32>) -> f32 {\n        if self.exponent == 0.0 {\n            libm::asinhf(self.scaled_normal.mip(point))\n        } else {\n            let mip_2 = self.scaled_normal.mip(point) * 2.0;\n            (libm::logf(mip_2.abs()) + self.exponent) * mip_2.signum()\n        }\n    }\n\n    /// Like `distance_to`, but using chunk coordinates for a chunk in the same node space\n    pub fn distance_to_chunk(&self, chunk: Vertex, coord: &na::Vector3<f32>) -> f32 {\n        let pos = (MVector::from(chunk.chunk_to_node() * coord.push(1.0))).normalized_point();\n        self.distance_to(&pos)\n    }\n\n    fn update_exponent(mut self) -> Self {\n        // For simplicity, we use the basic approximation of sinh whenever the\n        // exponent is nonzero, so we want to only update the exponent when\n        // we're sure such an approximation will be good enough. Once the\n        // vector's w-coordinate is above 1.0e8, the error becomes unnoticeable\n        // even with double-precision floating point. With single-precision,\n        // it's overkill, but there's no real downside to using a large value\n        // here.\n\n        // Note that this is a one-way operation. 
Since trying to use matrices\n        // to transform a plane closer to the origin would result in a kind of\n        // catastrophic cancellation, Plane is not designed to handle that kind\n        // of use case.\n        while self.scaled_normal.w.abs() > 1.0e8 {\n            self.scaled_normal *= libm::expf(-16.0);\n            self.exponent += 16.0;\n        }\n        self\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use approx::*;\n\n    #[test]\n    fn distance_sanity() {\n        for &axis in &[\n            na::Vector3::x_axis(),\n            na::Vector3::y_axis(),\n            na::Vector3::z_axis(),\n        ] {\n            for &distance in &[-1.5, 0.0, 1.5] {\n                let plane = Plane::from(axis);\n                assert_abs_diff_eq!(\n                    plane.distance_to(\n                        &(MIsometry::translation_along(&(axis.into_inner() * distance))\n                            * MPoint::origin())\n                    ),\n                    distance,\n                    epsilon = 1e-6\n                );\n            }\n        }\n    }\n\n    #[test]\n    fn check_surface_flipped() {\n        let root = Plane::from(Side::A);\n        assert_abs_diff_eq!(\n            root.distance_to_chunk(Vertex::A, &na::Vector3::new(-1.0, 1.0, 1.0)),\n            -root.distance_to_chunk(Vertex::J, &na::Vector3::new(-1.0, 1.0, 1.0)),\n            epsilon = 1e-5\n        );\n    }\n\n    #[test]\n    fn check_surface_on_plane() {\n        assert_abs_diff_eq!(\n            Plane::from(Side::A).distance_to_chunk(\n                Vertex::from_sides([Side::A, Side::B, Side::C]).unwrap(),\n                &na::Vector3::new(0.0, 0.7, 0.1), // The first 0.0 is important, the plane is the midplane of the cube in Side::A direction\n            ),\n            0.0,\n            epsilon = 1e-6,\n        );\n    }\n\n    #[test]\n    fn check_elevation_consistency() {\n        let abc = Vertex::from_sides([Side::A, Side::B, 
Side::C]).unwrap();\n\n        // A cube corner should have the same elevation seen from different cubes\n        assert_abs_diff_eq!(\n            Plane::from(Side::A).distance_to_chunk(abc, &na::Vector3::new(1.0, 1.0, 1.0)),\n            Plane::from(Side::A).distance_to_chunk(\n                Vertex::from_sides([Side::F, Side::H, Side::J]).unwrap(),\n                &na::Vector3::new(1.0, 1.0, 1.0),\n            ),\n            epsilon = 1e-6,\n        );\n\n        // The same corner should have the same distance_to_chunk when represented from the same cube at different corners\n        assert_abs_diff_eq!(\n            Plane::from(Side::A).distance_to_chunk(abc, &na::Vector3::new(0.0, 1.0, 1.0)),\n            (Side::A * Plane::from(Side::A))\n                .distance_to_chunk(abc, &na::Vector3::new(0.0, 1.0, 1.0),),\n            epsilon = 1e-6,\n        );\n\n        // Corners of midplane cubes separated by the midplane should have the same distance_to_chunk with a different sign\n        assert_abs_diff_eq!(\n            Plane::from(Side::A).distance_to_chunk(abc, &na::Vector3::new(1.0, 1.0, 1.0)),\n            -Plane::from(Side::A).distance_to_chunk(abc, &na::Vector3::new(-1.0, 1.0, 1.0)),\n            epsilon = 1e-6,\n        );\n\n        // Corners of midplane cubes not separated by the midplane should have the same distance_to_chunk\n        assert_abs_diff_eq!(\n            Plane::from(Side::A).distance_to_chunk(abc, &na::Vector3::new(1.0, 1.0, 1.0)),\n            Plane::from(Side::A).distance_to_chunk(abc, &na::Vector3::new(1.0, 1.0, -1.0)),\n            epsilon = 1e-6,\n        );\n    }\n\n    #[test]\n    fn large_distances() {\n        for offset in [10.0, -10.0] {\n            let mut plane = Plane::from(MDirection::x());\n            let point = MPoint::<f32>::origin();\n            let mut expected_distance = 0.0;\n\n            for _ in 0..200 {\n                plane = &MIsometry::translation_along(&(na::Vector3::x() * offset)) * plane;\n    
            expected_distance -= offset;\n                assert_abs_diff_eq!(\n                    plane.distance_to(&point),\n                    expected_distance,\n                    epsilon = 1.0e-6\n                );\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "common/src/worldgen/terraingen.rs",
    "content": "use crate::{math, world::Material};\n\nconst GENERAL_HIGH: [VoronoiInfo; 113] = [\n    VoronoiInfo::new(Material::ClayLoam, 10.50, -10.50),\n    VoronoiInfo::new(Material::ClayLoam, 10.50, -7.50),\n    VoronoiInfo::new(Material::SiltyLoam, 10.50, -4.50),\n    VoronoiInfo::new(Material::Mud, 10.50, -1.50),\n    VoronoiInfo::new(Material::Mud, 10.50, 1.50),\n    VoronoiInfo::new(Material::SandyLoam, 10.50, 4.50),\n    VoronoiInfo::new(Material::SandyLoam, 10.50, 7.50),\n    VoronoiInfo::new(Material::SandyLoam, 10.50, 10.50),\n    VoronoiInfo::new(Material::ClayLoam, 9.00, -9.00),\n    VoronoiInfo::new(Material::ClayLoam, 9.00, -6.00),\n    VoronoiInfo::new(Material::SiltyLoam, 9.00, -3.00),\n    VoronoiInfo::new(Material::Mud, 9.00, 0.00),\n    VoronoiInfo::new(Material::Dirt, 9.00, 3.00),\n    VoronoiInfo::new(Material::SandyLoam, 9.00, 6.00),\n    VoronoiInfo::new(Material::SandyLoam, 9.00, 9.00),\n    VoronoiInfo::new(Material::Clay, 7.50, -10.50),\n    VoronoiInfo::new(Material::Clay, 7.50, -7.50),\n    VoronoiInfo::new(Material::SiltyLoam, 7.50, -4.50),\n    VoronoiInfo::new(Material::SiltyLoam, 7.50, -1.50),\n    VoronoiInfo::new(Material::Mud, 7.50, 1.50),\n    VoronoiInfo::new(Material::SandyLoam, 7.50, 4.50),\n    VoronoiInfo::new(Material::SandyLoam, 7.50, 7.50),\n    VoronoiInfo::new(Material::SandyLoam, 7.50, 10.50),\n    VoronoiInfo::new(Material::Clay, 6.00, -9.00),\n    VoronoiInfo::new(Material::Clay, 6.00, -6.00),\n    VoronoiInfo::new(Material::SiltyLoam, 6.00, -3.00),\n    VoronoiInfo::new(Material::Mud, 6.00, 0.00),\n    VoronoiInfo::new(Material::SandyLoam, 6.00, 3.00),\n    VoronoiInfo::new(Material::SandyLoam, 6.00, 6.00),\n    VoronoiInfo::new(Material::SandyLoam, 6.00, 9.00),\n    VoronoiInfo::new(Material::Clay, 4.50, -10.50),\n    VoronoiInfo::new(Material::Clay, 4.50, -7.50),\n    VoronoiInfo::new(Material::Dirt, 4.50, -4.50),\n    VoronoiInfo::new(Material::SiltyLoam, 4.50, -1.50),\n    VoronoiInfo::new(Material::Dirt, 
4.50, 1.50),\n    VoronoiInfo::new(Material::SandyLoam, 4.50, 4.50),\n    VoronoiInfo::new(Material::SandyLoam, 4.50, 7.50),\n    VoronoiInfo::new(Material::Sandstone, 4.50, 10.50),\n    VoronoiInfo::new(Material::Dolomite, 3.00, -9.00),\n    VoronoiInfo::new(Material::Gravel, 3.00, -6.00),\n    VoronoiInfo::new(Material::Dirt, 3.00, -3.00),\n    VoronoiInfo::new(Material::Dirt, 3.00, 0.00),\n    VoronoiInfo::new(Material::Dirt, 3.00, 3.00),\n    VoronoiInfo::new(Material::Sandstone, 3.00, 6.00),\n    VoronoiInfo::new(Material::Sandstone, 3.00, 9.00),\n    VoronoiInfo::new(Material::Dolomite, 1.50, -10.50),\n    VoronoiInfo::new(Material::Dolomite, 1.50, -7.50),\n    VoronoiInfo::new(Material::Gravel, 1.50, -4.50),\n    VoronoiInfo::new(Material::Dirt, 1.50, -1.50),\n    VoronoiInfo::new(Material::Dirt, 1.50, 1.50),\n    VoronoiInfo::new(Material::Dirt, 1.50, 4.50),\n    VoronoiInfo::new(Material::Sandstone, 1.50, 7.50),\n    VoronoiInfo::new(Material::Sandstone, 1.50, 10.50),\n    VoronoiInfo::new(Material::Dolomite, 0.00, -9.00),\n    VoronoiInfo::new(Material::Dolomite, 0.00, -6.00),\n    VoronoiInfo::new(Material::Dirt, 0.00, -3.00),\n    VoronoiInfo::new(Material::Dirt, 0.00, 0.00),\n    VoronoiInfo::new(Material::Dirt, 0.00, 3.00),\n    VoronoiInfo::new(Material::Sandstone, 0.00, 6.00),\n    VoronoiInfo::new(Material::Sandstone, 0.00, 9.00),\n    VoronoiInfo::new(Material::Ice, -1.50, -10.50),\n    VoronoiInfo::new(Material::Ice, -1.50, -7.50),\n    VoronoiInfo::new(Material::Gravel, -1.50, -4.50),\n    VoronoiInfo::new(Material::Dirt, -1.50, -1.50),\n    VoronoiInfo::new(Material::Sandstone, -1.50, 1.50),\n    VoronoiInfo::new(Material::Sandstone, -1.50, 4.50),\n    VoronoiInfo::new(Material::Sandstone, -1.50, 7.50),\n    VoronoiInfo::new(Material::Sandstone, -1.50, 10.50),\n    VoronoiInfo::new(Material::Ice, -3.00, -9.00),\n    VoronoiInfo::new(Material::Ice, -3.00, -6.00),\n    VoronoiInfo::new(Material::Gravel, -3.00, -3.00),\n    
VoronoiInfo::new(Material::Dirt, -3.00, 0.00),\n    VoronoiInfo::new(Material::Sandstone, -3.00, 3.00),\n    VoronoiInfo::new(Material::Sandstone, -3.00, 6.00),\n    VoronoiInfo::new(Material::RedSandstone, -3.00, 9.00),\n    VoronoiInfo::new(Material::Ice, -4.50, -10.50),\n    VoronoiInfo::new(Material::Ice, -4.50, -7.50),\n    VoronoiInfo::new(Material::Ice, -4.50, -4.50),\n    VoronoiInfo::new(Material::Dirt, -4.50, -1.50),\n    VoronoiInfo::new(Material::Dirt, -4.50, 1.50),\n    VoronoiInfo::new(Material::Sandstone, -4.50, 4.50),\n    VoronoiInfo::new(Material::RedSandstone, -4.50, 7.50),\n    VoronoiInfo::new(Material::Sandstone, -4.50, 10.50),\n    VoronoiInfo::new(Material::Ice, -6.00, -9.00),\n    VoronoiInfo::new(Material::Ice, -6.00, -6.00),\n    VoronoiInfo::new(Material::Dirt, -6.00, -3.00),\n    VoronoiInfo::new(Material::Dirt, -6.00, 0.00),\n    VoronoiInfo::new(Material::Sandstone, -6.00, 3.00),\n    VoronoiInfo::new(Material::Sandstone, -6.00, 6.00),\n    VoronoiInfo::new(Material::RedSandstone, -6.00, 9.00),\n    VoronoiInfo::new(Material::Ice, -7.50, -10.50),\n    VoronoiInfo::new(Material::Ice, -7.50, -7.50),\n    VoronoiInfo::new(Material::Ice, -7.50, -4.50),\n    VoronoiInfo::new(Material::Dirt, -7.50, -1.50),\n    VoronoiInfo::new(Material::Dirt, -7.50, 1.50),\n    VoronoiInfo::new(Material::Sandstone, -7.50, 4.50),\n    VoronoiInfo::new(Material::RedSandstone, -7.50, 7.50),\n    VoronoiInfo::new(Material::RedSandstone, -7.50, 10.50),\n    VoronoiInfo::new(Material::Ice, -9.00, -9.00),\n    VoronoiInfo::new(Material::Ice, -9.00, -6.00),\n    VoronoiInfo::new(Material::Gravel, -9.00, -3.00),\n    VoronoiInfo::new(Material::Dirt, -9.00, 0.00),\n    VoronoiInfo::new(Material::Sandstone, -9.00, 3.00),\n    VoronoiInfo::new(Material::RedSandstone, -9.00, 6.00),\n    VoronoiInfo::new(Material::RedSandstone, -9.00, 9.00),\n    VoronoiInfo::new(Material::Ice, -10.50, -10.50),\n    VoronoiInfo::new(Material::Ice, -10.50, -7.50),\n    
VoronoiInfo::new(Material::Gravel, -10.50, -4.50),\n    VoronoiInfo::new(Material::Dirt, -10.50, -1.50),\n    VoronoiInfo::new(Material::Dirt, -10.50, 1.50),\n    VoronoiInfo::new(Material::RedSandstone, -10.50, 4.50),\n    VoronoiInfo::new(Material::RedSandstone, -10.50, 7.50),\n    VoronoiInfo::new(Material::RedSandstone, -10.50, 10.50),\n];\n\nconst GENERAL_MED: [VoronoiInfo; 113] = [\n    VoronoiInfo::new(Material::Clay, 10.50, -10.50),\n    VoronoiInfo::new(Material::Clay, 10.50, -7.50),\n    VoronoiInfo::new(Material::SiltyLoam, 10.50, -4.50),\n    VoronoiInfo::new(Material::Mud, 10.50, -1.50),\n    VoronoiInfo::new(Material::Mud, 10.50, 1.50),\n    VoronoiInfo::new(Material::SandyLoam, 10.50, 4.50),\n    VoronoiInfo::new(Material::SandyLoam, 10.50, 7.50),\n    VoronoiInfo::new(Material::SandyLoam, 10.50, 10.50),\n    VoronoiInfo::new(Material::Clay, 9.00, -9.00),\n    VoronoiInfo::new(Material::Clay, 9.00, -6.00),\n    VoronoiInfo::new(Material::SiltyLoam, 9.00, -3.00),\n    VoronoiInfo::new(Material::Mud, 9.00, 0.00),\n    VoronoiInfo::new(Material::Dirt, 9.00, 3.00),\n    VoronoiInfo::new(Material::SandyLoam, 9.00, 6.00),\n    VoronoiInfo::new(Material::SandyLoam, 9.00, 9.00),\n    VoronoiInfo::new(Material::Clay, 7.50, -10.50),\n    VoronoiInfo::new(Material::SiltyLoam, 7.50, -7.50),\n    VoronoiInfo::new(Material::SiltyLoam, 7.50, -4.50),\n    VoronoiInfo::new(Material::Mud, 7.50, -1.50),\n    VoronoiInfo::new(Material::Mud, 7.50, 1.50),\n    VoronoiInfo::new(Material::SandyLoam, 7.50, 4.50),\n    VoronoiInfo::new(Material::SandyLoam, 7.50, 7.50),\n    VoronoiInfo::new(Material::SandyLoam, 7.50, 10.50),\n    VoronoiInfo::new(Material::Silt, 6.00, -9.00),\n    VoronoiInfo::new(Material::Silt, 6.00, -6.00),\n    VoronoiInfo::new(Material::SiltyLoam, 6.00, -3.00),\n    VoronoiInfo::new(Material::Mud, 6.00, 0.00),\n    VoronoiInfo::new(Material::Dirt, 6.00, 3.00),\n    VoronoiInfo::new(Material::SandyLoam, 6.00, 6.00),\n    
VoronoiInfo::new(Material::SandyLoam, 6.00, 9.00),\n    VoronoiInfo::new(Material::Silt, 4.50, -10.50),\n    VoronoiInfo::new(Material::Silt, 4.50, -7.50),\n    VoronoiInfo::new(Material::SiltyLoam, 4.50, -4.50),\n    VoronoiInfo::new(Material::Dirt, 4.50, -1.50),\n    VoronoiInfo::new(Material::Dirt, 4.50, 1.50),\n    VoronoiInfo::new(Material::Dirt, 4.50, 4.50),\n    VoronoiInfo::new(Material::SandyLoam, 4.50, 7.50),\n    VoronoiInfo::new(Material::SandyLoam, 4.50, 10.50),\n    VoronoiInfo::new(Material::Dolomite, 3.00, -9.00),\n    VoronoiInfo::new(Material::Silt, 3.00, -6.00),\n    VoronoiInfo::new(Material::SiltyLoam, 3.00, -3.00),\n    VoronoiInfo::new(Material::Dirt, 3.00, 0.00),\n    VoronoiInfo::new(Material::Dirt, 3.00, 3.00),\n    VoronoiInfo::new(Material::SandyLoam, 3.00, 6.00),\n    VoronoiInfo::new(Material::SandyLoam, 3.00, 9.00),\n    VoronoiInfo::new(Material::Dolomite, 1.50, -10.50),\n    VoronoiInfo::new(Material::Dolomite, 1.50, -7.50),\n    VoronoiInfo::new(Material::SiltyLoam, 1.50, -4.50),\n    VoronoiInfo::new(Material::SiltyLoam, 1.50, -1.50),\n    VoronoiInfo::new(Material::Dirt, 1.50, 1.50),\n    VoronoiInfo::new(Material::Dirt, 1.50, 4.50),\n    VoronoiInfo::new(Material::SandyLoam, 1.50, 7.50),\n    VoronoiInfo::new(Material::Sandstone, 1.50, 10.50),\n    VoronoiInfo::new(Material::Dolomite, 0.00, -9.00),\n    VoronoiInfo::new(Material::Dolomite, 0.00, -6.00),\n    VoronoiInfo::new(Material::Dirt, 0.00, -3.00),\n    VoronoiInfo::new(Material::Dirt, 0.00, 0.00),\n    VoronoiInfo::new(Material::Dirt, 0.00, 3.00),\n    VoronoiInfo::new(Material::Shale, 0.00, 6.00),\n    VoronoiInfo::new(Material::Sandstone, 0.00, 9.00),\n    VoronoiInfo::new(Material::Dolomite, -1.50, -10.50),\n    VoronoiInfo::new(Material::Dolomite, -1.50, -7.50),\n    VoronoiInfo::new(Material::SiltyLoam, -1.50, -4.50),\n    VoronoiInfo::new(Material::Dirt, -1.50, -1.50),\n    VoronoiInfo::new(Material::Dirt, -1.50, 1.50),\n    VoronoiInfo::new(Material::Shale, -1.50, 
4.50),\n    VoronoiInfo::new(Material::Sandstone, -1.50, 7.50),\n    VoronoiInfo::new(Material::Sandstone, -1.50, 10.50),\n    VoronoiInfo::new(Material::Ice, -3.00, -9.00),\n    VoronoiInfo::new(Material::Dolomite, -3.00, -6.00),\n    VoronoiInfo::new(Material::Dirt, -3.00, -3.00),\n    VoronoiInfo::new(Material::Dirt, -3.00, 0.00),\n    VoronoiInfo::new(Material::Dirt, -3.00, 3.00),\n    VoronoiInfo::new(Material::Shale, -3.00, 6.00),\n    VoronoiInfo::new(Material::RedSandstone, -3.00, 9.00),\n    VoronoiInfo::new(Material::Dolomite, -4.50, -10.50),\n    VoronoiInfo::new(Material::Ice, -4.50, -7.50),\n    VoronoiInfo::new(Material::Dirt, -4.50, -4.50),\n    VoronoiInfo::new(Material::Dirt, -4.50, -1.50),\n    VoronoiInfo::new(Material::Dirt, -4.50, 1.50),\n    VoronoiInfo::new(Material::Sandstone, -4.50, 4.50),\n    VoronoiInfo::new(Material::RedSandstone, -4.50, 7.50),\n    VoronoiInfo::new(Material::RedSandstone, -4.50, 10.50),\n    VoronoiInfo::new(Material::Ice, -6.00, -9.00),\n    VoronoiInfo::new(Material::Gravel, -6.00, -6.00),\n    VoronoiInfo::new(Material::Dirt, -6.00, -3.00),\n    VoronoiInfo::new(Material::Dirt, -6.00, 0.00),\n    VoronoiInfo::new(Material::Dirt, -6.00, 3.00),\n    VoronoiInfo::new(Material::RedSandstone, -6.00, 6.00),\n    VoronoiInfo::new(Material::RedSandstone, -6.00, 9.00),\n    VoronoiInfo::new(Material::Ice, -7.50, -10.50),\n    VoronoiInfo::new(Material::Ice, -7.50, -7.50),\n    VoronoiInfo::new(Material::Gravel, -7.50, -4.50),\n    VoronoiInfo::new(Material::Dirt, -7.50, -1.50),\n    VoronoiInfo::new(Material::Dirt, -7.50, 1.50),\n    VoronoiInfo::new(Material::Sandstone, -7.50, 4.50),\n    VoronoiInfo::new(Material::RedSandstone, -7.50, 7.50),\n    VoronoiInfo::new(Material::RedSandstone, -7.50, 10.50),\n    VoronoiInfo::new(Material::Ice, -9.00, -9.00),\n    VoronoiInfo::new(Material::Ice, -9.00, -6.00),\n    VoronoiInfo::new(Material::Dirt, -9.00, -3.00),\n    VoronoiInfo::new(Material::Dirt, -9.00, 0.00),\n    
VoronoiInfo::new(Material::Dirt, -9.00, 3.00),\n    VoronoiInfo::new(Material::RedSandstone, -9.00, 6.00),\n    VoronoiInfo::new(Material::RedSandstone, -9.00, 9.00),\n    VoronoiInfo::new(Material::Ice, -10.50, -10.50),\n    VoronoiInfo::new(Material::Ice, -10.50, -7.50),\n    VoronoiInfo::new(Material::Gravel, -10.50, -4.50),\n    VoronoiInfo::new(Material::Dirt, -10.50, -1.50),\n    VoronoiInfo::new(Material::Dirt, -10.50, 1.50),\n    VoronoiInfo::new(Material::Sandstone, -10.50, 4.50),\n    VoronoiInfo::new(Material::RedSandstone, -10.50, 7.50),\n    VoronoiInfo::new(Material::RedSandstone, -10.50, 10.50),\n];\n\nconst GENERAL_LOW: [VoronoiInfo; 113] = [\n    VoronoiInfo::new(Material::Dolomite, 10.50, -10.50),\n    VoronoiInfo::new(Material::Limestone, 10.50, -7.50),\n    VoronoiInfo::new(Material::Limestone, 10.50, -4.50),\n    VoronoiInfo::new(Material::Limestone, 10.50, -1.50),\n    VoronoiInfo::new(Material::Limestone, 10.50, 1.50),\n    VoronoiInfo::new(Material::Limestone, 10.50, 4.50),\n    VoronoiInfo::new(Material::Marble, 10.50, 7.50),\n    VoronoiInfo::new(Material::Marble, 10.50, 10.50),\n    VoronoiInfo::new(Material::Dolomite, 9.00, -9.00),\n    VoronoiInfo::new(Material::Limestone, 9.00, -6.00),\n    VoronoiInfo::new(Material::Limestone, 9.00, -3.00),\n    VoronoiInfo::new(Material::Limestone, 9.00, 0.00),\n    VoronoiInfo::new(Material::Limestone, 9.00, 3.00),\n    VoronoiInfo::new(Material::Limestone, 9.00, 6.00),\n    VoronoiInfo::new(Material::Marble, 9.00, 9.00),\n    VoronoiInfo::new(Material::Dolomite, 7.50, -10.50),\n    VoronoiInfo::new(Material::Dolomite, 7.50, -7.50),\n    VoronoiInfo::new(Material::Limestone, 7.50, -4.50),\n    VoronoiInfo::new(Material::Limestone, 7.50, -1.50),\n    VoronoiInfo::new(Material::Limestone, 7.50, 1.50),\n    VoronoiInfo::new(Material::Limestone, 7.50, 4.50),\n    VoronoiInfo::new(Material::Marble, 7.50, 7.50),\n    VoronoiInfo::new(Material::Marble, 7.50, 10.50),\n    
VoronoiInfo::new(Material::Dolomite, 6.00, -9.00),\n    VoronoiInfo::new(Material::Dolomite, 6.00, -6.00),\n    VoronoiInfo::new(Material::Limestone, 6.00, -3.00),\n    VoronoiInfo::new(Material::Limestone, 6.00, 0.00),\n    VoronoiInfo::new(Material::Limestone, 6.00, 3.00),\n    VoronoiInfo::new(Material::Marble, 6.00, 6.00),\n    VoronoiInfo::new(Material::Limestone, 6.00, 9.00),\n    VoronoiInfo::new(Material::Dolomite, 4.50, -10.50),\n    VoronoiInfo::new(Material::Limestone, 4.50, -7.50),\n    VoronoiInfo::new(Material::Limestone, 4.50, -4.50),\n    VoronoiInfo::new(Material::Limestone, 4.50, -1.50),\n    VoronoiInfo::new(Material::Limestone, 4.50, 1.50),\n    VoronoiInfo::new(Material::Limestone, 4.50, 4.50),\n    VoronoiInfo::new(Material::Limestone, 4.50, 7.50),\n    VoronoiInfo::new(Material::Limestone, 4.50, 10.50),\n    VoronoiInfo::new(Material::Dolomite, 3.00, -9.00),\n    VoronoiInfo::new(Material::Slate, 3.00, -6.00),\n    VoronoiInfo::new(Material::Limestone, 3.00, -3.00),\n    VoronoiInfo::new(Material::Limestone, 3.00, 0.00),\n    VoronoiInfo::new(Material::Limestone, 3.00, 3.00),\n    VoronoiInfo::new(Material::Limestone, 3.00, 6.00),\n    VoronoiInfo::new(Material::Limestone, 3.00, 9.00),\n    VoronoiInfo::new(Material::Dolomite, 1.50, -10.50),\n    VoronoiInfo::new(Material::Slate, 1.50, -7.50),\n    VoronoiInfo::new(Material::Shale, 1.50, -4.50),\n    VoronoiInfo::new(Material::Shale, 1.50, -1.50),\n    VoronoiInfo::new(Material::Limestone, 1.50, 1.50),\n    VoronoiInfo::new(Material::Limestone, 1.50, 4.50),\n    VoronoiInfo::new(Material::Marble, 1.50, 7.50),\n    VoronoiInfo::new(Material::Marble, 1.50, 10.50),\n    VoronoiInfo::new(Material::Slate, 0.00, -9.00),\n    VoronoiInfo::new(Material::Slate, 0.00, -6.00),\n    VoronoiInfo::new(Material::Shale, 0.00, -3.00),\n    VoronoiInfo::new(Material::Shale, 0.00, 0.00),\n    VoronoiInfo::new(Material::Limestone, 0.00, 3.00),\n    VoronoiInfo::new(Material::Limestone, 0.00, 6.00),\n    
VoronoiInfo::new(Material::Marble, 0.00, 9.00),\n    VoronoiInfo::new(Material::Dolomite, -1.50, -10.50),\n    VoronoiInfo::new(Material::Slate, -1.50, -7.50),\n    VoronoiInfo::new(Material::Shale, -1.50, -4.50),\n    VoronoiInfo::new(Material::Marble, -1.50, -1.50),\n    VoronoiInfo::new(Material::Marble, -1.50, 1.50),\n    VoronoiInfo::new(Material::Marble, -1.50, 4.50),\n    VoronoiInfo::new(Material::Limestone, -1.50, 7.50),\n    VoronoiInfo::new(Material::Marble, -1.50, 10.50),\n    VoronoiInfo::new(Material::Slate, -3.00, -9.00),\n    VoronoiInfo::new(Material::Slate, -3.00, -6.00),\n    VoronoiInfo::new(Material::Marble, -3.00, -3.00),\n    VoronoiInfo::new(Material::Marble, -3.00, 0.00),\n    VoronoiInfo::new(Material::Marble, -3.00, 3.00),\n    VoronoiInfo::new(Material::Shale, -3.00, 6.00),\n    VoronoiInfo::new(Material::Shale, -3.00, 9.00),\n    VoronoiInfo::new(Material::Dolomite, -4.50, -10.50),\n    VoronoiInfo::new(Material::Slate, -4.50, -7.50),\n    VoronoiInfo::new(Material::Marble, -4.50, -4.50),\n    VoronoiInfo::new(Material::Marble, -4.50, -1.50),\n    VoronoiInfo::new(Material::Marble, -4.50, 1.50),\n    VoronoiInfo::new(Material::Shale, -4.50, 4.50),\n    VoronoiInfo::new(Material::Shale, -4.50, 7.50),\n    VoronoiInfo::new(Material::Slate, -4.50, 10.50),\n    VoronoiInfo::new(Material::Dolomite, -6.00, -9.00),\n    VoronoiInfo::new(Material::Slate, -6.00, -6.00),\n    VoronoiInfo::new(Material::Marble, -6.00, -3.00),\n    VoronoiInfo::new(Material::Marble, -6.00, 0.00),\n    VoronoiInfo::new(Material::Marble, -6.00, 3.00),\n    VoronoiInfo::new(Material::Shale, -6.00, 6.00),\n    VoronoiInfo::new(Material::Shale, -6.00, 9.00),\n    VoronoiInfo::new(Material::Slate, -7.50, -10.50),\n    VoronoiInfo::new(Material::Slate, -7.50, -7.50),\n    VoronoiInfo::new(Material::Slate, -7.50, -4.50),\n    VoronoiInfo::new(Material::Marble, -7.50, -1.50),\n    VoronoiInfo::new(Material::Shale, -7.50, 1.50),\n    VoronoiInfo::new(Material::Shale, -7.50, 
4.50),\n    VoronoiInfo::new(Material::Slate, -7.50, 7.50),\n    VoronoiInfo::new(Material::Shale, -7.50, 10.50),\n    VoronoiInfo::new(Material::Slate, -9.00, -9.00),\n    VoronoiInfo::new(Material::Slate, -9.00, -6.00),\n    VoronoiInfo::new(Material::Slate, -9.00, -3.00),\n    VoronoiInfo::new(Material::Marble, -9.00, 0.00),\n    VoronoiInfo::new(Material::Shale, -9.00, 3.00),\n    VoronoiInfo::new(Material::Shale, -9.00, 6.00),\n    VoronoiInfo::new(Material::Shale, -9.00, 9.00),\n    VoronoiInfo::new(Material::Slate, -10.50, -10.50),\n    VoronoiInfo::new(Material::Slate, -10.50, -7.50),\n    VoronoiInfo::new(Material::Slate, -10.50, -4.50),\n    VoronoiInfo::new(Material::Marble, -10.50, -1.50),\n    VoronoiInfo::new(Material::Marble, -10.50, 1.50),\n    VoronoiInfo::new(Material::Shale, -10.50, 4.50),\n    VoronoiInfo::new(Material::Shale, -10.50, 7.50),\n    VoronoiInfo::new(Material::Shale, -10.50, 10.50),\n];\n\nconst GENERAL_DEEP: [VoronoiInfo; 113] = [\n    VoronoiInfo::new(Material::Water, 10.50, -10.50),\n    VoronoiInfo::new(Material::Diorite, 10.50, -7.50),\n    VoronoiInfo::new(Material::Diorite, 10.50, -4.50),\n    VoronoiInfo::new(Material::Diorite, 10.50, -1.50),\n    VoronoiInfo::new(Material::Granite, 10.50, 1.50),\n    VoronoiInfo::new(Material::Granite, 10.50, 4.50),\n    VoronoiInfo::new(Material::Granite, 10.50, 7.50),\n    VoronoiInfo::new(Material::Granite, 10.50, 10.50),\n    VoronoiInfo::new(Material::Diorite, 9.00, -9.00),\n    VoronoiInfo::new(Material::Diorite, 9.00, -6.00),\n    VoronoiInfo::new(Material::Diorite, 9.00, -3.00),\n    VoronoiInfo::new(Material::Diorite, 9.00, 0.00),\n    VoronoiInfo::new(Material::Granite, 9.00, 3.00),\n    VoronoiInfo::new(Material::Granite, 9.00, 6.00),\n    VoronoiInfo::new(Material::Granite, 9.00, 9.00),\n    VoronoiInfo::new(Material::Diorite, 7.50, -10.50),\n    VoronoiInfo::new(Material::Diorite, 7.50, -7.50),\n    VoronoiInfo::new(Material::Diorite, 7.50, -4.50),\n    
VoronoiInfo::new(Material::Diorite, 7.50, -1.50),\n    VoronoiInfo::new(Material::Diorite, 7.50, 1.50),\n    VoronoiInfo::new(Material::Granite, 7.50, 4.50),\n    VoronoiInfo::new(Material::Granite, 7.50, 7.50),\n    VoronoiInfo::new(Material::Granite, 7.50, 10.50),\n    VoronoiInfo::new(Material::Diorite, 6.00, -9.00),\n    VoronoiInfo::new(Material::Diorite, 6.00, -6.00),\n    VoronoiInfo::new(Material::Granite, 6.00, -3.00),\n    VoronoiInfo::new(Material::Granite, 6.00, 0.00),\n    VoronoiInfo::new(Material::Granite, 6.00, 3.00),\n    VoronoiInfo::new(Material::Granite, 6.00, 6.00),\n    VoronoiInfo::new(Material::Granite, 6.00, 9.00),\n    VoronoiInfo::new(Material::Andesite, 4.50, -10.50),\n    VoronoiInfo::new(Material::Diorite, 4.50, -7.50),\n    VoronoiInfo::new(Material::Diorite, 4.50, -4.50),\n    VoronoiInfo::new(Material::Granite, 4.50, -1.50),\n    VoronoiInfo::new(Material::Granite, 4.50, 1.50),\n    VoronoiInfo::new(Material::Granite, 4.50, 4.50),\n    VoronoiInfo::new(Material::Granite, 4.50, 7.50),\n    VoronoiInfo::new(Material::Granite, 4.50, 10.50),\n    VoronoiInfo::new(Material::Andesite, 3.00, -9.00),\n    VoronoiInfo::new(Material::Andesite, 3.00, -6.00),\n    VoronoiInfo::new(Material::Andesite, 3.00, -3.00),\n    VoronoiInfo::new(Material::Granite, 3.00, 0.00),\n    VoronoiInfo::new(Material::Gabbro, 3.00, 3.00),\n    VoronoiInfo::new(Material::Granite, 3.00, 6.00),\n    VoronoiInfo::new(Material::Granite, 3.00, 9.00),\n    VoronoiInfo::new(Material::Andesite, 1.50, -10.50),\n    VoronoiInfo::new(Material::Andesite, 1.50, -7.50),\n    VoronoiInfo::new(Material::Andesite, 1.50, -4.50),\n    VoronoiInfo::new(Material::Granite, 1.50, -1.50),\n    VoronoiInfo::new(Material::Gabbro, 1.50, 1.50),\n    VoronoiInfo::new(Material::Gabbro, 1.50, 4.50),\n    VoronoiInfo::new(Material::Gabbro, 1.50, 7.50),\n    VoronoiInfo::new(Material::Granite, 1.50, 10.50),\n    VoronoiInfo::new(Material::Andesite, 0.00, -9.00),\n    
VoronoiInfo::new(Material::Andesite, 0.00, -6.00),\n    VoronoiInfo::new(Material::Andesite, 0.00, -3.00),\n    VoronoiInfo::new(Material::Granite, 0.00, 0.00),\n    VoronoiInfo::new(Material::Gabbro, 0.00, 3.00),\n    VoronoiInfo::new(Material::Gabbro, 0.00, 6.00),\n    VoronoiInfo::new(Material::Granite, 0.00, 9.00),\n    VoronoiInfo::new(Material::Andesite, -1.50, -10.50),\n    VoronoiInfo::new(Material::Andesite, -1.50, -7.50),\n    VoronoiInfo::new(Material::Andesite, -1.50, -4.50),\n    VoronoiInfo::new(Material::Granite, -1.50, -1.50),\n    VoronoiInfo::new(Material::Granite, -1.50, 1.50),\n    VoronoiInfo::new(Material::Granite, -1.50, 4.50),\n    VoronoiInfo::new(Material::Gabbro, -1.50, 7.50),\n    VoronoiInfo::new(Material::Granite, -1.50, 10.50),\n    VoronoiInfo::new(Material::Andesite, -3.00, -9.00),\n    VoronoiInfo::new(Material::Andesite, -3.00, -6.00),\n    VoronoiInfo::new(Material::Andesite, -3.00, -3.00),\n    VoronoiInfo::new(Material::Gabbro, -3.00, 0.00),\n    VoronoiInfo::new(Material::Granite, -3.00, 3.00),\n    VoronoiInfo::new(Material::Gabbro, -3.00, 6.00),\n    VoronoiInfo::new(Material::Gabbro, -3.00, 9.00),\n    VoronoiInfo::new(Material::Andesite, -4.50, -10.50),\n    VoronoiInfo::new(Material::Olivine, -4.50, -7.50),\n    VoronoiInfo::new(Material::Andesite, -4.50, -4.50),\n    VoronoiInfo::new(Material::Gabbro, -4.50, -1.50),\n    VoronoiInfo::new(Material::Gabbro, -4.50, 1.50),\n    VoronoiInfo::new(Material::Gabbro, -4.50, 4.50),\n    VoronoiInfo::new(Material::Gabbro, -4.50, 7.50),\n    VoronoiInfo::new(Material::Gabbro, -4.50, 10.50),\n    VoronoiInfo::new(Material::Olivine, -6.00, -9.00),\n    VoronoiInfo::new(Material::Andesite, -6.00, -6.00),\n    VoronoiInfo::new(Material::Olivine, -6.00, -3.00),\n    VoronoiInfo::new(Material::Gabbro, -6.00, 0.00),\n    VoronoiInfo::new(Material::Gabbro, -6.00, 3.00),\n    VoronoiInfo::new(Material::Basalt, -6.00, 6.00),\n    VoronoiInfo::new(Material::Gabbro, -6.00, 9.00),\n    
VoronoiInfo::new(Material::Andesite, -7.50, -10.50),\n    VoronoiInfo::new(Material::Olivine, -7.50, -7.50),\n    VoronoiInfo::new(Material::Olivine, -7.50, -4.50),\n    VoronoiInfo::new(Material::Gabbro, -7.50, -1.50),\n    VoronoiInfo::new(Material::Olivine, -7.50, 1.50),\n    VoronoiInfo::new(Material::Gabbro, -7.50, 4.50),\n    VoronoiInfo::new(Material::Basalt, -7.50, 7.50),\n    VoronoiInfo::new(Material::Gabbro, -7.50, 10.50),\n    VoronoiInfo::new(Material::Olivine, -9.00, -9.00),\n    VoronoiInfo::new(Material::Olivine, -9.00, -6.00),\n    VoronoiInfo::new(Material::Olivine, -9.00, -3.00),\n    VoronoiInfo::new(Material::Olivine, -9.00, 0.00),\n    VoronoiInfo::new(Material::Basalt, -9.00, 3.00),\n    VoronoiInfo::new(Material::Basalt, -9.00, 6.00),\n    VoronoiInfo::new(Material::Basalt, -9.00, 9.00),\n    VoronoiInfo::new(Material::Olivine, -10.50, -10.50),\n    VoronoiInfo::new(Material::Olivine, -10.50, -7.50),\n    VoronoiInfo::new(Material::Olivine, -10.50, -4.50),\n    VoronoiInfo::new(Material::Olivine, -10.50, -1.50),\n    VoronoiInfo::new(Material::Basalt, -10.50, 1.50),\n    VoronoiInfo::new(Material::Basalt, -10.50, 4.50),\n    VoronoiInfo::new(Material::Basalt, -10.50, 7.50),\n    VoronoiInfo::new(Material::Lava, -10.50, 10.50),\n];\n\nconst SURFACE_HIGH: [VoronoiInfo; 113] = [\n    VoronoiInfo::new(Material::Dirt, 10.50, -10.50),\n    VoronoiInfo::new(Material::Dirt, 10.50, -7.50),\n    VoronoiInfo::new(Material::Grass, 10.50, -4.50),\n    VoronoiInfo::new(Material::MudGrass, 10.50, -1.50),\n    VoronoiInfo::new(Material::MudGrass, 10.50, 1.50),\n    VoronoiInfo::new(Material::LushGrass, 10.50, 4.50),\n    VoronoiInfo::new(Material::LushGrass, 10.50, 7.50),\n    VoronoiInfo::new(Material::LushGrass, 10.50, 10.50),\n    VoronoiInfo::new(Material::Dirt, 9.00, -9.00),\n    VoronoiInfo::new(Material::CoarseGrass, 9.00, -6.00),\n    VoronoiInfo::new(Material::Grass, 9.00, -3.00),\n    VoronoiInfo::new(Material::MudGrass, 9.00, 0.00),\n    
VoronoiInfo::new(Material::MudGrass, 9.00, 3.00),\n    VoronoiInfo::new(Material::LushGrass, 9.00, 6.00),\n    VoronoiInfo::new(Material::LushGrass, 9.00, 9.00),\n    VoronoiInfo::new(Material::Clay, 7.50, -10.50),\n    VoronoiInfo::new(Material::Dirt, 7.50, -7.50),\n    VoronoiInfo::new(Material::Grass, 7.50, -4.50),\n    VoronoiInfo::new(Material::Grass, 7.50, -1.50),\n    VoronoiInfo::new(Material::MudGrass, 7.50, 1.50),\n    VoronoiInfo::new(Material::LushGrass, 7.50, 4.50),\n    VoronoiInfo::new(Material::LushGrass, 7.50, 7.50),\n    VoronoiInfo::new(Material::LushGrass, 7.50, 10.50),\n    VoronoiInfo::new(Material::Clay, 6.00, -9.00),\n    VoronoiInfo::new(Material::Dirt, 6.00, -6.00),\n    VoronoiInfo::new(Material::Grass, 6.00, -3.00),\n    VoronoiInfo::new(Material::MudGrass, 6.00, 0.00),\n    VoronoiInfo::new(Material::Grass, 6.00, 3.00),\n    VoronoiInfo::new(Material::Grass, 6.00, 6.00),\n    VoronoiInfo::new(Material::LushGrass, 6.00, 9.00),\n    VoronoiInfo::new(Material::Clay, 4.50, -10.50),\n    VoronoiInfo::new(Material::Dirt, 4.50, -7.50),\n    VoronoiInfo::new(Material::CoarseGrass, 4.50, -4.50),\n    VoronoiInfo::new(Material::Grass, 4.50, -1.50),\n    VoronoiInfo::new(Material::Grass, 4.50, 1.50),\n    VoronoiInfo::new(Material::Grass, 4.50, 4.50),\n    VoronoiInfo::new(Material::LushGrass, 4.50, 7.50),\n    VoronoiInfo::new(Material::LushGrass, 4.50, 10.50),\n    VoronoiInfo::new(Material::Grass, 3.00, -9.00),\n    VoronoiInfo::new(Material::CoarseGrass, 3.00, -6.00),\n    VoronoiInfo::new(Material::Grass, 3.00, -3.00),\n    VoronoiInfo::new(Material::Grass, 3.00, 0.00),\n    VoronoiInfo::new(Material::Grass, 3.00, 3.00),\n    VoronoiInfo::new(Material::Grass, 3.00, 6.00),\n    VoronoiInfo::new(Material::LushGrass, 3.00, 9.00),\n    VoronoiInfo::new(Material::Gravel, 1.50, -10.50),\n    VoronoiInfo::new(Material::Gravel, 1.50, -7.50),\n    VoronoiInfo::new(Material::Gravel, 1.50, -4.50),\n    VoronoiInfo::new(Material::Grass, 1.50, -1.50),\n   
 VoronoiInfo::new(Material::Grass, 1.50, 1.50),\n    VoronoiInfo::new(Material::TanGrass, 1.50, 4.50),\n    VoronoiInfo::new(Material::TanGrass, 1.50, 7.50),\n    VoronoiInfo::new(Material::Grass, 1.50, 10.50),\n    VoronoiInfo::new(Material::Snow, 0.00, -9.00),\n    VoronoiInfo::new(Material::Snow, 0.00, -6.00),\n    VoronoiInfo::new(Material::Grass, 0.00, -3.00),\n    VoronoiInfo::new(Material::Grass, 0.00, 0.00),\n    VoronoiInfo::new(Material::TanGrass, 0.00, 3.00),\n    VoronoiInfo::new(Material::Sand, 0.00, 6.00),\n    VoronoiInfo::new(Material::Sand, 0.00, 9.00),\n    VoronoiInfo::new(Material::Snow, -1.50, -10.50),\n    VoronoiInfo::new(Material::Snow, -1.50, -7.50),\n    VoronoiInfo::new(Material::Gravel, -1.50, -4.50),\n    VoronoiInfo::new(Material::Grass, -1.50, -1.50),\n    VoronoiInfo::new(Material::TanGrass, -1.50, 1.50),\n    VoronoiInfo::new(Material::Sand, -1.50, 4.50),\n    VoronoiInfo::new(Material::Sand, -1.50, 7.50),\n    VoronoiInfo::new(Material::Sand, -1.50, 10.50),\n    VoronoiInfo::new(Material::Snow, -3.00, -9.00),\n    VoronoiInfo::new(Material::Snow, -3.00, -6.00),\n    VoronoiInfo::new(Material::Gravel, -3.00, -3.00),\n    VoronoiInfo::new(Material::Grass, -3.00, 0.00),\n    VoronoiInfo::new(Material::Sand, -3.00, 3.00),\n    VoronoiInfo::new(Material::Sand, -3.00, 6.00),\n    VoronoiInfo::new(Material::Sand, -3.00, 9.00),\n    VoronoiInfo::new(Material::Snow, -4.50, -10.50),\n    VoronoiInfo::new(Material::Snow, -4.50, -7.50),\n    VoronoiInfo::new(Material::Snow, -4.50, -4.50),\n    VoronoiInfo::new(Material::CoarseGrass, -4.50, -1.50),\n    VoronoiInfo::new(Material::CoarseGrass, -4.50, 1.50),\n    VoronoiInfo::new(Material::Sand, -4.50, 4.50),\n    VoronoiInfo::new(Material::RedSand, -4.50, 7.50),\n    VoronoiInfo::new(Material::Sand, -4.50, 10.50),\n    VoronoiInfo::new(Material::Snow, -6.00, -9.00),\n    VoronoiInfo::new(Material::Snow, -6.00, -6.00),\n    VoronoiInfo::new(Material::CoarseGrass, -6.00, -3.00),\n    
VoronoiInfo::new(Material::CoarseGrass, -6.00, 0.00),\n    VoronoiInfo::new(Material::Sand, -6.00, 3.00),\n    VoronoiInfo::new(Material::Sand, -6.00, 6.00),\n    VoronoiInfo::new(Material::RedSand, -6.00, 9.00),\n    VoronoiInfo::new(Material::Snow, -7.50, -10.50),\n    VoronoiInfo::new(Material::Snow, -7.50, -7.50),\n    VoronoiInfo::new(Material::Snow, -7.50, -4.50),\n    VoronoiInfo::new(Material::CoarseGrass, -7.50, -1.50),\n    VoronoiInfo::new(Material::Grass, -7.50, 1.50),\n    VoronoiInfo::new(Material::Sand, -7.50, 4.50),\n    VoronoiInfo::new(Material::RedSand, -7.50, 7.50),\n    VoronoiInfo::new(Material::RedSand, -7.50, 10.50),\n    VoronoiInfo::new(Material::Snow, -9.00, -9.00),\n    VoronoiInfo::new(Material::Snow, -9.00, -6.00),\n    VoronoiInfo::new(Material::Snow, -9.00, -3.00),\n    VoronoiInfo::new(Material::CoarseGrass, -9.00, 0.00),\n    VoronoiInfo::new(Material::Sand, -9.00, 3.00),\n    VoronoiInfo::new(Material::RedSand, -9.00, 6.00),\n    VoronoiInfo::new(Material::RedSand, -9.00, 9.00),\n    VoronoiInfo::new(Material::Snow, -10.50, -10.50),\n    VoronoiInfo::new(Material::Snow, -10.50, -7.50),\n    VoronoiInfo::new(Material::Snow, -10.50, -4.50),\n    VoronoiInfo::new(Material::Dirt, -10.50, -1.50),\n    VoronoiInfo::new(Material::CoarseGrass, -10.50, 1.50),\n    VoronoiInfo::new(Material::Sand, -10.50, 4.50),\n    VoronoiInfo::new(Material::RedSand, -10.50, 7.50),\n    VoronoiInfo::new(Material::RedSand, -10.50, 10.50),\n];\n\nconst SURFACE_MED: [VoronoiInfo; 113] = [\n    VoronoiInfo::new(Material::Dirt, 10.50, -10.50),\n    VoronoiInfo::new(Material::Dirt, 10.50, -7.50),\n    VoronoiInfo::new(Material::Grass, 10.50, -4.50),\n    VoronoiInfo::new(Material::Grass, 10.50, -1.50),\n    VoronoiInfo::new(Material::MudGrass, 10.50, 1.50),\n    VoronoiInfo::new(Material::LushGrass, 10.50, 4.50),\n    VoronoiInfo::new(Material::LushGrass, 10.50, 7.50),\n    VoronoiInfo::new(Material::LushGrass, 10.50, 10.50),\n    
VoronoiInfo::new(Material::Dirt, 9.00, -9.00),\n    VoronoiInfo::new(Material::Grass, 9.00, -6.00),\n    VoronoiInfo::new(Material::Grass, 9.00, -3.00),\n    VoronoiInfo::new(Material::MudGrass, 9.00, 0.00),\n    VoronoiInfo::new(Material::MudGrass, 9.00, 3.00),\n    VoronoiInfo::new(Material::LushGrass, 9.00, 6.00),\n    VoronoiInfo::new(Material::LushGrass, 9.00, 9.00),\n    VoronoiInfo::new(Material::Dirt, 7.50, -10.50),\n    VoronoiInfo::new(Material::Grass, 7.50, -7.50),\n    VoronoiInfo::new(Material::Grass, 7.50, -4.50),\n    VoronoiInfo::new(Material::MudGrass, 7.50, -1.50),\n    VoronoiInfo::new(Material::MudGrass, 7.50, 1.50),\n    VoronoiInfo::new(Material::LushGrass, 7.50, 4.50),\n    VoronoiInfo::new(Material::LushGrass, 7.50, 7.50),\n    VoronoiInfo::new(Material::LushGrass, 7.50, 10.50),\n    VoronoiInfo::new(Material::CoarseGrass, 6.00, -9.00),\n    VoronoiInfo::new(Material::Grass, 6.00, -6.00),\n    VoronoiInfo::new(Material::Grass, 6.00, -3.00),\n    VoronoiInfo::new(Material::MudGrass, 6.00, 0.00),\n    VoronoiInfo::new(Material::LushGrass, 6.00, 3.00),\n    VoronoiInfo::new(Material::LushGrass, 6.00, 6.00),\n    VoronoiInfo::new(Material::LushGrass, 6.00, 9.00),\n    VoronoiInfo::new(Material::CoarseGrass, 4.50, -10.50),\n    VoronoiInfo::new(Material::CoarseGrass, 4.50, -7.50),\n    VoronoiInfo::new(Material::Grass, 4.50, -4.50),\n    VoronoiInfo::new(Material::MudGrass, 4.50, -1.50),\n    VoronoiInfo::new(Material::Grass, 4.50, 1.50),\n    VoronoiInfo::new(Material::LushGrass, 4.50, 4.50),\n    VoronoiInfo::new(Material::LushGrass, 4.50, 7.50),\n    VoronoiInfo::new(Material::LushGrass, 4.50, 10.50),\n    VoronoiInfo::new(Material::CoarseGrass, 3.00, -9.00),\n    VoronoiInfo::new(Material::Grass, 3.00, -6.00),\n    VoronoiInfo::new(Material::Grass, 3.00, -3.00),\n    VoronoiInfo::new(Material::MudGrass, 3.00, 0.00),\n    VoronoiInfo::new(Material::TanGrass, 3.00, 3.00),\n    VoronoiInfo::new(Material::LushGrass, 3.00, 6.00),\n    
VoronoiInfo::new(Material::LushGrass, 3.00, 9.00),\n    VoronoiInfo::new(Material::CoarseGrass, 1.50, -10.50),\n    VoronoiInfo::new(Material::CoarseGrass, 1.50, -7.50),\n    VoronoiInfo::new(Material::Grass, 1.50, -4.50),\n    VoronoiInfo::new(Material::Grass, 1.50, -1.50),\n    VoronoiInfo::new(Material::Grass, 1.50, 1.50),\n    VoronoiInfo::new(Material::TanGrass, 1.50, 4.50),\n    VoronoiInfo::new(Material::Sand, 1.50, 7.50),\n    VoronoiInfo::new(Material::Sand, 1.50, 10.50),\n    VoronoiInfo::new(Material::Snow, 0.00, -9.00),\n    VoronoiInfo::new(Material::CoarseGrass, 0.00, -6.00),\n    VoronoiInfo::new(Material::Grass, 0.00, -3.00),\n    VoronoiInfo::new(Material::Grass, 0.00, 0.00),\n    VoronoiInfo::new(Material::TanGrass, 0.00, 3.00),\n    VoronoiInfo::new(Material::TanGrass, 0.00, 6.00),\n    VoronoiInfo::new(Material::Sand, 0.00, 9.00),\n    VoronoiInfo::new(Material::CoarseGrass, -1.50, -10.50),\n    VoronoiInfo::new(Material::Snow, -1.50, -7.50),\n    VoronoiInfo::new(Material::CoarseGrass, -1.50, -4.50),\n    VoronoiInfo::new(Material::Grass, -1.50, -1.50),\n    VoronoiInfo::new(Material::TanGrass, -1.50, 1.50),\n    VoronoiInfo::new(Material::Sand, -1.50, 4.50),\n    VoronoiInfo::new(Material::Sand, -1.50, 7.50),\n    VoronoiInfo::new(Material::Sand, -1.50, 10.50),\n    VoronoiInfo::new(Material::Snow, -3.00, -9.00),\n    VoronoiInfo::new(Material::Snow, -3.00, -6.00),\n    VoronoiInfo::new(Material::CoarseGrass, -3.00, -3.00),\n    VoronoiInfo::new(Material::Grass, -3.00, 0.00),\n    VoronoiInfo::new(Material::TanGrass, -3.00, 3.00),\n    VoronoiInfo::new(Material::Sand, -3.00, 6.00),\n    VoronoiInfo::new(Material::RedSand, -3.00, 9.00),\n    VoronoiInfo::new(Material::Snow, -4.50, -10.50),\n    VoronoiInfo::new(Material::Snow, -4.50, -7.50),\n    VoronoiInfo::new(Material::Snow, -4.50, -4.50),\n    VoronoiInfo::new(Material::Grass, -4.50, -1.50),\n    VoronoiInfo::new(Material::TanGrass, -4.50, 1.50),\n    VoronoiInfo::new(Material::Sand, 
-4.50, 4.50),\n    VoronoiInfo::new(Material::RedSand, -4.50, 7.50),\n    VoronoiInfo::new(Material::RedSand, -4.50, 10.50),\n    VoronoiInfo::new(Material::Snow, -6.00, -9.00),\n    VoronoiInfo::new(Material::Snow, -6.00, -6.00),\n    VoronoiInfo::new(Material::CoarseGrass, -6.00, -3.00),\n    VoronoiInfo::new(Material::Grass, -6.00, 0.00),\n    VoronoiInfo::new(Material::Sand, -6.00, 3.00),\n    VoronoiInfo::new(Material::Sand, -6.00, 6.00),\n    VoronoiInfo::new(Material::RedSand, -6.00, 9.00),\n    VoronoiInfo::new(Material::Snow, -7.50, -10.50),\n    VoronoiInfo::new(Material::Snow, -7.50, -7.50),\n    VoronoiInfo::new(Material::CoarseGrass, -7.50, -4.50),\n    VoronoiInfo::new(Material::CoarseGrass, -7.50, -1.50),\n    VoronoiInfo::new(Material::TanGrass, -7.50, 1.50),\n    VoronoiInfo::new(Material::Sand, -7.50, 4.50),\n    VoronoiInfo::new(Material::RedSand, -7.50, 7.50),\n    VoronoiInfo::new(Material::RedSand, -7.50, 10.50),\n    VoronoiInfo::new(Material::Snow, -9.00, -9.00),\n    VoronoiInfo::new(Material::Snow, -9.00, -6.00),\n    VoronoiInfo::new(Material::CoarseGrass, -9.00, -3.00),\n    VoronoiInfo::new(Material::Grass, -9.00, 0.00),\n    VoronoiInfo::new(Material::Sand, -9.00, 3.00),\n    VoronoiInfo::new(Material::Sand, -9.00, 6.00),\n    VoronoiInfo::new(Material::RedSand, -9.00, 9.00),\n    VoronoiInfo::new(Material::Snow, -10.50, -10.50),\n    VoronoiInfo::new(Material::Snow, -10.50, -7.50),\n    VoronoiInfo::new(Material::Snow, -10.50, -4.50),\n    VoronoiInfo::new(Material::CoarseGrass, -10.50, -1.50),\n    VoronoiInfo::new(Material::CoarseGrass, -10.50, 1.50),\n    VoronoiInfo::new(Material::Sand, -10.50, 4.50),\n    VoronoiInfo::new(Material::RedSand, -10.50, 7.50),\n    VoronoiInfo::new(Material::RedSand, -10.50, 10.50),\n];\n\nconst SURFACE_LOW: [VoronoiInfo; 113] = [\n    VoronoiInfo::new(Material::Dirt, 10.50, -10.50),\n    VoronoiInfo::new(Material::Dirt, 10.50, -7.50),\n    VoronoiInfo::new(Material::Dirt, 10.50, -4.50),\n    
VoronoiInfo::new(Material::Grass, 10.50, -1.50),\n    VoronoiInfo::new(Material::LushGrass, 10.50, 1.50),\n    VoronoiInfo::new(Material::LushGrass, 10.50, 4.50),\n    VoronoiInfo::new(Material::LushGrass, 10.50, 7.50),\n    VoronoiInfo::new(Material::LushGrass, 10.50, 10.50),\n    VoronoiInfo::new(Material::Dirt, 9.00, -9.00),\n    VoronoiInfo::new(Material::Dirt, 9.00, -6.00),\n    VoronoiInfo::new(Material::Grass, 9.00, -3.00),\n    VoronoiInfo::new(Material::Grass, 9.00, 0.00),\n    VoronoiInfo::new(Material::LushGrass, 9.00, 3.00),\n    VoronoiInfo::new(Material::LushGrass, 9.00, 6.00),\n    VoronoiInfo::new(Material::LushGrass, 9.00, 9.00),\n    VoronoiInfo::new(Material::Dirt, 7.50, -10.50),\n    VoronoiInfo::new(Material::Dirt, 7.50, -7.50),\n    VoronoiInfo::new(Material::CoarseGrass, 7.50, -4.50),\n    VoronoiInfo::new(Material::Grass, 7.50, -1.50),\n    VoronoiInfo::new(Material::Grass, 7.50, 1.50),\n    VoronoiInfo::new(Material::LushGrass, 7.50, 4.50),\n    VoronoiInfo::new(Material::LushGrass, 7.50, 7.50),\n    VoronoiInfo::new(Material::LushGrass, 7.50, 10.50),\n    VoronoiInfo::new(Material::CoarseGrass, 6.00, -9.00),\n    VoronoiInfo::new(Material::Grass, 6.00, -6.00),\n    VoronoiInfo::new(Material::Grass, 6.00, -3.00),\n    VoronoiInfo::new(Material::Grass, 6.00, 0.00),\n    VoronoiInfo::new(Material::Grass, 6.00, 3.00),\n    VoronoiInfo::new(Material::LushGrass, 6.00, 6.00),\n    VoronoiInfo::new(Material::LushGrass, 6.00, 9.00),\n    VoronoiInfo::new(Material::CoarseGrass, 4.50, -10.50),\n    VoronoiInfo::new(Material::CoarseGrass, 4.50, -7.50),\n    VoronoiInfo::new(Material::Grass, 4.50, -4.50),\n    VoronoiInfo::new(Material::Grass, 4.50, -1.50),\n    VoronoiInfo::new(Material::Grass, 4.50, 1.50),\n    VoronoiInfo::new(Material::LushGrass, 4.50, 4.50),\n    VoronoiInfo::new(Material::LushGrass, 4.50, 7.50),\n    VoronoiInfo::new(Material::Grass, 4.50, 10.50),\n    VoronoiInfo::new(Material::CoarseGrass, 3.00, -9.00),\n    
VoronoiInfo::new(Material::CoarseGrass, 3.00, -6.00),\n    VoronoiInfo::new(Material::CoarseGrass, 3.00, -3.00),\n    VoronoiInfo::new(Material::Grass, 3.00, 0.00),\n    VoronoiInfo::new(Material::Grass, 3.00, 3.00),\n    VoronoiInfo::new(Material::CoarseGrass, 3.00, 6.00),\n    VoronoiInfo::new(Material::CoarseGrass, 3.00, 9.00),\n    VoronoiInfo::new(Material::CoarseGrass, 1.50, -10.50),\n    VoronoiInfo::new(Material::CoarseGrass, 1.50, -7.50),\n    VoronoiInfo::new(Material::CoarseGrass, 1.50, -4.50),\n    VoronoiInfo::new(Material::CoarseGrass, 1.50, -1.50),\n    VoronoiInfo::new(Material::Grass, 1.50, 1.50),\n    VoronoiInfo::new(Material::SandyLoam, 1.50, 4.50),\n    VoronoiInfo::new(Material::SandyLoam, 1.50, 7.50),\n    VoronoiInfo::new(Material::SandyLoam, 1.50, 10.50),\n    VoronoiInfo::new(Material::Dirt, 0.00, -9.00),\n    VoronoiInfo::new(Material::CoarseGrass, 0.00, -6.00),\n    VoronoiInfo::new(Material::CoarseGrass, 0.00, -3.00),\n    VoronoiInfo::new(Material::Grass, 0.00, 0.00),\n    VoronoiInfo::new(Material::Sand, 0.00, 3.00),\n    VoronoiInfo::new(Material::Sand, 0.00, 6.00),\n    VoronoiInfo::new(Material::SandyLoam, 0.00, 9.00),\n    VoronoiInfo::new(Material::Dirt, -1.50, -10.50),\n    VoronoiInfo::new(Material::Dirt, -1.50, -7.50),\n    VoronoiInfo::new(Material::Dirt, -1.50, -4.50),\n    VoronoiInfo::new(Material::CoarseGrass, -1.50, -1.50),\n    VoronoiInfo::new(Material::Grass, -1.50, 1.50),\n    VoronoiInfo::new(Material::Sand, -1.50, 4.50),\n    VoronoiInfo::new(Material::Limestone, -1.50, 7.50),\n    VoronoiInfo::new(Material::Limestone, -1.50, 10.50),\n    VoronoiInfo::new(Material::Gravel, -3.00, -9.00),\n    VoronoiInfo::new(Material::Dirt, -3.00, -6.00),\n    VoronoiInfo::new(Material::CoarseGrass, -3.00, -3.00),\n    VoronoiInfo::new(Material::CoarseGrass, -3.00, 0.00),\n    VoronoiInfo::new(Material::Grass, -3.00, 3.00),\n    VoronoiInfo::new(Material::Limestone, -3.00, 6.00),\n    VoronoiInfo::new(Material::Limestone, -3.00, 
9.00),\n    VoronoiInfo::new(Material::Gravel, -4.50, -10.50),\n    VoronoiInfo::new(Material::IceSlush, -4.50, -7.50),\n    VoronoiInfo::new(Material::Dirt, -4.50, -4.50),\n    VoronoiInfo::new(Material::CoarseGrass, -4.50, -1.50),\n    VoronoiInfo::new(Material::Grass, -4.50, 1.50),\n    VoronoiInfo::new(Material::Sand, -4.50, 4.50),\n    VoronoiInfo::new(Material::Limestone, -4.50, 7.50),\n    VoronoiInfo::new(Material::Limestone, -4.50, 10.50),\n    VoronoiInfo::new(Material::IceSlush, -6.00, -9.00),\n    VoronoiInfo::new(Material::IceSlush, -6.00, -6.00),\n    VoronoiInfo::new(Material::Dirt, -6.00, -3.00),\n    VoronoiInfo::new(Material::Grass, -6.00, 0.00),\n    VoronoiInfo::new(Material::CoarseGrass, -6.00, 3.00),\n    VoronoiInfo::new(Material::Limestone, -6.00, 6.00),\n    VoronoiInfo::new(Material::Limestone, -6.00, 9.00),\n    VoronoiInfo::new(Material::IceSlush, -7.50, -10.50),\n    VoronoiInfo::new(Material::IceSlush, -7.50, -7.50),\n    VoronoiInfo::new(Material::Gravel, -7.50, -4.50),\n    VoronoiInfo::new(Material::CoarseGrass, -7.50, -1.50),\n    VoronoiInfo::new(Material::CoarseGrass, -7.50, 1.50),\n    VoronoiInfo::new(Material::Sand, -7.50, 4.50),\n    VoronoiInfo::new(Material::Limestone, -7.50, 7.50),\n    VoronoiInfo::new(Material::Limestone, -7.50, 10.50),\n    VoronoiInfo::new(Material::IceSlush, -9.00, -9.00),\n    VoronoiInfo::new(Material::Gravel, -9.00, -6.00),\n    VoronoiInfo::new(Material::Dirt, -9.00, -3.00),\n    VoronoiInfo::new(Material::CoarseGrass, -9.00, 0.00),\n    VoronoiInfo::new(Material::CoarseGrass, -9.00, 3.00),\n    VoronoiInfo::new(Material::Limestone, -9.00, 6.00),\n    VoronoiInfo::new(Material::Limestone, -9.00, 9.00),\n    VoronoiInfo::new(Material::IceSlush, -10.50, -10.50),\n    VoronoiInfo::new(Material::IceSlush, -10.50, -7.50),\n    VoronoiInfo::new(Material::Gravel, -10.50, -4.50),\n    VoronoiInfo::new(Material::CoarseGrass, -10.50, -1.50),\n    VoronoiInfo::new(Material::CoarseGrass, -10.50, 1.50),\n    
VoronoiInfo::new(Material::Sand, -10.50, 4.50),\n    VoronoiInfo::new(Material::Limestone, -10.50, 7.50),\n    VoronoiInfo::new(Material::Limestone, -10.50, 10.50),\n];\n\nconst SURFACE_DEEP: [VoronoiInfo; 113] = [\n    VoronoiInfo::new(Material::Water, 10.50, -10.50),\n    VoronoiInfo::new(Material::CaveGrass, 10.50, -7.50),\n    VoronoiInfo::new(Material::CaveGrass, 10.50, -4.50),\n    VoronoiInfo::new(Material::CaveGrass, 10.50, -1.50),\n    VoronoiInfo::new(Material::Grass, 10.50, 1.50),\n    VoronoiInfo::new(Material::Grass, 10.50, 4.50),\n    VoronoiInfo::new(Material::LushGrass, 10.50, 7.50),\n    VoronoiInfo::new(Material::LushGrass, 10.50, 10.50),\n    VoronoiInfo::new(Material::CaveGrass, 9.00, -9.00),\n    VoronoiInfo::new(Material::CaveGrass, 9.00, -6.00),\n    VoronoiInfo::new(Material::CaveGrass, 9.00, -3.00),\n    VoronoiInfo::new(Material::Grass, 9.00, 0.00),\n    VoronoiInfo::new(Material::Grass, 9.00, 3.00),\n    VoronoiInfo::new(Material::Grass, 9.00, 6.00),\n    VoronoiInfo::new(Material::Grass, 9.00, 9.00),\n    VoronoiInfo::new(Material::CaveGrass, 7.50, -10.50),\n    VoronoiInfo::new(Material::CaveGrass, 7.50, -7.50),\n    VoronoiInfo::new(Material::CaveGrass, 7.50, -4.50),\n    VoronoiInfo::new(Material::CaveGrass, 7.50, -1.50),\n    VoronoiInfo::new(Material::CaveGrass, 7.50, 1.50),\n    VoronoiInfo::new(Material::Grass, 7.50, 4.50),\n    VoronoiInfo::new(Material::LushGrass, 7.50, 7.50),\n    VoronoiInfo::new(Material::Grass, 7.50, 10.50),\n    VoronoiInfo::new(Material::CaveGrass, 6.00, -9.00),\n    VoronoiInfo::new(Material::CaveGrass, 6.00, -6.00),\n    VoronoiInfo::new(Material::CaveGrass, 6.00, -3.00),\n    VoronoiInfo::new(Material::CaveGrass, 6.00, 0.00),\n    VoronoiInfo::new(Material::Grass, 6.00, 3.00),\n    VoronoiInfo::new(Material::LushGrass, 6.00, 6.00),\n    VoronoiInfo::new(Material::Grass, 6.00, 9.00),\n    VoronoiInfo::new(Material::CaveGrass, 4.50, -10.50),\n    VoronoiInfo::new(Material::CaveGrass, 4.50, -7.50),\n    
VoronoiInfo::new(Material::CaveGrass, 4.50, -4.50),\n    VoronoiInfo::new(Material::CaveGrass, 4.50, -1.50),\n    VoronoiInfo::new(Material::CaveGrass, 4.50, 1.50),\n    VoronoiInfo::new(Material::Grass, 4.50, 4.50),\n    VoronoiInfo::new(Material::Grass, 4.50, 7.50),\n    VoronoiInfo::new(Material::Grass, 4.50, 10.50),\n    VoronoiInfo::new(Material::Dirt, 3.00, -9.00),\n    VoronoiInfo::new(Material::Dirt, 3.00, -6.00),\n    VoronoiInfo::new(Material::CaveGrass, 3.00, -3.00),\n    VoronoiInfo::new(Material::CaveGrass, 3.00, 0.00),\n    VoronoiInfo::new(Material::Grass, 3.00, 3.00),\n    VoronoiInfo::new(Material::TanGrass, 3.00, 6.00),\n    VoronoiInfo::new(Material::TanGrass, 3.00, 9.00),\n    VoronoiInfo::new(Material::Dirt, 1.50, -10.50),\n    VoronoiInfo::new(Material::IceSlush, 1.50, -7.50),\n    VoronoiInfo::new(Material::CaveGrass, 1.50, -4.50),\n    VoronoiInfo::new(Material::CaveGrass, 1.50, -1.50),\n    VoronoiInfo::new(Material::Grass, 1.50, 1.50),\n    VoronoiInfo::new(Material::CaveGrass, 1.50, 4.50),\n    VoronoiInfo::new(Material::TanGrass, 1.50, 7.50),\n    VoronoiInfo::new(Material::Sand, 1.50, 10.50),\n    VoronoiInfo::new(Material::IceSlush, 0.00, -9.00),\n    VoronoiInfo::new(Material::Dirt, 0.00, -6.00),\n    VoronoiInfo::new(Material::CaveGrass, 0.00, -3.00),\n    VoronoiInfo::new(Material::CaveGrass, 0.00, 0.00),\n    VoronoiInfo::new(Material::CaveGrass, 0.00, 3.00),\n    VoronoiInfo::new(Material::Sand, 0.00, 6.00),\n    VoronoiInfo::new(Material::Sand, 0.00, 9.00),\n    VoronoiInfo::new(Material::IceSlush, -1.50, -10.50),\n    VoronoiInfo::new(Material::IceSlush, -1.50, -7.50),\n    VoronoiInfo::new(Material::Dirt, -1.50, -4.50),\n    VoronoiInfo::new(Material::CaveGrass, -1.50, -1.50),\n    VoronoiInfo::new(Material::CaveGrass, -1.50, 1.50),\n    VoronoiInfo::new(Material::Sand, -1.50, 4.50),\n    VoronoiInfo::new(Material::Limestone, -1.50, 7.50),\n    VoronoiInfo::new(Material::Limestone, -1.50, 10.50),\n    
VoronoiInfo::new(Material::IceSlush, -3.00, -9.00),\n    VoronoiInfo::new(Material::Gravel, -3.00, -6.00),\n    VoronoiInfo::new(Material::CaveGrass, -3.00, -3.00),\n    VoronoiInfo::new(Material::CaveGrass, -3.00, 0.00),\n    VoronoiInfo::new(Material::CaveGrass, -3.00, 3.00),\n    VoronoiInfo::new(Material::Limestone, -3.00, 6.00),\n    VoronoiInfo::new(Material::Limestone, -3.00, 9.00),\n    VoronoiInfo::new(Material::IceSlush, -4.50, -10.50),\n    VoronoiInfo::new(Material::Gravel, -4.50, -7.50),\n    VoronoiInfo::new(Material::Dirt, -4.50, -4.50),\n    VoronoiInfo::new(Material::CaveGrass, -4.50, -1.50),\n    VoronoiInfo::new(Material::CaveGrass, -4.50, 1.50),\n    VoronoiInfo::new(Material::Sand, -4.50, 4.50),\n    VoronoiInfo::new(Material::Limestone, -4.50, 7.50),\n    VoronoiInfo::new(Material::Limestone, -4.50, 10.50),\n    VoronoiInfo::new(Material::IceSlush, -6.00, -9.00),\n    VoronoiInfo::new(Material::Gravel, -6.00, -6.00),\n    VoronoiInfo::new(Material::CaveGrass, -6.00, -3.00),\n    VoronoiInfo::new(Material::CaveGrass, -6.00, 0.00),\n    VoronoiInfo::new(Material::Sand, -6.00, 3.00),\n    VoronoiInfo::new(Material::Sand, -6.00, 6.00),\n    VoronoiInfo::new(Material::RedSand, -6.00, 9.00),\n    VoronoiInfo::new(Material::IceSlush, -7.50, -10.50),\n    VoronoiInfo::new(Material::Gravel, -7.50, -7.50),\n    VoronoiInfo::new(Material::Dirt, -7.50, -4.50),\n    VoronoiInfo::new(Material::CaveGrass, -7.50, -1.50),\n    VoronoiInfo::new(Material::Sand, -7.50, 1.50),\n    VoronoiInfo::new(Material::RedSand, -7.50, 4.50),\n    VoronoiInfo::new(Material::RedSand, -7.50, 7.50),\n    VoronoiInfo::new(Material::RedSand, -7.50, 10.50),\n    VoronoiInfo::new(Material::IceSlush, -9.00, -9.00),\n    VoronoiInfo::new(Material::Gravel, -9.00, -6.00),\n    VoronoiInfo::new(Material::CaveGrass, -9.00, -3.00),\n    VoronoiInfo::new(Material::CaveGrass, -9.00, 0.00),\n    VoronoiInfo::new(Material::Sand, -9.00, 3.00),\n    VoronoiInfo::new(Material::RedSand, -9.00, 
6.00),\n    VoronoiInfo::new(Material::RedSand, -9.00, 9.00),\n    VoronoiInfo::new(Material::Gravel, -10.50, -10.50),\n    VoronoiInfo::new(Material::Gravel, -10.50, -7.50),\n    VoronoiInfo::new(Material::Dirt, -10.50, -4.50),\n    VoronoiInfo::new(Material::CaveGrass, -10.50, -1.50),\n    VoronoiInfo::new(Material::CaveGrass, -10.50, 1.50),\n    VoronoiInfo::new(Material::RedSand, -10.50, 4.50),\n    VoronoiInfo::new(Material::RedSand, -10.50, 7.50),\n    VoronoiInfo::new(Material::Lava, -10.50, 10.50),\n];\n\nconst TERRAIN_SURFACE_THICKNESS: f32 = 0.2;\n\npub struct VoronoiInfo {\n    pub location: [f32; 2],\n    pub material: Material,\n}\nimpl VoronoiInfo {\n    pub const fn new(mat: Material, rain: f32, temp: f32) -> VoronoiInfo {\n        VoronoiInfo {\n            location: [rain, temp],\n            material: mat,\n        }\n    }\n\n    // This function picks the material with the help of arrays of points in\n    // 2D spaces defined by temp and rain with associated materials for each point.\n    // The values of elev and dist determine which of eight arrays is chosen.\n    // dist represents how far from the surface of the terrain a point is.\n    // For values of dist between 0 and TERRAIN_SURFACE_THICKNESS, the four surface strata\n    // arrays are used. Otherwise, the four general terrain strata arrays are used.\n    // Variations between surface and terrain are intended to represent erosion and vegetation.\n    // elev represents distance from the guiding plane. There are four strata of\n    // terrain that are defined by elev thresholds. 
Variations between strata\n    // are intended to represent the effects of more or less skylight being exposed.\n    pub fn terraingen_voronoi(elev: f32, rain: f32, temp: f32, dist: f32) -> Material {\n        let voronoi_choices = if dist <= TERRAIN_SURFACE_THICKNESS {\n            if elev < -30.0 {\n                SURFACE_DEEP\n            } else if elev < -12.0 {\n                SURFACE_LOW\n            } else if elev < 2.0 {\n                SURFACE_MED\n            } else {\n                SURFACE_HIGH\n            }\n        } else if elev < -30.0 {\n            GENERAL_DEEP\n        } else if elev < -12.0 {\n            GENERAL_LOW\n        } else if elev < 2.0 {\n            GENERAL_MED\n        } else {\n            GENERAL_HIGH\n        };\n\n        let y: [f32; 2] = [rain, temp];\n        let mut dist_squared = math::sqr(voronoi_choices[0].location[0] - y[0])\n            + math::sqr(voronoi_choices[0].location[1] - y[1]);\n        let mut voxel_mat = voronoi_choices[0].material;\n        for vc in voronoi_choices.iter().skip(1) {\n            let d_squared = math::sqr(vc.location[0] - y[0]) + math::sqr(vc.location[1] - y[1]);\n            if d_squared <= dist_squared {\n                dist_squared = d_squared;\n                voxel_mat = vc.material;\n            };\n        }\n\n        voxel_mat\n    }\n}\n"
  },
  {
    "path": "docs/README.md",
    "content": "# Current outline\nThis is subject to change.\n* Introduction\n* How to play Hypermine\n    * Controls\n    * Config file\n    * The save file (with a warning that compatibility between versions of Hypermine are not guaranteed and another warning to back up save files)\n    * Setting up multiplayer\n* Background math\n    * Linear algebra\n        * (This can link to external resources, but readers should be guided on what parts of linear algebra are worth learning, and making these docs self-contained would be a good long-term goal. If we do use external links, we should include a date so that readers know when health of each link was last checked.)\n        * Vectors\n        * Matrices, matrix-vector multiplication, and its meaning\n        * Matrix-matrix multiplication and its meaning\n            * (We should likely explain both the \"transformation\" and \"change of basis\" interpretations)\n        * 3D examples (assuming previous sections have used 2D examples)\n        * Dot products\n        * Projections, reflections, rotations\n        * Homogeneous coordinates and translations\n    * Spherical geometry\n        * (We want such a section because it's a good segue to problem solving techniques for hyperbolic geometry problems)\n        * Representing points as unit vectors\n        * Projections, reflections, rotations, translations (which are rotations expressed differently)\n    * Hyperbolic geometry\n        * Minkowski space (with the \"inner product\" as the main difference)\n        * Representing points as \"normalized\" vectors\n        * Projections, reflections, rotations, translations, horo-rotations\n        * A note about floating point precision\n        * (Do we put advanced shape-casting math here? 
Probably not.)\n* Tiling the world\n    * Nodes and their coordinate systems (describing the order-4 dodecahedral honeycomb and how Hypermine uses it)\n    * Chunks and their coordinate systems (explaining node_to_dual and dual_to_node)\n    * Voxels within chunks (Margins should be mentioned here.)\n    * The graph (how dodecahedra are organized into the tiling in code. Also mention how NodeId works.)\n* World generation (See `world_generation.md`)\n* How world generation is driven (wait until this section to describe anything async or anything related to margins)\n* Character physics (May want to mention the word \"player\" for searchability)\n    * The movement algorithm (generally describing character_controller/mod.rs)\n    * Constraining movement vectors (describing vector_bounds.rs)\n    * Sphere casting (how collisions are actually detected)\n* Block updates (placing and breaking blocks)\n* Entity/Component/System\n    * (Note that we use hecs, and point to documentation. Provide some tips on how to discover entities/components in use)\n    * (Do not list out all components, as that kind of documentation belongs in code)\n* Netcode\n    * Introduction (note that we use quinn, and point to documentation)\n    * Syncing character movement\n    * Syncing chunk data and block updates\n    * Syncing entities\n    * Logging on and off\n* Rendering\n    * Introduction (note that we use Vulkan with the ash library, and point to documentation)\n    * Chunk rendering (Enable reader to trace through all the code involved in rendering a chunk. 
Include fog.)\n    * Surface extraction (including ambient occlusion)\n    * Character rendering (with mesh.vert and mesh.frag)\n    * Asset loading\n    * GUI (note that we use the yakui library, and point to documentation)\n* Miscellaneous\n    * (Note important entry points, such as core.rs for starting up Vulkan and window.rs for the event loop)\n* FAQ (placeholder to put questions if they do appear often)\n\n# Design guidelines\n* Docs should live in the repository itself rather than a separate wiki.\n    * This helps keep docs in sync with the code and allows the quality of docs to be enforced with pull requests. The wiki has a disadvantage of forcing users to make unilateral edits to it.\n* Docs should have a suggested linear order to read them.\n* We should prioritize documenting things that are impossible to learn elsewhere.\n* There should be a way for readers to know if it's safe to skip a section.\n    * This can for instance be done with \"After reading this section, readers should be able to (...).\" This could be questions they can answer.\n* Since math is heavily involved, exercises can be useful as knowledge checks.\n* While there is a linear order, readers should also be able to tell what parts they can skip if they are reading the docs for a particular purpose.\n* We should be prepared to keep placeholders in the docs, potentially with links to external sources, as this would allow us to separate the tasks of writing and organizing documentation.\n* Since geometry is heavily involved, the docs should contain pictures.\n    * Interactive elements would also be helpful.\n* As 3D geometry can be difficult to visualize, we should use a 2D analogy for anything that can be reduced to 2D without loss of generality. 
For instance, many diagrams that explain concepts can be 2D.\n    * It should still be made clear how the analogy extends to 3D.\n* To avoid running up against GitHub limits or making repositories take longer to clone, larger images and videos will need to be generated on the reader's machine.\n    * One option would be to add functionality to Hypermine itself for these visualizations. It is not decided what the preferred approach is.\n    * Images that don't take up much storage are fine to store with Git LFS. Try to keep the total size of all assets used in the documentation under approximately 1 MiB (subject to change based on feasibility/importance of larger images). It should be possible to use the SVG file format to keep most images very small. Producing such images with code is still recommended.\n* We should try to keep the reader interested/motivated, making the documentation enjoyable to read.\n    * Animations and interactive visualizations can help with this a lot (in addition to helping the learning process).\n* If some information in the docs can be made concrete by pointing to source code, we should do that.\n    * There could be a desync, but if such a mistake is made, readers can git blame the documentation to see what the code was like when the documentation was written.\n    * We can also try to avoid repeated work by referencing code comments, asking readers to check them for further detail. However, we cannot put diagrams in code comments.\n    * We should keep this documentation relatively short, especially when diagrams are not needed, as code comments are better. The documentation here should generally just give people less familiar with the codebase somewhere to look.\n"
  },
  {
    "path": "docs/world_generation.md",
    "content": "# World generation\nWorld generation in Hypermine is constrained by the following principles:\n* Consistency: The contents of a chunk when it is first visited should be determined only by the chunk's location and the world generation parameters, which never change within a world.\n* Isotropy: World generation should be as isotropic as reasonably possible, so that it is not obvious which direction leads to the origin.\n\nThese constraints help inspire the details of the world generation algorithm, which are explained further in the sections below.\n\n## Noise\nHypermine relies on a random noise function for multiple purposes, such as deciding the shape of the terrain and what material the ground should be. Because of this, to understand Hypermine's world generation, it's important to understand the noise function it uses.\n\nHyperbolic space has a unique challenge compared to Euclidean space: Common approaches to generating good-looking terrain, such as fractal noise, rely on using grids of multiple different scales, while in hyperbolic space, grids cannot be arbitrarily scaled. This would cause terrain features to have a limited size if generated this way. To avoid this limitation, Hypermine uses a different approach specifically designed for a hyperbolic tiling, inspired by Hyperrogue, which is described in the following section.\n\n### Coarse noise function output\nThe first step is to form a coarse approximation of the noise function, deciding on one value for each node. To do this, we take advantage of the tiling itself. For a 2D analogy, the pentagonal tiling of the hyperbolic plane can be thought of as a set of lines dividing the hyperbolic plane instead of individual pentagons.\n\nTODO: Picture of pentagonal tiling with lines colored to distinguish them from each other\n\nSimilarly, in 3D, the dodecahedral tiling can be thought of as a set of planes dividing hyperbolic space. 
This interpretation of the dodecahedral tiling is important for understanding how the noise function works between nodes.\n\nTo decide on a noise value for each node, we break the dodecahedral tiling up into these planes. We associate each plane with a randomly chosen noise delta, such that crossing a specific plane in one direction increases or decreases the noise value by a specific amount, and crossing the same plane from the other side has the opposite effect. Once we decide on a noise value for the root node, this definition fully determines the noise value of every other node.\n\nThe following diagram shows an example of the 2D equivalent of this algorithm.\n\nTODO: Picture of pentagonal tiling with each line labeled with the noise delta, using arrows or something similar to show how this offset applies. The center of each pentagon is also labeled with a number with its current noise value. Integers are used everywhere to allow the reader to verify the math easily in their head.\n\nIn this diagram, the randomly-chosen noise delta of each line, along with the derived noise value of each node, is shown. Note how the difference in noise values between any two adjacent nodes matches the noise delta of the line dividing them.\n\nThis algorithm allows for random variation while keeping nearby nodes similar to each other, which is what we need from a noise function. One notable quirk worth mentioning is that the noise value is unbounded, which currently means that hills and valleys in Hypermine can become arbitrarily high and deep, respectively.\n\n### Fine noise function output\nThe next step is to use this coarse approximation of the noise function to produce the actual noise function. 
To begin, set the noise value at the center of each node to the coarse output we computed earlier.\n\nTODO: Picture of pentagonal tiling with a color at the center of each node to represent the noise value.\n\nThen, trilinearly interpolate these values based on voxel coordinates to create a continuous function across the world. Note that in 2D, this would be bilinear interpolation.\n\nTODO: Picture of the same pentagonal tiling with gradients added. Decorate the center of each node with a dot to highlight the control points of the interpolation.\n\nFinally, for each voxel, add a random offset to its noise value, drawn independently from some distribution.\n\nTODO: Picture of same pentagonal tiling with the final noise value of each voxel.\n\n## Terrain shape\nHypermine uses the 3D noise function to determine the shape of the terrain. This may seem surprising, as it is arguably simpler to use a 2D noise function instead to form a heightmap of the world. However, using 3D noise instead is a useful way of generating more interesting terrain with overhangs, and more importantly, a 2D heightmap works less well in hyperbolic space because of the way that space expands as you move away from the ground plane. The naive approach would cause hills and valleys to have significantly less detail than terrain near the ground plane.\n\nInstead, the basic algorithm is as follows: Using a 3D noise function, we determine the hypothetical elevation of the terrain at each voxel. We then subtract this elevation by the actual height of the voxel above the ground plane to determine a value (denoted `dist` in code) that can be roughly translated to \"the voxel's depth relative to the terrain's surface\". If this value is above zero, we are inside the terrain, and the voxel should be solid, and otherwise, it should be void. 
If the value is above zero but close to zero, one of the surface materials (like dirt) will be used, while if the value is far above zero, a material like stone will be used instead.\n\nNote that the above is a simplification of the actual algorithm. It is recommended to read the implementation of `worldgen::ChunkParams::generate_terrain` to understand all the details. For instance, terracing is used to add flatter terrain layers with steeper hills between them, and the strength of this terracing effect is controlled by another parameter affected by noise called `blockiness`. In addition, some measures are taken to make the terrain surface smoother than the interface between the different terrain layers.\n\n## Terrain material\nThe terrain material depends entirely on the following four factors:\n* Elevation above the ground plane\n* Estimated distance below the terrain surface\n* Temperature\n* Rainfall\n\nNote that temperature and rainfall are noise functions set up for the purpose of allowing varying terrain materials. How these are used is described in detail in `terraingen.rs`, so the details are omitted here.\n\n## The road\nCurrently, the only megastructure in Hypermine is a single infinite straight road. This megastructure works by using the state machine `worldgen::NodeStateRoad`, which is much like `worldgen::NodeStateKind`, but instead of defining the ground plane, it defines the plane that divides the road into its \"east\" and \"west\" sides (with this terminology assuming that the road runs \"north\" and \"south\"). Based on this state, the `worldgen::ChunkParams::generate_road` function will be called for chunks right above the ground plane, generating the road's surface and carving out any terrain that is in the way. 
The `worldgen::ChunkParams::generate_road_support` function will be called for chunks below the ground plane, generating the wooden truss if the road is above the terrain, acting as a bridge.\n\n## Trees\nFor decoration, tiny two-block trees are scattered throughout the terrain, with their density depending on the amount of rainfall. Each tree is generated by placing a wood block next to a dirt or grass block, followed by placing a \"leaves\" block on that wood block, away from the dirt or grass block. While this algorithm is unaware of gravity, it often generates trees upright because the wood blocks are placed on relatively flat ground. See `worldgen::ChunkParams::generate_trees` for more details.\n\nNote that generating larger trees requires a more complicated algorithm that has not yet been planned out or implemented.\n\n## Random number generation\nThe above sections mention random choices being made in several areas, but Hypermine requires any given chunk to always be generated with the same contents no matter when it is generated and no matter which computer is used. To accomplish this, Hypermine uses a deterministic and portable random number generator (RNG), seeding it for each node and chunk.\n\nTo elaborate, each node is given a \"spice\" value, which is set to the last 64 bits of the node's unique `NodeId`. This node spice is used directly as the seed for an RNG, which is used for non-chunk-specific decisions, such as the noise deltas between nodes. The node spice is then hashed together with the chunk's `Vertex` to produce the seed for another RNG, which is used for chunk-specific decisions, such as how trees should be scattered within each chunk.\n\n## Additional information\nFor additional information related to world generation, it is recommended to read the code and its documentation in `worldgen.rs`, as it has many details not covered here.\n"
  },
  {
    "path": "save/Cargo.toml",
    "content": "[package]\nname = \"save\"\nversion = \"0.1.0\"\nedition = \"2024\"\n\n# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html\n\n[dependencies]\nprost = \"0.14.3\"\nredb = \"3.1.1\"\nthiserror = \"2.0.0\"\nzstd = { package = \"zstd-safe\", version = \"7.1.0\", default-features = false, features = [\"std\", \"experimental\"] }\n\n[dev-dependencies]\ntempfile = \"3.4\"\ncriterion = \"0.8.2\"\nrand = { version = \"0.9.0\", features = [\"small_rng\"] }\n\n[[bench]]\nname = \"bench\"\nharness = false\n"
  },
  {
    "path": "save/benches/bench.rs",
    "content": "use std::hint::black_box;\n\nuse save::Save;\n\nuse criterion::{BatchSize, BenchmarkId, Criterion, Throughput, criterion_group, criterion_main};\nuse rand::{Rng, SeedableRng, rngs::SmallRng};\n\nfn save(c: &mut Criterion) {\n    let mut write = c.benchmark_group(\"write\");\n    let node = save::VoxelNode {\n        chunks: vec![save::Chunk {\n            vertex: 0,\n            voxels: vec![0; 12 * 12 * 12 * 2],\n        }],\n    };\n    let mut rng = SmallRng::from_os_rng();\n    for count in [1, 100, 10000] {\n        write.throughput(Throughput::Elements(count));\n        write.bench_function(BenchmarkId::from_parameter(count), |b| {\n            b.iter_batched(\n                || {\n                    let file = tempfile::NamedTempFile::new().unwrap();\n                    let save = Save::open(file.path(), 12).unwrap();\n                    let node_ids = (&mut rng)\n                        .sample_iter(rand::distr::StandardUniform)\n                        .take(count as usize)\n                        .collect::<Vec<u128>>();\n                    (file, save, node_ids)\n                },\n                |(_file, save, node_ids)| {\n                    let mut tx = save.write().unwrap();\n                    let mut writer = tx.get().unwrap();\n                    for i in node_ids {\n                        writer.put_voxel_node(i, &node).unwrap();\n                    }\n                    drop(writer);\n                    tx.commit().unwrap();\n                },\n                BatchSize::SmallInput,\n            );\n        });\n    }\n    write.finish();\n\n    let mut read = c.benchmark_group(\"read\");\n    for count in [1, 100, 10000] {\n        read.throughput(Throughput::Elements(count));\n        read.bench_function(BenchmarkId::from_parameter(count), |b| {\n            b.iter_batched(\n                || {\n                    let file = tempfile::NamedTempFile::new().unwrap();\n                    let save = 
Save::open(file.path(), 12).unwrap();\n                    let node_ids = (&mut rng)\n                        .sample_iter(rand::distr::StandardUniform)\n                        .take(count as usize)\n                        .collect::<Vec<u128>>();\n\n                    let mut tx = save.write().unwrap();\n                    let mut writer = tx.get().unwrap();\n                    for &i in &node_ids {\n                        writer.put_voxel_node(i, &node).unwrap();\n                    }\n                    drop(writer);\n                    tx.commit().unwrap();\n\n                    (file, save, node_ids)\n                },\n                |(_file, save, node_ids)| {\n                    let mut read = save.read().unwrap();\n                    for i in node_ids {\n                        black_box(read.get_voxel_node(i).unwrap().unwrap());\n                    }\n                },\n                BatchSize::SmallInput,\n            );\n        });\n    }\n}\n\ncriterion_group!(benches, save);\ncriterion_main!(benches);\n"
  },
  {
    "path": "save/gen-protos/Cargo.toml",
    "content": "[package]\nname = \"gen-protos\"\nversion = \"0.1.0\"\nedition = \"2024\"\npublish = false\n\n[dependencies]\nprost-build = \"0.14.3\"\n"
  },
  {
    "path": "save/gen-protos/src/main.rs",
    "content": "use std::{io::Result, path::Path};\n\nfn main() -> Result<()> {\n    let dir = Path::new(env!(\"CARGO_MANIFEST_DIR\"))\n        .parent()\n        .unwrap()\n        .join(\"src\");\n\n    prost_build::Config::new()\n        .out_dir(&dir)\n        .compile_protos(&[dir.join(\"protos.proto\")], &[dir])\n}\n"
  },
  {
    "path": "save/src/lib.rs",
    "content": "mod protos;\n\nuse std::path::Path;\n\nuse prost::Message;\nuse redb::{Database, ReadableDatabase, ReadableTable, TableDefinition};\nuse thiserror::Error;\n\npub use protos::*;\n\npub struct Save {\n    meta: Meta,\n    db: Database,\n}\n\nimpl Save {\n    pub fn open(path: &Path, default_chunk_size: u8) -> Result<Self, OpenError> {\n        let db = Database::create(path).map_err(redb::Error::from)?;\n        let meta = {\n            let tx = db.begin_read().map_err(redb::Error::from)?;\n            match tx.open_table(META_TABLE) {\n                Ok(meta) => {\n                    let Some(value) = meta.get(&[][..]).map_err(redb::Error::from)? else {\n                        return Err(OpenError::MissingMeta);\n                    };\n                    let mut dctx = dctx();\n                    let mut buffer = Vec::new();\n                    decompress(&mut dctx, value.value(), &mut buffer)\n                        .map_err(OpenError::DecompressionFailed)?;\n                    Meta::decode(&*buffer)?\n                }\n                Err(redb::TableError::TableDoesNotExist(_)) => {\n                    // Must be an empty save file. 
Initialize the meta record and create the other tables.\n                    let defaults = Meta {\n                        chunk_size: default_chunk_size.into(),\n                    };\n                    init_meta_table(&db, &defaults)?;\n                    defaults\n                }\n                Err(e) => return Err(OpenError::Db(DbError(Box::new(e.into())))),\n            }\n        };\n        Ok(Self { meta, db })\n    }\n\n    #[inline]\n    pub fn meta(&self) -> &Meta {\n        &self.meta\n    }\n\n    pub fn read(&self) -> Result<Reader, DbError> {\n        let tx = self.db.begin_read().map_err(redb::Error::from)?;\n        Ok(Reader {\n            voxel_nodes: tx.open_table(VOXEL_NODE_TABLE)?,\n            entity_nodes: tx.open_table(ENTITY_NODE_TABLE)?,\n            characters: tx.open_table(CHARACTERS_BY_NAME_TABLE)?,\n            dctx: dctx(),\n            accum: Vec::new(),\n        })\n    }\n\n    pub fn write(&self) -> Result<WriterGuard, DbError> {\n        let tx = self.db.begin_write().map_err(redb::Error::from)?;\n        Ok(WriterGuard { tx })\n    }\n}\n\nfn init_meta_table(db: &Database, value: &Meta) -> Result<(), DbError> {\n    let tx = db.begin_write().map_err(redb::Error::from)?;\n    let mut meta = tx.open_table(META_TABLE)?;\n    let mut cctx = cctx();\n    let mut plain = Vec::new();\n    let mut compressed = Vec::new();\n    prepare(&mut cctx, &mut plain, &mut compressed, value);\n    meta.insert(&[][..], &*compressed)?;\n    drop(meta);\n\n    tx.open_table(VOXEL_NODE_TABLE)?;\n    tx.open_table(ENTITY_NODE_TABLE)?;\n    tx.open_table(CHARACTERS_BY_NAME_TABLE)?;\n    tx.commit()?;\n    Ok(())\n}\n\nfn dctx() -> zstd::DCtx<'static> {\n    let mut dctx = zstd::DCtx::create();\n    dctx.set_parameter(zstd::DParameter::Format(zstd::FrameFormat::Magicless))\n        .unwrap();\n    dctx\n}\n\npub struct Reader {\n    voxel_nodes: redb::ReadOnlyTable<u128, &'static [u8]>,\n    entity_nodes: redb::ReadOnlyTable<u128, &'static 
[u8]>,\n    characters: redb::ReadOnlyTable<&'static str, &'static [u8]>,\n    dctx: zstd::DCtx<'static>,\n    accum: Vec<u8>,\n}\n\nimpl Reader {\n    pub fn get_voxel_node(&mut self, node_id: u128) -> Result<Option<VoxelNode>, GetError> {\n        let Some(node) = self.voxel_nodes.get(&node_id)? else {\n            return Ok(None);\n        };\n        self.accum.clear();\n        decompress(&mut self.dctx, node.value(), &mut self.accum)\n            .map_err(GetError::DecompressionFailed)?;\n        Ok(Some(VoxelNode::decode(&*self.accum)?))\n    }\n\n    pub fn get_entity_node(&mut self, node_id: u128) -> Result<Option<EntityNode>, GetError> {\n        let Some(node) = self.entity_nodes.get(&node_id)? else {\n            return Ok(None);\n        };\n        self.accum.clear();\n        decompress(&mut self.dctx, node.value(), &mut self.accum)\n            .map_err(GetError::DecompressionFailed)?;\n        Ok(Some(EntityNode::decode(&*self.accum)?))\n    }\n\n    pub fn get_character(&mut self, name: &str) -> Result<Option<Character>, GetError> {\n        let Some(node) = self.characters.get(name)? 
else {\n            return Ok(None);\n        };\n        self.accum.clear();\n        decompress(&mut self.dctx, node.value(), &mut self.accum)\n            .map_err(GetError::DecompressionFailed)?;\n        Ok(Some(Character::decode(&*self.accum)?))\n    }\n\n    /// Temporary function to load all voxel-related save data at once.\n    /// TODO: Replace this implementation with a streaming implementation\n    /// that does not require loading everything at once\n    pub fn get_all_voxel_node_ids(&self) -> Result<Vec<u128>, GetError> {\n        self.voxel_nodes\n            .iter()?\n            .map(|n| Ok(n.map_err(GetError::from)?.0.value()))\n            .collect()\n    }\n\n    /// Temporary function to load all entity-related save data at once.\n    /// TODO: Replace this implementation with a streaming implementation\n    /// that does not require loading everything at once\n    pub fn get_all_entity_node_ids(&self) -> Result<Vec<u128>, GetError> {\n        self.entity_nodes\n            .iter()?\n            .map(|n| Ok(n.map_err(GetError::from)?.0.value()))\n            .collect()\n    }\n}\n\nfn decompress(\n    dctx: &mut zstd::DCtx<'_>,\n    compressed: &[u8],\n    out: &mut Vec<u8>,\n) -> Result<(), &'static str> {\n    dctx.init().map_err(zstd::get_error_name)?;\n    let mut input = zstd::InBuffer::around(compressed);\n    let out_size = zstd::DCtx::out_size();\n    loop {\n        if out.len() + out_size > out.capacity() {\n            out.reserve(out_size);\n        }\n        let mut out = zstd::OutBuffer::around_pos(out, out.len());\n        let n = dctx\n            .decompress_stream(&mut out, &mut input)\n            .map_err(zstd::get_error_name)?;\n        if n == 0 {\n            return Ok(());\n        }\n    }\n}\n\npub struct WriterGuard {\n    tx: redb::WriteTransaction,\n}\n\nimpl WriterGuard {\n    pub fn get(&mut self) -> Result<Writer<'_>, DbError> {\n        Ok(Writer {\n            voxel_nodes: self\n                .tx\n           
     .open_table(VOXEL_NODE_TABLE)\n                .map_err(redb::Error::from)?,\n            entity_nodes: self\n                .tx\n                .open_table(ENTITY_NODE_TABLE)\n                .map_err(redb::Error::from)?,\n            characters: self\n                .tx\n                .open_table(CHARACTERS_BY_NAME_TABLE)\n                .map_err(redb::Error::from)?,\n            cctx: cctx(),\n            plain: Vec::new(),\n            compressed: Vec::new(),\n        })\n    }\n\n    pub fn commit(self) -> Result<(), DbError> {\n        self.tx.commit()?;\n        Ok(())\n    }\n}\n\nfn cctx() -> zstd::CCtx<'static> {\n    let mut cctx = zstd::CCtx::create();\n    cctx.set_parameter(zstd::CParameter::Format(zstd::FrameFormat::Magicless))\n        .unwrap();\n    cctx.set_parameter(zstd::CParameter::CompressionLevel(2))\n        .unwrap();\n    cctx\n}\n\npub struct Writer<'guard> {\n    voxel_nodes: redb::Table<'guard, u128, &'static [u8]>,\n    entity_nodes: redb::Table<'guard, u128, &'static [u8]>,\n    characters: redb::Table<'guard, &'static str, &'static [u8]>,\n    cctx: zstd::CCtx<'static>,\n    plain: Vec<u8>,\n    compressed: Vec<u8>,\n}\n\nimpl Writer<'_> {\n    pub fn put_voxel_node(&mut self, node_id: u128, state: &VoxelNode) -> Result<(), DbError> {\n        prepare(&mut self.cctx, &mut self.plain, &mut self.compressed, state);\n        self.voxel_nodes.insert(node_id, &*self.compressed)?;\n        Ok(())\n    }\n\n    pub fn put_entity_node(&mut self, node_id: u128, state: &EntityNode) -> Result<(), DbError> {\n        prepare(&mut self.cctx, &mut self.plain, &mut self.compressed, state);\n        self.entity_nodes.insert(node_id, &*self.compressed)?;\n        Ok(())\n    }\n\n    pub fn put_character(&mut self, name: &str, character: &Character) -> Result<(), DbError> {\n        prepare(\n            &mut self.cctx,\n            &mut self.plain,\n            &mut self.compressed,\n            character,\n        );\n        
self.characters.insert(name, &*self.compressed)?;\n        Ok(())\n    }\n}\n\n/// Buffer the compressed, encoded form of `msg` in `compressed`\nfn prepare<T: prost::Message>(\n    cctx: &mut zstd::CCtx<'_>,\n    plain: &mut Vec<u8>,\n    compressed: &mut Vec<u8>,\n    msg: &T,\n) {\n    plain.clear();\n    msg.encode(plain).unwrap();\n    compressed.clear();\n    compressed.reserve(zstd::compress_bound(plain.len()));\n    cctx.compress2(compressed, plain)\n        .map_err(zstd::get_error_name)\n        .unwrap();\n}\n\nconst META_TABLE: TableDefinition<&[u8], &[u8]> = TableDefinition::new(\"meta\");\nconst VOXEL_NODE_TABLE: TableDefinition<u128, &[u8]> = TableDefinition::new(\"voxel nodes\");\nconst ENTITY_NODE_TABLE: TableDefinition<u128, &[u8]> = TableDefinition::new(\"entity nodes\");\nconst CHARACTERS_BY_NAME_TABLE: TableDefinition<&str, &[u8]> =\n    TableDefinition::new(\"characters by name\");\n\n#[derive(Debug, Error)]\npub enum OpenError {\n    #[error(transparent)]\n    Db(#[from] DbError),\n    #[error(\"missing metadata\")]\n    MissingMeta,\n    #[error(\"decompression failed: {0}\")]\n    DecompressionFailed(&'static str),\n    #[error(transparent)]\n    Corrupt(#[from] prost::DecodeError),\n}\n\nimpl From<redb::Error> for OpenError {\n    fn from(x: redb::Error) -> Self {\n        OpenError::Db(DbError(Box::new(x)))\n    }\n}\n\n#[derive(Debug, Error)]\npub enum GetError {\n    #[error(transparent)]\n    Db(#[from] DbError),\n    #[error(\"decompression failed: {0}\")]\n    DecompressionFailed(&'static str),\n    #[error(transparent)]\n    Corrupt(#[from] prost::DecodeError),\n}\n\nimpl From<redb::Error> for GetError {\n    fn from(x: redb::Error) -> Self {\n        GetError::Db(DbError(Box::new(x)))\n    }\n}\n\nimpl From<redb::StorageError> for GetError {\n    fn from(x: redb::StorageError) -> Self {\n        GetError::Db(DbError(Box::new(x.into())))\n    }\n}\n\n#[derive(Debug, Error)]\n#[error(transparent)]\npub struct 
DbError(Box<redb::Error>);\n\nimpl From<redb::Error> for DbError {\n    fn from(x: redb::Error) -> Self {\n        DbError(Box::new(x))\n    }\n}\n\nimpl From<redb::StorageError> for DbError {\n    fn from(x: redb::StorageError) -> Self {\n        DbError(Box::new(x.into()))\n    }\n}\n\nimpl From<redb::CommitError> for DbError {\n    fn from(x: redb::CommitError) -> Self {\n        DbError(Box::new(x.into()))\n    }\n}\n\nimpl From<redb::TableError> for DbError {\n    fn from(x: redb::TableError) -> Self {\n        DbError(Box::new(x.into()))\n    }\n}\n"
  },
  {
    "path": "save/src/protos.proto",
    "content": "syntax = \"proto3\";\n\npackage protos;\n\nmessage Meta {\n    // Number of voxels along the edge of a chunk\n    uint32 chunk_size = 1;\n}\n\nmessage Character {\n    // Graph edges to traverse from the origin to find the node containing the character's entity\n    bytes path = 1;\n}\n\nmessage EntityNode {\n    // Entities whose origins lie within this node, each encoded as:\n    // { entity: u64, component_count: varint, components: [{ type: varint, length: varint, data: [u8] }] }\n    repeated bytes entities = 1;\n}\n\nmessage VoxelNode {\n    // Voxel data for each modified chunk\n    repeated Chunk chunks = 1;\n}\n\nmessage Chunk {\n    // Which dodecahedron vertex is associated with this chunk\n    uint32 vertex = 1;\n\n    // Dense 3D array of 16-bit material tags for all voxels in this chunk\n    bytes voxels = 2;\n}\n\nenum ComponentType {\n    // 4x4 matrix of f32s\n    POSITION = 0;\n    // UTF-8 text\n    NAME = 1;\n    // u16\n    MATERIAL = 2;\n    // List of u64\n    INVENTORY = 3;\n}\n"
  },
  {
    "path": "save/src/protos.rs",
    "content": "// This file is @generated by prost-build.\n#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)]\npub struct Meta {\n    /// Number of voxels along the edge of a chunk\n    #[prost(uint32, tag = \"1\")]\n    pub chunk_size: u32,\n}\n#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]\npub struct Character {\n    /// Graph edges to traverse from the origin to find the node containing the character's entity\n    #[prost(bytes = \"vec\", tag = \"1\")]\n    pub path: ::prost::alloc::vec::Vec<u8>,\n}\n#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]\npub struct EntityNode {\n    /// Entities whose origins lie within this node, each encoded as:\n    /// { entity: u64, component_count: varint, components: \\[{ type: varint, length: varint, data: [u8\\] }] }\n    #[prost(bytes = \"vec\", repeated, tag = \"1\")]\n    pub entities: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,\n}\n#[derive(Clone, PartialEq, ::prost::Message)]\npub struct VoxelNode {\n    /// Voxel data for each modified chunk\n    #[prost(message, repeated, tag = \"1\")]\n    pub chunks: ::prost::alloc::vec::Vec<Chunk>,\n}\n#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]\npub struct Chunk {\n    /// Which dodecahedron vertex is associated with this chunk\n    #[prost(uint32, tag = \"1\")]\n    pub vertex: u32,\n    /// Dense 3D array of 16-bit material tags for all voxels in this chunk\n    #[prost(bytes = \"vec\", tag = \"2\")]\n    pub voxels: ::prost::alloc::vec::Vec<u8>,\n}\n#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]\n#[repr(i32)]\npub enum ComponentType {\n    /// 4x4 matrix of f32s\n    Position = 0,\n    /// UTF-8 text\n    Name = 1,\n    /// u16\n    Material = 2,\n    /// List of u64\n    Inventory = 3,\n}\nimpl ComponentType {\n    /// String value of the enum field names used in the ProtoBuf definition.\n    ///\n    /// The values are not transformed in any way and thus are considered stable\n    
/// (if the ProtoBuf definition does not change) and safe for programmatic use.\n    pub fn as_str_name(&self) -> &'static str {\n        match self {\n            Self::Position => \"POSITION\",\n            Self::Name => \"NAME\",\n            Self::Material => \"MATERIAL\",\n            Self::Inventory => \"INVENTORY\",\n        }\n    }\n    /// Creates an enum from field names used in the ProtoBuf definition.\n    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {\n        match value {\n            \"POSITION\" => Some(Self::Position),\n            \"NAME\" => Some(Self::Name),\n            \"MATERIAL\" => Some(Self::Material),\n            \"INVENTORY\" => Some(Self::Inventory),\n            _ => None,\n        }\n    }\n}\n"
  },
  {
    "path": "save/tests/heavy.rs",
    "content": "use std::time::Instant;\n\nuse save::Save;\n\nuse rand::{Rng, SeedableRng, rngs::SmallRng};\n\n#[test]\nfn write() {\n    let mut rng = SmallRng::from_os_rng();\n    let file = tempfile::NamedTempFile::new().unwrap();\n    let save = Save::open(file.path(), 12).unwrap();\n    let node = save::VoxelNode {\n        chunks: vec![save::Chunk {\n            vertex: 0,\n            voxels: vec![0; 12 * 12 * 12 * 2],\n        }],\n    };\n\n    let start = Instant::now();\n    const PASSES: u32 = 100;\n    const NODES: u32 = 1_000;\n    for _ in 0..PASSES {\n        let mut writer_guard = save.write().unwrap();\n        let mut writer = writer_guard.get().unwrap();\n        for _ in 0..NODES {\n            writer.put_voxel_node(rng.random(), &node).unwrap();\n        }\n        drop(writer);\n        writer_guard.commit().unwrap();\n    }\n    let dt = start.elapsed();\n    println!(\n        \"{:?} per pass, {:?} per node\",\n        dt / PASSES,\n        dt / (PASSES * NODES)\n    );\n}\n"
  },
  {
    "path": "save/tests/tests.rs",
    "content": "use rand::{Rng, SeedableRng, rngs::SmallRng};\n\nuse save::Save;\n\n#[test]\nfn persist_meta() {\n    let file = tempfile::NamedTempFile::new().unwrap();\n    let save = Save::open(file.path(), 12).unwrap();\n    assert_eq!(save.meta().chunk_size, 12);\n    drop(save);\n    let save = Save::open(file.path(), 8).unwrap();\n    assert_eq!(save.meta().chunk_size, 12);\n}\n\n#[test]\nfn persist_node() {\n    let file = tempfile::NamedTempFile::new().unwrap();\n    let save = Save::open(file.path(), 12).unwrap();\n    let node = save::VoxelNode {\n        chunks: vec![save::Chunk {\n            vertex: 0,\n            voxels: vec![0; 12 * 12 * 12 * 2],\n        }],\n    };\n    let mut writer_guard = save.write().unwrap();\n    writer_guard\n        .get()\n        .unwrap()\n        .put_voxel_node(0, &node)\n        .unwrap();\n    writer_guard.commit().unwrap();\n    assert_eq!(\n        node,\n        save.read().unwrap().get_voxel_node(0).unwrap().unwrap()\n    );\n}\n\n#[test]\nfn persist_character() {\n    let file = tempfile::NamedTempFile::new().unwrap();\n    let save = Save::open(file.path(), 12).unwrap();\n    let mut writer_guard = save.write().unwrap();\n    let mut writer = writer_guard.get().unwrap();\n    let mut rng = SmallRng::from_os_rng();\n    let mut path = Vec::with_capacity(17000);\n    for _ in 0..17000 {\n        path.push(rng.random_range(0..12));\n    }\n    let ch = save::Character { path };\n    writer.put_character(\"asdf\", &ch).unwrap();\n    drop(writer);\n    writer_guard.commit().unwrap();\n    drop(save);\n\n    let save = Save::open(file.path(), 12).unwrap();\n    assert_eq!(\n        ch,\n        save.read().unwrap().get_character(\"asdf\").unwrap().unwrap()\n    );\n}\n"
  },
  {
    "path": "server/Cargo.toml",
    "content": "[package]\nname = \"server\"\nversion = \"0.1.0\"\nauthors = [\"Benjamin Saunders <ben.e.saunders@gmail.com>\"]\nedition = \"2024\"\npublish = false\nlicense = \"Apache-2.0 OR Zlib\"\n\n# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html\n\n[dependencies]\npostcard = { version = \"1.0.4\", default-features = false, features = [\"use-std\"] }\ncommon = { path = \"../common\" }\ntracing = \"0.1.10\"\ntokio = { version = \"1.18.2\", features = [\"rt-multi-thread\", \"time\", \"macros\", \"sync\"] }\nquinn = { workspace = true }\nserde = { version = \"1.0.104\", features = [\"derive\", \"rc\"] }\ntoml = { workspace = true }\nanyhow = \"1.0.26\"\nrcgen = { version = \"0.14.6\", default-features = false, features = [\"ring\"] }\nhostname = \"0.4.0\"\nhecs = { workspace = true }\nrand = { version = \"0.9.0\", features = [\"small_rng\"] }\nfxhash = \"0.2.1\"\nnalgebra = { workspace = true }\nlibm = \"0.2.16\"\nslotmap = \"1.0.6\"\nrustls-pemfile = \"2.1.2\"\nsave = { path = \"../save\" }\n"
  },
  {
    "path": "server/src/config.rs",
    "content": "use std::{\n    fs,\n    net::{Ipv6Addr, SocketAddr},\n    path::{Path, PathBuf},\n};\n\nuse anyhow::{Context, Result};\nuse serde::Deserialize;\n\nuse common::SimConfigRaw;\n\n#[derive(Deserialize)]\n#[serde(deny_unknown_fields)]\npub struct Config {\n    pub server_name: Option<String>,\n    pub certificate_chain: Option<PathBuf>,\n    pub private_key: Option<PathBuf>,\n    pub save: Option<PathBuf>,\n    pub listen: SocketAddr,\n    #[serde(default)]\n    pub simulation: SimConfigRaw,\n}\n\nimpl Config {\n    pub fn load(path: &Path) -> Result<Self> {\n        toml::from_str(\n            std::str::from_utf8(&fs::read(path).context(\"reading config file\")?)\n                .context(\"parsing config file\")?,\n        )\n        .context(\"parsing config file\")\n    }\n}\n\nimpl Default for Config {\n    fn default() -> Self {\n        Self {\n            server_name: None,\n            certificate_chain: None,\n            private_key: None,\n            save: None,\n            listen: SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 1234),\n            simulation: SimConfigRaw::default(),\n        }\n    }\n}\n"
  },
  {
    "path": "server/src/input_queue.rs",
    "content": "use std::{\n    collections::VecDeque,\n    time::{Duration, Instant},\n};\n\nuse common::proto::Command;\n\n/// A jitter-tolerant queue of inputs received from a client\n///\n/// Clients send a stream of input roughly at tickrate, but with an undefined time offset causing\n/// ticks to line up imperfectly. If they happen to line up very closely, or if tickrate is\n/// sufficiently high, then network jitter might cause frequent variation in whether an input is\n/// received just before or just after the simulation is stepped. If we applied inputs ASAP, this\n/// would make it impossible for the client to accurately predict the effects of its input, leading\n/// to severe visual jitter.\n///\n/// To correct this, we wait a certain amount of time after receiving the first input, and only then\n/// begin consuming one input per tick. This ensures that each input can be late by that amount of\n/// time without disrupting the client's prediction. If we nonetheless run out of inputs, it's\n/// likely that the client fell behind, e.g. due to a temporary hang, clock drift, or a change in\n/// the network path, so we wait again to recover the margin for error.\n#[derive(Default)]\npub struct InputQueue {\n    queue: VecDeque<Command>,\n    /// Time at which the first input in the latest uninterrupted sequence was received\n    epoch: Option<Instant>,\n}\n\nimpl InputQueue {\n    pub fn new() -> Self {\n        Self::default()\n    }\n\n    /// Enqueue a new input\n    ///\n    /// Called immediately on receipt\n    pub fn push(&mut self, input: Command, now: Instant) {\n        self.queue.push_back(input);\n        if self.epoch.is_none() {\n            self.epoch = Some(now);\n        }\n    }\n\n    /// Obtain the input for the next simulation step\n    ///\n    /// Must be called immediately prior to the step. 
`delay` is the amount of time after the first\n    /// (but not necessarily future) input in a given uninterrupted sequence of inputs we must wait\n    /// before beginning to consume inputs.\n    pub fn pop(&mut self, now: Instant, delay: Duration) -> Option<Command> {\n        if now - self.epoch? < delay {\n            // The first input hasn't aged long enough; try again later!\n            return None;\n        }\n        let result = self.queue.pop_front();\n        if result.is_none() {\n            // Queue underrun; the client may have fallen behind, so we need to re-establish our\n            // margin for error.\n            self.epoch = None;\n        }\n        result\n    }\n}\n"
  },
  {
    "path": "server/src/lib.rs",
    "content": "#![allow(clippy::needless_borrowed_reference)]\n\nextern crate nalgebra as na;\nmod input_queue;\nmod postcard_helpers;\nmod sim;\n\nuse std::{net::UdpSocket, sync::Arc, time::Instant};\n\nuse anyhow::{Context, Error, Result, anyhow};\nuse hecs::Entity;\nuse quinn::rustls::pki_types::{CertificateDer, PrivateKeyDer};\nuse slotmap::DenseSlotMap;\nuse tokio::sync::mpsc;\nuse tracing::{debug, error, error_span, info, trace, warn};\n\nuse common::{\n    SimConfig, codec,\n    proto::{self, connection_error_codes},\n};\nuse input_queue::InputQueue;\nuse save::Save;\nuse sim::Sim;\n\npub struct NetParams {\n    pub certificate_chain: Vec<CertificateDer<'static>>,\n    pub private_key: PrivateKeyDer<'static>,\n    pub socket: UdpSocket,\n}\n\npub struct Server {\n    cfg: Arc<SimConfig>,\n    sim: Sim,\n    clients: DenseSlotMap<ClientId, Client>,\n    save: Save,\n    endpoint: Option<quinn::Endpoint>,\n\n    new_clients_send: mpsc::UnboundedSender<(quinn::Connection, proto::ClientHello)>,\n    new_clients_recv: mpsc::UnboundedReceiver<(quinn::Connection, proto::ClientHello)>,\n    client_events_send: mpsc::Sender<(ClientId, ClientEvent)>,\n    client_events_recv: mpsc::Receiver<(ClientId, ClientEvent)>,\n}\n\nimpl Server {\n    pub fn new(net: Option<NetParams>, mut cfg: SimConfig, save: Save) -> Result<Self> {\n        cfg.chunk_size = save.meta().chunk_size as u8;\n        let endpoint = net\n            .map(|net| {\n                let server_config =\n                    quinn::ServerConfig::with_single_cert(net.certificate_chain, net.private_key)\n                        .context(\"parsing certificate\")?;\n                let endpoint = quinn::Endpoint::new(\n                    quinn::EndpointConfig::default(),\n                    Some(server_config),\n                    net.socket,\n                    quinn::default_runtime().unwrap(),\n                )?;\n                info!(address = %endpoint.local_addr().unwrap(), \"listening\");\n      
          Ok::<_, Error>(endpoint)\n            })\n            .transpose()?;\n\n        let (new_clients_send, new_clients_recv) = mpsc::unbounded_channel();\n        let (client_events_send, client_events_recv) = mpsc::channel(128);\n\n        let cfg = Arc::new(cfg);\n        Ok(Self {\n            sim: Sim::new(cfg.clone(), &save),\n            cfg,\n            clients: DenseSlotMap::default(),\n            save,\n            endpoint,\n\n            new_clients_send,\n            new_clients_recv,\n            client_events_send,\n            client_events_recv,\n        })\n    }\n\n    pub fn connect(&mut self, hello: proto::ClientHello, mut backend: HandleBackend) -> Result<()> {\n        let snapshot = Arc::new(self.sim.snapshot());\n        let (id, entity) = self\n            .sim\n            .activate_or_spawn_character(&hello)\n            .ok_or_else(|| anyhow!(\"could not spawn {} due to name conflict\", hello.name))?;\n        let (ordered_send, mut ordered_recv) = mpsc::channel(32);\n        ordered_send.try_send(snapshot).unwrap();\n        let (unordered_send, mut unordered_recv) = mpsc::channel(32);\n        let client_id = self.clients.insert(Client {\n            conn: None,\n            character: entity,\n            ordered: ordered_send,\n            unordered: unordered_send,\n            latest_input_received: 0,\n            latest_input_processed: 0,\n            inputs: InputQueue::new(),\n        });\n\n        backend\n            .incoming\n            .send(Message::Hello(proto::ServerHello {\n                character: id,\n                sim_config: (*self.cfg).clone(),\n            }))\n            .unwrap();\n\n        // Adapt channels. 
TODO: Make this unnecessary.\n        let client_events_send = self.client_events_send.clone();\n        tokio::spawn(async move {\n            while let Some(msg) = backend.outgoing.recv().await {\n                _ = client_events_send\n                    .send((client_id, ClientEvent::Command(msg)))\n                    .await;\n            }\n        });\n        tokio::spawn({\n            let incoming_send = backend.incoming.clone();\n            async move {\n                while let Some(msg) = ordered_recv.recv().await {\n                    _ = incoming_send.send(Message::Spawns(proto::Spawns::clone(&*msg)));\n                }\n            }\n        });\n        tokio::spawn(async move {\n            while let Some(msg) = unordered_recv.recv().await {\n                _ = backend.incoming.send(Message::StateDelta(msg));\n            }\n        });\n\n        info!(id = ?client_id.0, \"connected locally\");\n        Ok(())\n    }\n\n    pub async fn run(mut self) {\n        let mut ticks = tokio::time::interval(self.cfg.step_interval);\n        let mut incoming = self.handle_incoming();\n        loop {\n            tokio::select! 
{\n                _ = ticks.tick() => { self.on_step(); },\n                Some(conn) = incoming.recv() => { self.on_connect(conn); }\n                Some((id, event)) = self.client_events_recv.recv() => { self.on_client_event(id, event); }\n                Some((conn, hello)) = self.new_clients_recv.recv() => { self.on_client(conn, hello); }\n            }\n        }\n    }\n\n    fn handle_incoming(&self) -> mpsc::Receiver<quinn::Connection> {\n        let (incoming_send, incoming_recv) = mpsc::channel(16);\n        let Some(endpoint) = self.endpoint.clone() else {\n            return incoming_recv;\n        };\n        tokio::spawn(async move {\n            while let Some(conn) = endpoint.accept().await {\n                trace!(address = %conn.remote_address(), \"connection incoming\");\n                let incoming_send = incoming_send.clone();\n                tokio::spawn(async move {\n                    match conn.await {\n                        Err(e) => {\n                            error!(\"incoming connection failed: {}\", e.to_string());\n                        }\n                        Ok(connection) => {\n                            let _ = incoming_send.send(connection).await;\n                        }\n                    }\n                });\n            }\n        });\n        incoming_recv\n    }\n\n    fn on_step(&mut self) {\n        let now = Instant::now();\n        // Apply queued inputs\n        for (id, client) in &mut self.clients {\n            if let Some(cmd) = client.inputs.pop(now, self.cfg.input_queue_size) {\n                client.latest_input_processed = cmd.generation;\n                if let Err(e) = self.sim.command(client.character, cmd) {\n                    error!(client = ?id, \"couldn't process command: {}\", e);\n                }\n            }\n        }\n\n        // Step the simulation\n        let (spawns, delta) = self.sim.step();\n        let spawns = spawns.map(Arc::new);\n        let mut overran = 
Vec::new();\n        for (client_id, client) in &mut self.clients {\n            let mut delta = delta.clone();\n            delta.latest_input = client.latest_input_processed;\n            let r1 = client.unordered.try_send(delta);\n            let r2 = if let Some(spawns) = spawns.as_ref() {\n                client.ordered.try_send(Arc::clone(spawns))\n            } else {\n                Ok(())\n            };\n            use mpsc::error::TrySendError::Full;\n            match (r1, r2) {\n                (Err(Full(_)), _) | (_, Err(Full(_))) => {\n                    overran.push(client_id);\n                }\n                _ => {}\n            }\n        }\n        for client_id in overran {\n            match self.clients[client_id].conn {\n                Some(ref conn) => {\n                    error!(\"dropping slow client {:?}\", client_id.0);\n                    conn.close(\n                        connection_error_codes::STREAM_ERROR,\n                        b\"client reading too slowly\",\n                    );\n                    self.cleanup_client(client_id);\n                }\n                None => {\n                    warn!(\"slow local client {:?}\", client_id.0);\n                }\n            }\n        }\n\n        // Save the world. 
Could be less frequent if it becomes a bottleneck.\n        if let Err(e) = self.sim.save(&mut self.save) {\n            error!(\"couldn't save: {}\", e);\n        }\n    }\n\n    fn on_client_event(&mut self, client_id: ClientId, event: ClientEvent) {\n        let span = error_span!(\"client\", id = ?client_id.0);\n        let _guard = span.enter();\n        let Some(client) = self.clients.get_mut(client_id) else {\n            // Skip messages from cleaned-up clients\n            return;\n        };\n        match event {\n            ClientEvent::Lost(e) => {\n                error!(\"lost: {:#}\", e);\n                self.cleanup_client(client_id);\n            }\n            ClientEvent::Command(cmd) => {\n                if cmd.generation.wrapping_sub(client.latest_input_received) < u16::MAX / 2 {\n                    client.latest_input_received = cmd.generation;\n                    client.inputs.push(cmd, Instant::now());\n                } else {\n                    debug!(\"dropping obsolete command\");\n                }\n            }\n        }\n    }\n\n    fn cleanup_client(&mut self, client: ClientId) {\n        self.sim\n            .deactivate_character(self.clients[client].character);\n        self.clients.remove(client);\n    }\n\n    fn on_connect(&mut self, connection: quinn::Connection) {\n        let send = self.new_clients_send.clone();\n        tokio::spawn(async move {\n            let result: anyhow::Result<()> = async move {\n                let stream = connection.accept_uni().await.map_err(Error::msg)?;\n                let hello =\n                    codec::recv_whole::<proto::ClientHello>(MAX_CLIENT_MSG_SIZE, stream).await?;\n                _ = send.send((connection, hello));\n                Ok(())\n            }\n            .await;\n            if let Err(e) = result {\n                warn!(\"error reading client hello: {e:#}\");\n            }\n        });\n    }\n\n    fn on_client(&mut self, connection: 
quinn::Connection, hello: proto::ClientHello) {\n        let snapshot = Arc::new(self.sim.snapshot());\n        let Some((id, entity)) = self.sim.activate_or_spawn_character(&hello) else {\n            error!(\"could not spawn {} due to name conflict\", hello.name);\n            connection.close(connection_error_codes::NAME_CONFLICT, b\"name conflict\");\n            return;\n        };\n        let (ordered_send, ordered_recv) = mpsc::channel(32);\n        ordered_send.try_send(snapshot).unwrap();\n        let (unordered_send, unordered_recv) = mpsc::channel(32);\n        let client_id = self.clients.insert(Client {\n            conn: Some(connection.clone()),\n            character: entity,\n            ordered: ordered_send,\n            unordered: unordered_send,\n            latest_input_received: 0,\n            latest_input_processed: 0,\n            inputs: InputQueue::new(),\n        });\n        info!(id = ?client_id.0, address = %connection.remote_address(), \"connection established\");\n        let server_hello = proto::ServerHello {\n            character: id,\n            sim_config: (*self.cfg).clone(),\n        };\n        tokio::spawn({\n            let connection = connection.clone();\n            async move {\n                // Errors will be handled by recv task\n                let _ = drive_send(connection, server_hello, unordered_recv, ordered_recv).await;\n            }\n        });\n        let mut send = self.client_events_send.clone();\n        tokio::spawn(async move {\n            if let Err(e) = drive_recv(client_id, connection, &mut send).await {\n                // drive_recv returns an error when any connection-terminating issue occurs, so we\n                // send a `Lost` message to ensure the client is cleaned up. 
Note that this message may\n                // be redundant, as dropping a slow client also sends a `Lost` message.\n                let _ = send.send((client_id, ClientEvent::Lost(e))).await;\n            } else {\n                unreachable!(\"Graceful disconnects are not implemented.\")\n            }\n        });\n    }\n}\n\nconst MAX_CLIENT_MSG_SIZE: usize = 1 << 16;\n\nasync fn drive_recv(\n    id: ClientId,\n    connection: quinn::Connection,\n    send: &mut mpsc::Sender<(ClientId, ClientEvent)>,\n) -> Result<()> {\n    loop {\n        let stream = connection.accept_uni().await.map_err(Error::msg)?;\n        let send = send.clone();\n\n        // We spawn a separate task to allow messages to be processed in a different order from when they were\n        // initiated.\n        let connection = connection.clone();\n        tokio::spawn(async move {\n            match codec::recv_whole::<proto::Command>(MAX_CLIENT_MSG_SIZE, stream).await {\n                Err(e) => {\n                    // This error can occur if the client sends a badly-formatted command. In this case,\n                    // we want to drop the client. 
We close the connection, which will cause `drive_recv` to\n                    // return eventually.\n                    tracing::error!(\"Error when parsing unordered stream from client: {e}\");\n                    connection.close(\n                        connection_error_codes::BAD_CLIENT_COMMAND,\n                        b\"could not process stream\",\n                    );\n                }\n                Ok(msg) => {\n                    let _ = send.send((id, ClientEvent::Command(msg))).await;\n                }\n            }\n        });\n    }\n}\n\nasync fn drive_send(\n    conn: quinn::Connection,\n    hello: proto::ServerHello,\n    unordered: mpsc::Receiver<Unordered>,\n    mut ordered: mpsc::Receiver<Ordered>,\n) -> Result<()> {\n    let mut stream = conn.open_uni().await?;\n    codec::send(&mut stream, &hello).await?;\n\n    tokio::spawn(async move {\n        // Errors will be handled by recv task\n        let _ = drive_send_unordered(conn.clone(), unordered).await;\n    });\n\n    while let Some(msg) = ordered.recv().await {\n        codec::send(&mut stream, &msg).await?;\n    }\n\n    Ok(())\n}\n\nasync fn drive_send_unordered(\n    conn: quinn::Connection,\n    mut msgs: mpsc::Receiver<Unordered>,\n) -> Result<()> {\n    while let Some(msg) = msgs.recv().await {\n        let stream = conn.open_uni().await?;\n        codec::send_whole(stream, &msg).await?;\n    }\n    Ok(())\n}\n\nslotmap::new_key_type! 
{\n    struct ClientId;\n}\n\nstruct Client {\n    conn: Option<quinn::Connection>,\n    character: Entity,\n    ordered: mpsc::Sender<Ordered>,\n    unordered: mpsc::Sender<Unordered>,\n    latest_input_received: u16,\n    latest_input_processed: u16,\n    inputs: InputQueue,\n}\n\nenum ClientEvent {\n    Command(proto::Command),\n    Lost(Error),\n}\n\ntype Unordered = proto::StateDelta;\n\ntype Ordered = Arc<proto::Spawns>;\n\n/// A client's view of a server\npub struct Handle {\n    pub incoming: mpsc::UnboundedReceiver<Message>,\n    pub outgoing: mpsc::UnboundedSender<proto::Command>,\n}\n\nimpl Handle {\n    pub fn loopback() -> (Self, HandleBackend) {\n        let (incoming_send, incoming_recv) = mpsc::unbounded_channel();\n        let (outgoing_send, outgoing_recv) = mpsc::unbounded_channel();\n        (\n            Self {\n                incoming: incoming_recv,\n                outgoing: outgoing_send,\n            },\n            HandleBackend {\n                incoming: incoming_send,\n                outgoing: outgoing_recv,\n            },\n        )\n    }\n}\n\npub struct HandleBackend {\n    incoming: mpsc::UnboundedSender<Message>,\n    outgoing: mpsc::UnboundedReceiver<proto::Command>,\n}\n\n#[derive(Debug)]\npub enum Message {\n    Hello(proto::ServerHello),\n    Spawns(proto::Spawns),\n    StateDelta(proto::StateDelta),\n    ConnectionLost(Error),\n}\n"
  },
  {
    "path": "server/src/main.rs",
    "content": "#![allow(clippy::needless_borrowed_reference)]\n\nmod config;\n\nuse std::{fs, net::UdpSocket, path::Path};\n\nuse anyhow::{Context, Result, anyhow};\nuse quinn::rustls::pki_types::{CertificateDer, PrivateKeyDer, PrivatePkcs8KeyDer};\nuse tracing::{info, warn};\n\nuse common::{Anonymize, SimConfig};\nuse config::Config;\nuse save::Save;\n\nfn main() {\n    // Set up logging\n    common::init_tracing();\n\n    if let Err(e) = run() {\n        eprintln!(\"{e:#}\");\n        std::process::exit(1);\n    }\n}\n\n#[tokio::main]\npub async fn run() -> Result<()> {\n    let cfg = match std::env::args_os().nth(1) {\n        Some(path) => Config::load(Path::new(&path))?,\n        None => Config::default(),\n    };\n\n    let (certificate_chain, private_key) = match (&cfg.certificate_chain, &cfg.private_key) {\n        (&Some(ref certificate_chain), &Some(ref private_key)) => (\n            rustls_pemfile::certs(\n                &mut &*fs::read(certificate_chain).context(\"reading certificate chain\")?,\n            )\n            .collect::<Result<Vec<_>, _>>()\n            .context(\"parsing certificate chain\")?,\n            rustls_pemfile::pkcs8_private_keys(\n                &mut &*fs::read(private_key).context(\"reading private key\")?,\n            )\n            .next()\n            .ok_or_else(|| anyhow!(\"no private key found with PKCS #8 format\"))?\n            .context(\"parsing private key\")?\n            .into(),\n        ),\n        _ => {\n            // TODO: Cache on disk\n            warn!(\"generating self-signed certificate\");\n            let certified_key = rcgen::generate_simple_self_signed(vec![\n                cfg.server_name.clone().map(Ok).unwrap_or_else(|| {\n                    hostname::get().context(\"getting hostname\").and_then(|x| {\n                        x.into_string()\n                            .map_err(|_| anyhow!(\"hostname is not valid UTF-8\"))\n                    })\n                })?,\n            ])\n   
         .unwrap();\n            let key = certified_key.signing_key.serialize_der();\n            let cert = certified_key.cert.der().to_vec();\n            (\n                vec![CertificateDer::from(cert)],\n                PrivateKeyDer::from(PrivatePkcs8KeyDer::from(key)),\n            )\n        }\n    };\n\n    let sim_cfg = SimConfig::from_raw(&cfg.simulation);\n\n    let save = cfg.save.unwrap_or_else(|| \"hypermine.save\".into());\n    info!(\"using save file {}\", save.anonymize().display());\n    let save = Save::open(&save, sim_cfg.chunk_size)?;\n\n    let server = server::Server::new(\n        Some(server::NetParams {\n            certificate_chain,\n            private_key,\n            socket: UdpSocket::bind(cfg.listen).context(\"binding socket\")?,\n        }),\n        sim_cfg,\n        save,\n    )?;\n    server.run().await;\n    Ok(())\n}\n"
  },
  {
    "path": "server/src/postcard_helpers.rs",
    "content": "//! Postcard doesn't support serializing to an existing vec out of the box.\n//! See https://github.com/jamesmunns/postcard/pull/208.\n\nuse postcard::Result;\n\npub fn serialize<T: serde::Serialize + ?Sized>(value: &T, vec: &mut Vec<u8>) -> Result<()> {\n    postcard::serialize_with_flavor(value, ExtendVec(vec))\n}\n\nstruct ExtendVec<'a>(&'a mut Vec<u8>);\n\nimpl postcard::ser_flavors::Flavor for ExtendVec<'_> {\n    type Output = ();\n\n    fn try_push(&mut self, data: u8) -> Result<()> {\n        self.0.push(data);\n        Ok(())\n    }\n\n    fn finalize(self) -> Result<()> {\n        Ok(())\n    }\n\n    fn try_extend(&mut self, data: &[u8]) -> Result<()> {\n        self.0.extend_from_slice(data);\n        Ok(())\n    }\n}\n\n#[derive(serde::Serialize, serde::Deserialize)]\npub struct SaveEntity {\n    /// [`EntityId`] value, represented as an array to avoid wastefully varint encoding random bytes\n    pub entity: [u8; 8],\n    pub components: Vec<(u64, Vec<u8>)>,\n}\n"
  },
  {
    "path": "server/src/sim.rs",
    "content": "use std::sync::Arc;\n\nuse anyhow::Context;\nuse common::dodeca::{Side, Vertex};\nuse common::math::MIsometry;\nuse common::node::VoxelData;\nuse common::proto::{BlockUpdate, Inventory, SerializedVoxelData};\nuse common::world::Material;\nuse common::{GraphEntities, node::ChunkId};\nuse fxhash::{FxHashMap, FxHashSet};\nuse hecs::{DynamicBundle, Entity, EntityBuilder};\nuse rand::rngs::SmallRng;\nuse rand::{Rng, SeedableRng};\nuse save::ComponentType;\nuse serde::{Deserialize, Serialize};\nuse tracing::{error, error_span, info, trace};\n\nuse common::{\n    EntityId, SimConfig, Step, character_controller, dodeca,\n    graph::{Graph, NodeId},\n    node::Chunk,\n    proto::{\n        Character, CharacterInput, CharacterState, ClientHello, Command, Component, FreshNode,\n        Position, Spawns, StateDelta,\n    },\n    traversal::{ensure_nearby, nearby_nodes},\n    worldgen::ChunkParams,\n};\n\nuse crate::postcard_helpers::{self, SaveEntity};\n\npub struct Sim {\n    cfg: Arc<SimConfig>,\n    rng: SmallRng,\n    step: Step,\n    entity_ids: FxHashMap<EntityId, Entity>,\n    world: hecs::World,\n    graph: Graph,\n    /// Voxel data that has been fetched from a savefile but not yet introduced to the graph\n    preloaded_voxel_data: FxHashMap<ChunkId, VoxelData>,\n    accumulated_changes: AccumulatedChanges,\n    graph_entities: GraphEntities,\n    /// All nodes that have entity-related information yet to be saved\n    dirty_nodes: FxHashSet<NodeId>,\n    /// All nodes that have voxel-related information yet to be saved\n    dirty_voxel_nodes: FxHashSet<NodeId>,\n    /// All chunks in the graph have ever had any block updates applied to them and can no longer be regenerated with worldgen.\n    /// This doesn't include chunks that have not been added to the graph yet (See `preloaded_voxel_data`).\n    modified_chunks: FxHashSet<ChunkId>,\n}\n\nimpl Sim {\n    pub fn new(cfg: Arc<SimConfig>, save: &save::Save) -> Self {\n        let mut result = Self {\n  
          rng: SmallRng::from_os_rng(),\n            step: 0,\n            entity_ids: FxHashMap::default(),\n            world: hecs::World::new(),\n            graph: Graph::new(cfg.chunk_size),\n            preloaded_voxel_data: FxHashMap::default(),\n            accumulated_changes: AccumulatedChanges::default(),\n            graph_entities: GraphEntities::new(),\n            dirty_nodes: FxHashSet::default(),\n            dirty_voxel_nodes: FxHashSet::default(),\n            modified_chunks: FxHashSet::default(),\n            cfg,\n        };\n\n        result\n            .load_all_voxels(save)\n            .expect(\"save file must be of a valid format\");\n        result\n            .load_all_entities(save)\n            .expect(\"save file must be of a valid format\");\n        // As no players have logged in yet, and `snapshot` may be called before the first call of `step`,\n        // make sure that `accumulated_changes` is empty to avoid accidental double-spawns of anything.\n        result.accumulated_changes = AccumulatedChanges::default();\n        result\n    }\n\n    pub fn save(&mut self, save: &mut save::Save) -> Result<(), save::DbError> {\n        fn path_from_origin(graph: &Graph, mut node: NodeId) -> Vec<u8> {\n            let mut result = Vec::new();\n            while let Some(primary_parent) = graph.primary_parent_side(node) {\n                result.push(primary_parent as u8);\n                node = graph.neighbor(node, primary_parent).unwrap();\n            }\n            result.reverse();\n            result\n        }\n\n        let mut tx = save.write()?;\n        let mut writer = tx.get()?;\n        for (pos, ch) in self.world.query::<(&Position, &Character)>().iter() {\n            writer.put_character(\n                &ch.name,\n                &save::Character {\n                    path: path_from_origin(&self.graph, pos.node),\n                },\n            )?;\n        }\n\n        let dirty_nodes = 
self.dirty_nodes.drain().collect::<Vec<_>>();\n        let dirty_voxel_nodes = self.dirty_voxel_nodes.drain().collect::<Vec<_>>();\n        for node in dirty_nodes {\n            let entities = self.snapshot_node(node);\n            writer.put_entity_node(self.graph.hash_of(node), &entities)?;\n        }\n        for node in dirty_voxel_nodes {\n            let voxels = self.snapshot_voxel_node(node);\n            writer.put_voxel_node(self.graph.hash_of(node), &voxels)?;\n        }\n\n        drop(writer);\n        tx.commit()?;\n        Ok(())\n    }\n\n    /// Loads all entities from the given save file. Note that this must be called before any players\n    /// log in, as `accumulated_changes` will not properly reflect the entities that were loaded in.\n    fn load_all_entities(&mut self, save: &save::Save) -> anyhow::Result<()> {\n        let mut read = save.read()?;\n        for node_hash in read.get_all_entity_node_ids()? {\n            let Some(entity_node) = read.get_entity_node(node_hash)? 
else {\n                continue;\n            };\n            let node_id = self.graph.from_hash(node_hash);\n            for entity_bytes in entity_node.entities {\n                let save_entity: SaveEntity = postcard::from_bytes(&entity_bytes)?;\n                self.load_entity(&mut read, node_id, save_entity)?;\n            }\n        }\n        Ok(())\n    }\n\n    fn load_entity(\n        &mut self,\n        read: &mut save::Reader,\n        node: NodeId,\n        save_entity: SaveEntity,\n    ) -> anyhow::Result<()> {\n        let entity_id = EntityId::from_bits(u64::from_le_bytes(save_entity.entity));\n        let mut entity_builder = EntityBuilder::new();\n        entity_builder.add(entity_id);\n        entity_builder.add(node);\n        for (component_type, component_bytes) in save_entity.components {\n            self.load_component(\n                read,\n                &mut entity_builder,\n                node,\n                ComponentType::try_from(component_type as i32).unwrap(),\n                component_bytes,\n            )?;\n        }\n        let entity = self.world.spawn(entity_builder.build());\n        self.graph_entities.insert(node, entity);\n        self.entity_ids.insert(entity_id, entity);\n        Ok(())\n    }\n\n    fn load_component(\n        &mut self,\n        read: &mut save::Reader,\n        entity_builder: &mut EntityBuilder,\n        node: NodeId,\n        component_type: ComponentType,\n        component_bytes: Vec<u8>,\n    ) -> anyhow::Result<()> {\n        match component_type {\n            ComponentType::Position => {\n                let column_slice: [f32; 16] = postcard::from_bytes(&component_bytes)?;\n                entity_builder.add(Position {\n                    node,\n                    local: MIsometry::from_column_slice_unchecked(&column_slice),\n                });\n            }\n            ComponentType::Name => {\n                let name = String::from_utf8(component_bytes)?;\n                
// Ensure that every node occupied by a character is generated.\n                let Some(character) = read.get_character(&name)? else {\n                    // Skip loading named entities that lack path information.\n                    error!(\n                        \"Entity {} will not be loaded because their node path information is missing.\",\n                        name\n                    );\n                    return Ok(());\n                };\n                let mut current_node = NodeId::ROOT;\n                for side in character\n                    .path\n                    .into_iter()\n                    .map(|side| Side::VALUES[side as usize])\n                {\n                    current_node = self.graph.ensure_neighbor(current_node, side);\n                }\n                if current_node != node {\n                    // Skip loading named entities that are in the wrong place. This can happen\n                    // when there are multiple entities with the same name, which has been possible\n                    // in previous versions of Hypermine.\n                    error!(\n                        \"Entity {} will not be loaded because their node path information is incorrect.\",\n                        name\n                    );\n                    return Ok(());\n                }\n                // Prepare all relevant components that are needed to support ComponentType::Name\n                entity_builder.add(InactiveCharacter(Character {\n                    name,\n                    state: CharacterState {\n                        velocity: na::Vector3::zeros(),\n                        on_ground: false,\n                        orientation: na::UnitQuaternion::identity(),\n                    },\n                }));\n            }\n            ComponentType::Material => {\n                let material: u16 =\n                    u16::from_le_bytes(component_bytes.try_into().map_err(|_| {\n                       
 anyhow::anyhow!(\"Expected Material component in save file to be 2 bytes\")\n                    })?);\n                entity_builder.add(Material::try_from(material)?);\n            }\n            ComponentType::Inventory => {\n                let mut contents = vec![];\n                for chunk in component_bytes.chunks(8) {\n                    contents.push(EntityId::from_bits(u64::from_le_bytes(\n                        chunk.try_into().unwrap(),\n                    )));\n                }\n                entity_builder.add(Inventory { contents });\n            }\n        }\n        Ok(())\n    }\n\n    fn load_all_voxels(&mut self, save: &save::Save) -> anyhow::Result<()> {\n        let mut read = save.read()?;\n        for node_hash in read.get_all_voxel_node_ids()? {\n            let Some(voxel_node) = read.get_voxel_node(node_hash)? else {\n                continue;\n            };\n            for chunk in voxel_node.chunks {\n                let voxels = SerializedVoxelData {\n                    inner: chunk.voxels,\n                };\n                let vertex = Vertex::iter()\n                    .nth(chunk.vertex as usize)\n                    .context(\"deserializing vertex ID\")?;\n                self.preloaded_voxel_data.insert(\n                    ChunkId::new(self.graph.from_hash(node_hash), vertex),\n                    VoxelData::deserialize(&voxels, self.cfg.chunk_size)\n                        .context(\"deserializing voxel data\")?,\n                );\n            }\n        }\n        Ok(())\n    }\n\n    fn snapshot_node(&self, node: NodeId) -> save::EntityNode {\n        let mut entities = Vec::new();\n        for &entity in self.graph_entities.get(node) {\n            let Ok(entity) = self.world.entity(entity) else {\n                error!(\"stale graph entity {:?}\", entity);\n                continue;\n            };\n            let Some(id) = entity.get::<&EntityId>() else {\n                continue;\n            };\n    
        let mut components = Vec::new();\n            if let Some(pos) = entity.get::<&Position>() {\n                components.push((\n                    ComponentType::Position as u64,\n                    postcard::to_stdvec(&pos.local.as_ref()).unwrap(),\n                ));\n            }\n            if let Some(ch) = entity.get::<&Character>().or_else(|| {\n                entity\n                    .get::<&InactiveCharacter>()\n                    .map(|ich| hecs::Ref::map(ich, |ich| &ich.0)) // Extract Ref<Character> from Ref<InactiveCharacter>\n            }) {\n                components.push((ComponentType::Name as u64, ch.name.as_bytes().into()));\n            }\n            if let Some(material) = entity.get::<&Material>() {\n                components.push((\n                    ComponentType::Material as u64,\n                    (*material as u16).to_le_bytes().into(),\n                ));\n            }\n            if let Some(inventory) = entity.get::<&Inventory>() {\n                let mut serialized_inventory_contents = vec![];\n                for entity_id in &inventory.contents {\n                    serialized_inventory_contents\n                        .extend_from_slice(&entity_id.to_bits().to_le_bytes());\n                }\n                components.push((\n                    ComponentType::Inventory as u64,\n                    serialized_inventory_contents,\n                ));\n            }\n            let mut repr = Vec::new();\n            postcard_helpers::serialize(\n                &SaveEntity {\n                    entity: id.to_bits().to_le_bytes(),\n                    components,\n                },\n                &mut repr,\n            )\n            .unwrap();\n            entities.push(repr);\n        }\n\n        save::EntityNode { entities }\n    }\n\n    fn snapshot_voxel_node(&self, node: NodeId) -> save::VoxelNode {\n        let mut chunks = vec![];\n        let node_data = &self.graph[node];\n        for 
vertex in Vertex::iter() {\n            if !self.modified_chunks.contains(&ChunkId::new(node, vertex)) {\n                continue;\n            }\n            let Chunk::Populated { ref voxels, .. } = node_data.chunks[vertex] else {\n                panic!(\"Unknown chunk listed as modified\");\n            };\n            chunks.push(save::Chunk {\n                vertex: vertex as u32,\n                voxels: voxels.serialize(self.cfg.chunk_size).inner,\n            })\n        }\n        save::VoxelNode { chunks }\n    }\n\n    /// Activates or spawns a character with a given name, or returns None if there is already an active\n    /// character with that name\n    pub fn activate_or_spawn_character(\n        &mut self,\n        hello: &ClientHello,\n    ) -> Option<(EntityId, Entity)> {\n        // Check for conflicting characters\n        if self\n            .world\n            .query::<&Character>()\n            .iter()\n            .any(|character| character.name == hello.name)\n        {\n            return None;\n        }\n\n        // Check for matching characters\n        let matching_character = self\n            .world\n            .query::<(Entity, &EntityId, &InactiveCharacter)>()\n            .iter()\n            .find(|(_, _, inactive_character)| inactive_character.0.name == hello.name)\n            .map(|(entity, entity_id, _)| (*entity_id, entity));\n        if let Some((entity_id, entity)) = matching_character {\n            info!(id = %entity_id, name = %hello.name, \"activating character\");\n            let inactive_character = self.world.remove_one::<InactiveCharacter>(entity).unwrap();\n            self.world\n                .insert(entity, (inactive_character.0, CharacterInput::default()))\n                .unwrap();\n            self.accumulated_changes.spawns.push(entity);\n            return Some((entity_id, entity));\n        }\n\n        // Spawn entirely new character\n        let position = Position {\n            node: 
NodeId::ROOT,\n            local: MIsometry::translation_along(&(na::Vector3::y() * 1.4)),\n        };\n        let character = Character {\n            name: hello.name.clone(),\n            state: CharacterState {\n                orientation: na::one(),\n                velocity: na::Vector3::zeros(),\n                on_ground: false,\n            },\n        };\n        let inventory = Inventory { contents: vec![] };\n        let initial_input = CharacterInput::default();\n        Some(self.spawn((position.node, position, character, inventory, initial_input)))\n    }\n\n    pub fn deactivate_character(&mut self, entity: Entity) {\n        let entity_id = *self.world.get::<&EntityId>(entity).unwrap();\n        let (character, _) = self\n            .world\n            .remove::<(Character, CharacterInput)>(entity)\n            .unwrap();\n        self.world\n            .insert_one(entity, InactiveCharacter(character))\n            .unwrap();\n        if let Some(index) = self\n            .accumulated_changes\n            .spawns\n            .iter()\n            .position(|e| *e == entity)\n        {\n            // Ensure that the same entity does not show up in the spawns\n            // and despawns list if the character entity is spawned and deactivated\n            // in the same frame. 
This can happen if a client connects and\n            // immediately disconnects due to an error.\n            self.accumulated_changes.spawns.remove(index);\n        } else {\n            self.accumulated_changes.despawns.push(entity_id);\n        }\n    }\n\n    fn spawn(&mut self, bundle: impl DynamicBundle) -> (EntityId, Entity) {\n        let id = self.new_id();\n        let mut entity_builder = EntityBuilder::new();\n        entity_builder.add(id);\n        entity_builder.add_bundle(bundle);\n        let entity = self.world.spawn(entity_builder.build());\n\n        if let Ok(node) = self.world.get::<&NodeId>(entity) {\n            self.graph_entities.insert(*node, entity);\n            self.dirty_nodes.insert(*node);\n        }\n\n        if let Ok(character) = self.world.get::<&Character>(entity) {\n            info!(%id, name = %character.name, \"spawning character\");\n        }\n\n        self.entity_ids.insert(id, entity);\n\n        if !self.world.satisfies::<&InactiveCharacter>(entity) {\n            self.accumulated_changes.spawns.push(entity);\n        }\n\n        (id, entity)\n    }\n\n    pub fn command(\n        &mut self,\n        entity: Entity,\n        command: Command,\n    ) -> Result<(), hecs::ComponentError> {\n        let mut input = self.world.get::<&mut CharacterInput>(entity)?;\n        *input = command.character_input;\n        let mut ch = self.world.get::<&mut Character>(entity)?;\n        ch.state.orientation = command.orientation;\n        Ok(())\n    }\n\n    pub fn destroy(&mut self, entity: Entity) {\n        let id = *self.world.get::<&EntityId>(entity).unwrap();\n        self.entity_ids.remove(&id);\n        if let Ok(node) = self.world.get::<&NodeId>(entity) {\n            self.graph_entities.remove(*node, entity);\n        }\n        if !self.world.satisfies::<&InactiveCharacter>(entity) {\n            self.accumulated_changes.despawns.push(id);\n        }\n        self.world.despawn(entity).unwrap();\n    }\n\n    /// 
Collect information about all entities, for transmission to new clients\n    pub fn snapshot(&self) -> Spawns {\n        let mut spawns = Spawns {\n            step: self.step,\n            spawns: Vec::new(),\n            despawns: Vec::new(),\n            nodes: self\n                .graph\n                .tree()\n                .map(|(side, parent)| FreshNode { side, parent })\n                .collect(),\n            block_updates: Vec::new(),\n            voxel_data: Vec::new(),\n            inventory_additions: Vec::new(),\n            inventory_removals: Vec::new(),\n        };\n        for (entity, &id) in &mut self\n            .world\n            .query::<hecs::Without<(Entity, &EntityId), &InactiveCharacter>>()\n        {\n            spawns.spawns.push((id, dump_entity(&self.world, entity)));\n        }\n        for &chunk_id in self.modified_chunks.iter() {\n            let voxels = match self.graph[chunk_id] {\n                Chunk::Populated { ref voxels, .. } => voxels,\n                _ => panic!(\"ungenerated chunk is marked as modified\"),\n            };\n\n            spawns\n                .voxel_data\n                .push((chunk_id, voxels.serialize(self.cfg.chunk_size)));\n        }\n        for (&chunk_id, voxels) in self.preloaded_voxel_data.iter() {\n            spawns\n                .voxel_data\n                .push((chunk_id, voxels.serialize(self.cfg.chunk_size)));\n        }\n        spawns\n    }\n\n    pub fn step(&mut self) -> (Option<Spawns>, StateDelta) {\n        let span = error_span!(\"step\", step = self.step);\n        let _guard = span.enter();\n\n        // We want to load all chunks that a player can interact with in a single step, so chunk_generation_distance\n        // is set up to cover that distance.\n        let chunk_generation_distance = self.cfg.character.character_radius\n            + self.cfg.character.speed_cap * self.cfg.step_interval.as_secs_f32()\n            + 
self.cfg.character.ground_distance_tolerance\n            + self.cfg.character.block_reach\n            + 0.001;\n\n        // Load all chunks around entities corresponding to clients, which correspond to entities\n        // with a \"Character\" component.\n        for (position, _) in self.world.query::<(&Position, &Character)>().iter() {\n            ensure_nearby(&mut self.graph, position, chunk_generation_distance);\n            let nodes = nearby_nodes(&self.graph, position, chunk_generation_distance);\n            for &(node, _) in &nodes {\n                for vertex in dodeca::Vertex::iter() {\n                    let chunk = ChunkId::new(node, vertex);\n                    if !matches!(self.graph[chunk], Chunk::Fresh) {\n                        continue;\n                    }\n                    if let Some(voxel_data) = self.preloaded_voxel_data.remove(&chunk) {\n                        self.modified_chunks.insert(chunk);\n                        self.graph.populate_chunk(chunk, voxel_data);\n                    } else {\n                        let params = ChunkParams::new(&mut self.graph, chunk);\n                        self.graph.populate_chunk(chunk, params.generate_voxels());\n                    }\n                }\n            }\n        }\n\n        let mut pending_block_updates: Vec<(Entity, BlockUpdate)> = vec![];\n\n        // Simulate\n        for (entity, node, position, character, input) in self\n            .world\n            .query::<(\n                Entity,\n                &NodeId,\n                &mut Position,\n                &mut Character,\n                &CharacterInput,\n            )>()\n            .iter()\n        {\n            character_controller::run_character_step(\n                &self.cfg,\n                &self.graph,\n                position,\n                &mut character.state.velocity,\n                &mut character.state.on_ground,\n                input,\n                
self.cfg.step_interval.as_secs_f32(),\n            );\n            if let Some(block_update) = input.block_update.clone() {\n                pending_block_updates.push((entity, block_update));\n            }\n            self.dirty_nodes.insert(*node);\n        }\n\n        for (entity, block_update) in pending_block_updates {\n            let id = *self.world.get::<&EntityId>(entity).unwrap();\n            self.attempt_block_update(id, block_update);\n        }\n\n        self.update_entity_node_ids();\n\n        let spawns = std::mem::take(&mut self.accumulated_changes).into_spawns(\n            self.step,\n            &self.world,\n            &self.graph,\n        );\n\n        // TODO: Omit unchanged (e.g. freshly spawned) entities (dirty flag?)\n        let delta = StateDelta {\n            latest_input: 0, // To be filled in by the caller\n            step: self.step,\n            positions: self\n                .world\n                .query::<(&EntityId, &Position)>()\n                .iter()\n                .map(|(&id, &position)| (id, position))\n                .collect(),\n            character_states: self\n                .world\n                .query::<(&EntityId, &Character)>()\n                .iter()\n                .map(|(&id, ch)| (id, ch.state.clone()))\n                .collect(),\n        };\n\n        self.step += 1;\n        (spawns, delta)\n    }\n\n    /// Ensure that the NodeId component of every entity is set to what it should be to ensure consistency. 
Any entity\n    /// with a position should have a NodeId that matches that position, and all entities with inventories should propagate\n    /// their NodeId to their inventory items.\n    fn update_entity_node_ids(&mut self) {\n        // Helper function for properly changing the NodeId of a given node without leaving any\n        // of the supporting structures out of date.\n        let mut update_node_id = |entity: Entity, node_id: &mut NodeId, new_node_id: NodeId| {\n            if *node_id != new_node_id {\n                self.dirty_nodes.insert(*node_id);\n                self.graph_entities.remove(*node_id, entity);\n\n                *node_id = new_node_id;\n                self.dirty_nodes.insert(*node_id);\n                self.graph_entities.insert(*node_id, entity);\n            }\n        };\n\n        // Synchronize NodeId and Position\n        for (entity, node_id, position) in self\n            .world\n            .query::<(Entity, &mut NodeId, &Position)>()\n            .iter()\n        {\n            update_node_id(entity, node_id, position.node);\n        }\n\n        // Synchronize NodeId for all inventory items.\n        // TODO: Note that the order in which inventory items are updated is arbitrary, so\n        // if inventory items can themselves have inventories, their respective NodeIds\n        // may be out of date by a few steps, which could cause bugs. 
This can be solved with\n        // a more complete entity hierarchy system\n        for (&inventory_node_id, inventory) in self.world.query::<(&NodeId, &Inventory)>().iter() {\n            for inventory_entity_id in &inventory.contents {\n                let inventory_entity = *self.entity_ids.get(inventory_entity_id).unwrap();\n\n                let mut inventory_entity_node_id =\n                    self.world.get::<&mut NodeId>(inventory_entity).unwrap();\n\n                update_node_id(\n                    inventory_entity,\n                    &mut inventory_entity_node_id,\n                    inventory_node_id,\n                );\n            }\n        }\n    }\n\n    fn new_id(&mut self) -> EntityId {\n        loop {\n            let id = self.rng.random();\n            if !self.entity_ids.contains_key(&id) {\n                return id;\n            }\n        }\n    }\n\n    /// Add the given entity to the given inventory\n    fn add_to_inventory(&mut self, inventory_id: EntityId, entity_id: EntityId) {\n        let mut inventory = self\n            .world\n            .get::<&mut Inventory>(*self.entity_ids.get(&inventory_id).unwrap())\n            .unwrap();\n        inventory.contents.push(entity_id);\n        self.accumulated_changes\n            .inventory_additions\n            .push((inventory_id, entity_id));\n    }\n\n    /// Remove the given entity from the given inventory. 
Note that this does not destroy the entity.\n    /// Returns whether the item was in the inventory to begin with.\n    fn remove_from_inventory(&mut self, inventory_id: EntityId, entity_id: EntityId) -> bool {\n        let mut inventory = self\n            .world\n            .get::<&mut Inventory>(*self.entity_ids.get(&inventory_id).unwrap())\n            .unwrap();\n        let Some(position) = inventory.contents.iter().position(|&e| e == entity_id) else {\n            return false;\n        };\n        inventory.contents.remove(position);\n        self.accumulated_changes\n            .inventory_removals\n            .push((inventory_id, entity_id));\n        true\n    }\n\n    /// Executes the requested block update if the subject is able to do so and\n    /// leaves the state of the world unchanged otherwise\n    fn attempt_block_update(&mut self, subject: EntityId, block_update: BlockUpdate) {\n        let subject_node = *self\n            .world\n            .get::<&NodeId>(*self.entity_ids.get(&subject).unwrap())\n            .unwrap();\n        let Some(old_material) = self\n            .graph\n            .get_material(block_update.chunk_id, block_update.coords)\n        else {\n            tracing::warn!(\"Block update received from ungenerated chunk\");\n            return;\n        };\n        if self.cfg.gameplay_enabled {\n            if block_update.new_material != Material::Void {\n                let Some(consumed_entity_id) = block_update.consumed_entity else {\n                    tracing::warn!(\"Tried to place block without consuming any entities\");\n                    return;\n                };\n                let Some(&consumed_entity) = self.entity_ids.get(&consumed_entity_id) else {\n                    tracing::warn!(\"Tried to consume an unknown entity ID\");\n                    return;\n                };\n                if !self\n                    .world\n                    .get::<&Material>(consumed_entity)\n                  
  .is_ok_and(|m| *m == block_update.new_material)\n                {\n                    tracing::warn!(\"Tried to consume wrong material\");\n                    return;\n                }\n                if !self.remove_from_inventory(subject, consumed_entity_id) {\n                    tracing::warn!(\"Tried to consume entity not in player inventory\");\n                    return;\n                }\n                self.destroy(consumed_entity);\n            }\n            if old_material != Material::Void {\n                let (produced_entity, _) = self.spawn((subject_node, old_material));\n                self.add_to_inventory(subject, produced_entity);\n            }\n        }\n        assert!(self.graph.update_block(&block_update));\n        self.modified_chunks.insert(block_update.chunk_id);\n        self.dirty_voxel_nodes.insert(block_update.chunk_id.node);\n        self.accumulated_changes.block_updates.push(block_update);\n    }\n}\n\n/// Collect all information about a particular entity for transmission to clients.\nfn dump_entity(world: &hecs::World, entity: Entity) -> Vec<Component> {\n    assert!(\n        !world.satisfies::<&InactiveCharacter>(entity),\n        \"Inactive characters should not be sent to clients\"\n    );\n    let mut components = Vec::new();\n    if let Ok(x) = world.get::<&Position>(entity) {\n        components.push(Component::Position(*x));\n    }\n    if let Ok(x) = world.get::<&Character>(entity) {\n        components.push(Component::Character((*x).clone()));\n    }\n    if let Ok(x) = world.get::<&Inventory>(entity) {\n        components.push(Component::Inventory((*x).clone()));\n    }\n    if let Ok(x) = world.get::<&Material>(entity) {\n        components.push(Component::Material(*x));\n    }\n    components\n}\n\n#[derive(Debug, Serialize, Deserialize, Clone)]\nstruct InactiveCharacter(pub Character);\n\n/// Stores changes that the server has canonically done but hasn't yet broadcast to 
clients\n#[derive(Default)]\nstruct AccumulatedChanges {\n    /// Entities that have been spawned since the last broadcast\n    spawns: Vec<Entity>,\n\n    /// Entities that have been despawned since the last broadcast\n    despawns: Vec<EntityId>,\n\n    /// Block updates that have been applied to the world since the last broadcast\n    block_updates: Vec<BlockUpdate>,\n\n    /// Entities that have been added to an inventory since the last broadcast, where `(a, b)` represents\n    /// entity `b` being added to inventory `a`\n    inventory_additions: Vec<(EntityId, EntityId)>,\n\n    /// Entities that have been removed from an inventory since the last broadcast, where `(a, b)` represents\n    /// entity `b` being removed from inventory `a`\n    inventory_removals: Vec<(EntityId, EntityId)>,\n\n    /// Nodes that have been added to the graph since the last broadcast\n    fresh_nodes: Vec<NodeId>,\n}\n\nimpl AccumulatedChanges {\n    fn is_empty(&self) -> bool {\n        self.spawns.is_empty()\n            && self.despawns.is_empty()\n            && self.block_updates.is_empty()\n            && self.inventory_additions.is_empty()\n            && self.inventory_removals.is_empty()\n            && self.fresh_nodes.is_empty()\n    }\n\n    /// Convert state changes for broadcast to clients\n    fn into_spawns(self, step: Step, world: &hecs::World, graph: &Graph) -> Option<Spawns> {\n        if self.is_empty() {\n            return None;\n        }\n\n        let mut spawns = Vec::with_capacity(self.spawns.len());\n        for entity in self.spawns {\n            let id = *world.get::<&EntityId>(entity).unwrap();\n            spawns.push((id, dump_entity(world, entity)));\n        }\n\n        if !self.fresh_nodes.is_empty() {\n            trace!(count = self.fresh_nodes.len(), \"broadcasting fresh nodes\");\n        }\n\n        Some(Spawns {\n            step,\n            spawns,\n            despawns: self.despawns,\n            nodes: self\n                
.fresh_nodes\n                .iter()\n                .filter_map(|&id| {\n                    let side = graph.primary_parent_side(id)?;\n                    Some(FreshNode {\n                        side,\n                        parent: graph.neighbor(id, side).unwrap(),\n                    })\n                })\n                .collect(),\n            block_updates: self.block_updates,\n            voxel_data: Vec::new(),\n            inventory_additions: self.inventory_additions,\n            inventory_removals: self.inventory_removals,\n        })\n    }\n}\n"
  },
  {
    "path": "shell.nix",
    "content": "let\n  moz_overlay = import (builtins.fetchTarball\n    \"https://github.com/mozilla/nixpkgs-mozilla/archive/9b11a87c0cc54e308fa83aac5b4ee1816d5418a2.tar.gz\");\n  nixpkgs = import <nixpkgs> { overlays = [ moz_overlay ]; };\nin with nixpkgs;\nlet\n  dlopen-libs = with xorg; [ vulkan-loader libX11 libXcursor libXrandr libXi libxkbcommon ];\nin mkShell.override {\n  stdenv = pkgs.stdenvAdapters.useMoldLinker pkgs.stdenv;\n} {\n  nativeBuildInputs = with pkgs; [\n    rustChannels.stable.rust\n    pkg-config\n    zstd\n    protobuf\n  ];\n  shellHook = ''\n    export RUST_BACKTRACE=1\n    export ZSTD_SYS_USE_PKG_CONFIG=1\n    export SHADERC_LIB_DIR=\"${shaderc.static}/lib\"\n    export LD_LIBRARY_PATH=\"$LD_LIBRARY_PATH:${lib.makeLibraryPath dlopen-libs}\"\n    #export VK_INSTANCE_LAYERS=VK_LAYER_KHRONOS_validation\n    export XDG_DATA_DIRS=\"$XDG_DATA_DIRS:${vulkan-validation-layers}/share\"\n    export RUST_LOG=client=trace,server=trace,common=trace,vulkan=info\n  '';\n}\n"
  }
]