[
  {
    "path": ".github/workflows/rust.yml",
    "content": "name: Rust\n\non:\n  push:\n    branches: [\"master\"]\n  pull_request:\n    branches: [\"master\"]\n\nenv:\n  CARGO_TERM_COLOR: always\n\njobs:\n  test:\n    runs-on: ${{ matrix.os }}\n\n    strategy:\n      matrix:\n        os:\n          - ubuntu-latest\n          - windows-latest\n          - macOS-latest\n        toolchain:\n          - nightly\n          - stable\n        cargo_flags:\n          - \"--all-features\"\n          - \"--no-default-features\"\n          - \"\"\n        exclude:\n          - cargo_flags: \"--all-features\"\n            toolchain: stable\n\n    steps:\n      - name: Checkout sources\n        uses: actions/checkout@v2\n      - name: Install rust toolchain\n        uses: dtolnay/rust-toolchain@stable\n        with:\n          toolchain: ${{ matrix.toolchain }}\n      - name: Generate lockfile\n        run: cargo generate-lockfile\n      - name: Cache\n        id: cache-restore\n        uses: actions/cache@v4\n        with:\n          path: |\n            ~/.cargo/registry\n            ~/.cargo/git\n            target\n          key: ${{ runner.os }}-cargo-test-${{ hashFiles('**/Cargo.lock') }}-${{ matrix.cargo_flags }}\n      - name: Build with tests\n        uses: actions-rs/cargo@v1\n        with:\n          command: test\n          args: --no-run --workspace ${{ matrix.cargo_flags }} --exclude dumpster_bench\n      - name: Run tests\n        uses: actions-rs/cargo@v1\n        with:\n          command: test\n          args: --workspace ${{ matrix.cargo_flags }} --exclude dumpster_bench\n      - name: Save cache\n        id: cache-save\n        uses: actions/cache/save@v4\n        if: always() && steps.cache-restore.cache-hit != 'true'\n        with:\n          path: |\n            ~/.cargo/registry\n            ~/.cargo/git\n            target\n          key: ${{ runner.os }}-cargo-test-${{ hashFiles('**/Cargo.lock') }}-${{ matrix.cargo_flags }}\n\n  miri:\n    runs-on: ${{ matrix.os }}\n\n    strategy:\n      
matrix:\n        os:\n          - ubuntu-latest\n          - windows-latest\n          - macOS-latest\n        toolchain:\n          - nightly\n        cargo_flags:\n          - \"--all-features\"\n\n    steps:\n      - name: Checkout sources\n        uses: actions/checkout@v4\n      - name: Install rust toolchain\n        uses: dtolnay/rust-toolchain@stable\n        with:\n          toolchain: ${{ matrix.toolchain }}\n          components: miri\n      - name: Generate lockfile\n        run: cargo generate-lockfile\n      - name: Cache\n        id: cache-restore\n        uses: actions/cache@v4\n        with:\n          path: |\n            ~/.cargo/registry\n            ~/.cargo/git\n            target\n          key: ${{ runner.os }}-cargo-miri-${{ hashFiles('**/Cargo.lock') }}-${{ matrix.cargo_flags }}\n      - name: Build miri test executables\n        uses: actions-rs/cargo@v1\n        with:\n          command: miri\n          args: test --no-run --workspace ${{ matrix.cargo_flags }} --exclude dumpster_bench\n      - name: Run miri tests\n        uses: actions-rs/cargo@v1\n        with:\n          command: miri\n          args: test --workspace ${{ matrix.cargo_flags }} --exclude dumpster_bench\n      - name: Save cache\n        id: cache-save\n        uses: actions/cache/save@v4\n        if: always() && steps.cache-restore.outputs.cache-hit != 'true'\n        with:\n          path: |\n            ~/.cargo/registry\n            ~/.cargo/git\n            target\n          key: ${{ runner.os }}-cargo-miri-${{ hashFiles('**/Cargo.lock') }}-${{ matrix.cargo_flags }}\n\n  loom:\n    runs-on: ubuntu-latest\n\n    steps:\n      - name: Checkout sources\n        uses: actions/checkout@v4\n      - name: Install rust toolchain\n        uses: dtolnay/rust-toolchain@stable\n        with:\n          toolchain: stable\n      - name: Generate lockfile\n        run: cargo generate-lockfile\n      - name: Cache\n        id: cache-restore\n        uses: actions/cache@v4\n        with:\n 
         path: |\n            ~/.cargo/registry\n            ~/.cargo/git\n            target\n          key: ${{ runner.os }}-cargo-loom-${{ hashFiles('**/Cargo.lock') }}\n      - name: Build with tests\n        uses: actions-rs/cargo@v1\n        env:\n          RUSTFLAGS: \"--cfg loom\"\n        with:\n          command: test\n          args: --lib -p dumpster loom --release --no-run\n      - name: Run tests\n        uses: actions-rs/cargo@v1\n        env:\n          RUSTFLAGS: \"--cfg loom\"\n        with:\n          command: test\n          args: --lib -p dumpster loom --release\n      - name: Save cache\n        id: cache-save\n        uses: actions/cache/save@v4\n        if: always() && steps.cache-restore.outputs.cache-hit != 'true'\n        with:\n          path: |\n            ~/.cargo/registry\n            ~/.cargo/git\n            target\n          key: ${{ runner.os }}-cargo-loom-${{ hashFiles('**/Cargo.lock') }}\n"
  },
  {
    "path": ".gitignore",
    "content": "/target\n/Cargo.lock\n\n*.csv\n.vscode\n.zed\n"
  },
  {
    "path": "CHANGELOG.md",
    "content": "# `dumpster` Changelog\n\n## 2.1.0\n\n### New features\n\n- Implemented `FromIterator` for `Gc<[T]>`.\n\n## 2.0.0\n\n### Breaking changes\n\n- Refactored `Trace` to use `TraceWith<V>`.\n\n### New features\n\n- Added `sync::Gc::new_cyclic`.\n\n## 1.2.0\n\n### New features\n\n- Added experimental support for testing under `loom`.\n- Added `unsync::Gc::new_cyclic`.\n- Implemented `Default` for `Gc`.\n- Added `Gc::make_mut`.\n- Added `From` implementations for `Gc`.\n- Supported differing `BuildHasher` types in `Trace` implementation for `HashSet`.\n- Added `sync::coerce_gc` and `unsync::coerce_gc`.\n- Added `Trace` implementation to more types in the Rust standard library.\n\n### Bug fixes\n\n- Fixed broken references in documentation.\n- Added overflow testing for `Gc` reference counts.\n- `Gc`s created in a garbage-collected value's `Drop` implementation are no longer leaked.\n\n## 1.1.1\n\n### Bug fixes\n\n- Using `dumpster` no longer fails under Miri as we have changed our underlying pointer model.\n\n## 1.1.0\n\n### New features\n\n- Added support for [`either`](https://crates.io/crates/either).\n\n### Bug fixes\n\n- Derive implementations no longer erroneously refer to `heapsize`.\n\n### Other changes\n\n- Slight performance and code style improvements.\n- Improved internal documentation on safety.\n- Remove `strict-provenance` requirement as it is now stabilized.\n\n## 1.0.0\n\n### Breaking changes\n\n- Rename `Collectable` to `Trace`.\n\n## 0.2.1\n\n### New features\n\n- Implement `Collectable` for `std::any::TypeId`.\n\n## 0.2.0\n\n### New features\n\n- Added `Gc::as_ptr`.\n- Added `Gc::ptr_eq`.\n- Implemented `PartialEq` and `Eq` for garbage collected pointers.\n\n### Other\n\n- Changed license from GNU GPLv3 or later to MPL 2.0.\n- Allocations which do not contain `Gc`s will simply be reference counted.\n\n## 0.1.2\n\n### New features\n\n- Implement `Collectable` for `OnceCell`, `HashMap`, and `BTreeMap`.\n- Add `try_clone` and `try_deref` 
to `unsync::Gc` and `sync::Gc`.\n- Make dereferencing `Gc` only panic on truly-dead `Gc`s.\n\n### Bugfixes\n\n- Prevent dead `Gc`s from escaping their `Drop` implementation, potentially causing UAFs.\n- Use fully-qualified name for `Result` in derive macro, preventing some bugs.\n\n### Other\n\n- Improve performance in `unsync` by using `parking_lot` for concurrency primitives.\n- Improve documentation of panicking behavior in `Gc`.\n- Fix spelling mistakes in documentation.\n\n## 0.1.1\n\n### Bugfixes\n\n- Prevent possible UAFs caused by accessing `Gc`s during `Drop` impls by panicking.\n\n### Other\n\n- Fix spelling mistakes in documentation.\n\n## 0.1.0\n\nInitial release.\n"
  },
  {
    "path": "Cargo.toml",
    "content": "[workspace]\nmembers = [\n    \"dumpster\",\n    \"dumpster_derive\",\n    \"dumpster_test\",\n    \"dumpster_bench\",\n]\nresolver = \"2\"\n\n[patch.crates-io]\ndumpster = { path = \"dumpster\" }\n\n[profile.release]\nlto = true"
  },
  {
    "path": "LICENSE-APACHE",
    "content": "                              Apache License\n                        Version 2.0, January 2004\n                     http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. Definitions.\n\n   \"License\" shall mean the terms and conditions for use, reproduction,\n   and distribution as defined by Sections 1 through 9 of this document.\n\n   \"Licensor\" shall mean the copyright owner or entity authorized by\n   the copyright owner that is granting the License.\n\n   \"Legal Entity\" shall mean the union of the acting entity and all\n   other entities that control, are controlled by, or are under common\n   control with that entity. For the purposes of this definition,\n   \"control\" means (i) the power, direct or indirect, to cause the\n   direction or management of such entity, whether by contract or\n   otherwise, or (ii) ownership of fifty percent (50%) or more of the\n   outstanding shares, or (iii) beneficial ownership of such entity.\n\n   \"You\" (or \"Your\") shall mean an individual or Legal Entity\n   exercising permissions granted by this License.\n\n   \"Source\" form shall mean the preferred form for making modifications,\n   including but not limited to software source code, documentation\n   source, and configuration files.\n\n   \"Object\" form shall mean any form resulting from mechanical\n   transformation or translation of a Source form, including but\n   not limited to compiled object code, generated documentation,\n   and conversions to other media types.\n\n   \"Work\" shall mean the work of authorship, whether in Source or\n   Object form, made available under the License, as indicated by a\n   copyright notice that is included in or attached to the work\n   (an example is provided in the Appendix below).\n\n   \"Derivative Works\" shall mean any work, whether in Source or Object\n   form, that is based on (or derived from) the Work and for which the\n   editorial revisions, 
annotations, elaborations, or other modifications\n   represent, as a whole, an original work of authorship. For the purposes\n   of this License, Derivative Works shall not include works that remain\n   separable from, or merely link (or bind by name) to the interfaces of,\n   the Work and Derivative Works thereof.\n\n   \"Contribution\" shall mean any work of authorship, including\n   the original version of the Work and any modifications or additions\n   to that Work or Derivative Works thereof, that is intentionally\n   submitted to Licensor for inclusion in the Work by the copyright owner\n   or by an individual or Legal Entity authorized to submit on behalf of\n   the copyright owner. For the purposes of this definition, \"submitted\"\n   means any form of electronic, verbal, or written communication sent\n   to the Licensor or its representatives, including but not limited to\n   communication on electronic mailing lists, source code control systems,\n   and issue tracking systems that are managed by, or on behalf of, the\n   Licensor for the purpose of discussing and improving the Work, but\n   excluding communication that is conspicuously marked or otherwise\n   designated in writing by the copyright owner as \"Not a Contribution.\"\n\n   \"Contributor\" shall mean Licensor and any individual or Legal Entity\n   on behalf of whom a Contribution has been received by Licensor and\n   subsequently incorporated within the Work.\n\n2. Grant of Copyright License. Subject to the terms and conditions of\n   this License, each Contributor hereby grants to You a perpetual,\n   worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n   copyright license to reproduce, prepare Derivative Works of,\n   publicly display, publicly perform, sublicense, and distribute the\n   Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. 
Subject to the terms and conditions of\n   this License, each Contributor hereby grants to You a perpetual,\n   worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n   (except as stated in this section) patent license to make, have made,\n   use, offer to sell, sell, import, and otherwise transfer the Work,\n   where such license applies only to those patent claims licensable\n   by such Contributor that are necessarily infringed by their\n   Contribution(s) alone or by combination of their Contribution(s)\n   with the Work to which such Contribution(s) was submitted. If You\n   institute patent litigation against any entity (including a\n   cross-claim or counterclaim in a lawsuit) alleging that the Work\n   or a Contribution incorporated within the Work constitutes direct\n   or contributory patent infringement, then any patent licenses\n   granted to You under this License for that Work shall terminate\n   as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n   Work or Derivative Works thereof in any medium, with or without\n   modifications, and in Source or Object form, provided that You\n   meet the following conditions:\n\n   (a) You must give any other recipients of the Work or\n       Derivative Works a copy of this License; and\n\n   (b) You must cause any modified files to carry prominent notices\n       stating that You changed the files; and\n\n   (c) You must retain, in the Source form of any Derivative Works\n       that You distribute, all copyright, patent, trademark, and\n       attribution notices from the Source form of the Work,\n       excluding those notices that do not pertain to any part of\n       the Derivative Works; and\n\n   (d) If the Work includes a \"NOTICE\" text file as part of its\n       distribution, then any Derivative Works that You distribute must\n       include a readable copy of the attribution notices contained\n       within such NOTICE file, excluding 
those notices that do not\n       pertain to any part of the Derivative Works, in at least one\n       of the following places: within a NOTICE text file distributed\n       as part of the Derivative Works; within the Source form or\n       documentation, if provided along with the Derivative Works; or,\n       within a display generated by the Derivative Works, if and\n       wherever such third-party notices normally appear. The contents\n       of the NOTICE file are for informational purposes only and\n       do not modify the License. You may add Your own attribution\n       notices within Derivative Works that You distribute, alongside\n       or as an addendum to the NOTICE text from the Work, provided\n       that such additional attribution notices cannot be construed\n       as modifying the License.\n\n   You may add Your own copyright statement to Your modifications and\n   may provide additional or different license terms and conditions\n   for use, reproduction, or distribution of Your modifications, or\n   for any such Derivative Works as a whole, provided Your use,\n   reproduction, and distribution of the Work otherwise complies with\n   the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n   any Contribution intentionally submitted for inclusion in the Work\n   by You to the Licensor shall be under the terms and conditions of\n   this License, without any additional terms or conditions.\n   Notwithstanding the above, nothing herein shall supersede or modify\n   the terms of any separate license agreement you may have executed\n   with Licensor regarding such Contributions.\n\n6. Trademarks. This License does not grant permission to use the trade\n   names, trademarks, service marks, or product names of the Licensor,\n   except as required for reasonable and customary use in describing the\n   origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. 
Unless required by applicable law or\n   agreed to in writing, Licensor provides the Work (and each\n   Contributor provides its Contributions) on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n   implied, including, without limitation, any warranties or conditions\n   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n   PARTICULAR PURPOSE. You are solely responsible for determining the\n   appropriateness of using or redistributing the Work and assume any\n   risks associated with Your exercise of permissions under this License.\n\n8. Limitation of Liability. In no event and under no legal theory,\n   whether in tort (including negligence), contract, or otherwise,\n   unless required by applicable law (such as deliberate and grossly\n   negligent acts) or agreed to in writing, shall any Contributor be\n   liable to You for damages, including any direct, indirect, special,\n   incidental, or consequential damages of any character arising as a\n   result of this License or out of the use or inability to use the\n   Work (including but not limited to damages for loss of goodwill,\n   work stoppage, computer failure or malfunction, or any and all\n   other commercial damages or losses), even if such Contributor\n   has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n   the Work or Derivative Works thereof, You may choose to offer,\n   and charge a fee for, acceptance of support, warranty, indemnity,\n   or other liability obligations and/or rights consistent with this\n   License. 
However, in accepting such obligations, You may act only\n   on Your own behalf and on Your sole responsibility, not on behalf\n   of any other Contributor, and only if You agree to indemnify,\n   defend, and hold each Contributor harmless for any liability\n   incurred by, or claims asserted against, such Contributor by reason\n   of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n"
  },
  {
    "path": "LICENSE-MIT",
    "content": "Copyright (c) The Rust Project Contributors\n\nPermission is hereby granted, free of charge, to any\nperson obtaining a copy of this software and associated\ndocumentation files (the \"Software\"), to deal in the\nSoftware without restriction, including without\nlimitation the rights to use, copy, modify, merge,\npublish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software\nis furnished to do so, subject to the following\nconditions:\n\nThe above copyright notice and this permission notice\nshall be included in all copies or substantial portions\nof the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF\nANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED\nTO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\nPARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\nSHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\nCLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\nOF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR\nIN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n"
  },
  {
    "path": "LICENSE.md",
    "content": "Mozilla Public License Version 2.0\r\n==================================\r\n\r\n### 1. Definitions\r\n\r\n**1.1. “Contributor”**  \r\n    means each individual or legal entity that creates, contributes to\r\n    the creation of, or owns Covered Software.\r\n\r\n**1.2. “Contributor Version”**  \r\n    means the combination of the Contributions of others (if any) used\r\n    by a Contributor and that particular Contributor's Contribution.\r\n\r\n**1.3. “Contribution”**  \r\n    means Covered Software of a particular Contributor.\r\n\r\n**1.4. “Covered Software”**  \r\n    means Source Code Form to which the initial Contributor has attached\r\n    the notice in Exhibit A, the Executable Form of such Source Code\r\n    Form, and Modifications of such Source Code Form, in each case\r\n    including portions thereof.\r\n\r\n**1.5. “Incompatible With Secondary Licenses”**  \r\n    means\r\n\r\n* **(a)** that the initial Contributor has attached the notice described\r\n    in Exhibit B to the Covered Software; or\r\n* **(b)** that the Covered Software was made available under the terms of\r\n    version 1.1 or earlier of the License, but not also under the\r\n    terms of a Secondary License.\r\n\r\n**1.6. “Executable Form”**  \r\n    means any form of the work other than Source Code Form.\r\n\r\n**1.7. “Larger Work”**  \r\n    means a work that combines Covered Software with other material, in \r\n    a separate file or files, that is not Covered Software.\r\n\r\n**1.8. “License”**  \r\n    means this document.\r\n\r\n**1.9. “Licensable”**  \r\n    means having the right to grant, to the maximum extent possible,\r\n    whether at the time of the initial grant or subsequently, any and\r\n    all of the rights conveyed by this License.\r\n\r\n**1.10. 
“Modifications”**  \r\n    means any of the following:\r\n\r\n* **(a)** any file in Source Code Form that results from an addition to,\r\n    deletion from, or modification of the contents of Covered\r\n    Software; or\r\n* **(b)** any new file in Source Code Form that contains any Covered\r\n    Software.\r\n\r\n**1.11. “Patent Claims” of a Contributor**  \r\n    means any patent claim(s), including without limitation, method,\r\n    process, and apparatus claims, in any patent Licensable by such\r\n    Contributor that would be infringed, but for the grant of the\r\n    License, by the making, using, selling, offering for sale, having\r\n    made, import, or transfer of either its Contributions or its\r\n    Contributor Version.\r\n\r\n**1.12. “Secondary License”**  \r\n    means either the GNU General Public License, Version 2.0, the GNU\r\n    Lesser General Public License, Version 2.1, the GNU Affero General\r\n    Public License, Version 3.0, or any later versions of those\r\n    licenses.\r\n\r\n**1.13. “Source Code Form”**  \r\n    means the form of the work preferred for making modifications.\r\n\r\n**1.14. “You” (or “Your”)**  \r\n    means an individual or a legal entity exercising rights under this\r\n    License. For legal entities, “You” includes any entity that\r\n    controls, is controlled by, or is under common control with You. For\r\n    purposes of this definition, “control” means **(a)** the power, direct\r\n    or indirect, to cause the direction or management of such entity,\r\n    whether by contract or otherwise, or **(b)** ownership of more than\r\n    fifty percent (50%) of the outstanding shares or beneficial\r\n    ownership of such entity.\r\n\r\n\r\n### 2. License Grants and Conditions\r\n\r\n#### 2.1. 
Grants\r\n\r\nEach Contributor hereby grants You a world-wide, royalty-free,\r\nnon-exclusive license:\r\n\r\n* **(a)** under intellectual property rights (other than patent or trademark)\r\n    Licensable by such Contributor to use, reproduce, make available,\r\n    modify, display, perform, distribute, and otherwise exploit its\r\n    Contributions, either on an unmodified basis, with Modifications, or\r\n    as part of a Larger Work; and\r\n* **(b)** under Patent Claims of such Contributor to make, use, sell, offer\r\n    for sale, have made, import, and otherwise transfer either its\r\n    Contributions or its Contributor Version.\r\n\r\n#### 2.2. Effective Date\r\n\r\nThe licenses granted in Section 2.1 with respect to any Contribution\r\nbecome effective for each Contribution on the date the Contributor first\r\ndistributes such Contribution.\r\n\r\n#### 2.3. Limitations on Grant Scope\r\n\r\nThe licenses granted in this Section 2 are the only rights granted under\r\nthis License. No additional rights or licenses will be implied from the\r\ndistribution or licensing of Covered Software under this License.\r\nNotwithstanding Section 2.1(b) above, no patent license is granted by a\r\nContributor:\r\n\r\n* **(a)** for any code that a Contributor has removed from Covered Software;\r\n    or\r\n* **(b)** for infringements caused by: **(i)** Your and any other third party's\r\n    modifications of Covered Software, or **(ii)** the combination of its\r\n    Contributions with other software (except as part of its Contributor\r\n    Version); or\r\n* **(c)** under Patent Claims infringed by Covered Software in the absence of\r\n    its Contributions.\r\n\r\nThis License does not grant any rights in the trademarks, service marks,\r\nor logos of any Contributor (except as may be necessary to comply with\r\nthe notice requirements in Section 3.4).\r\n\r\n#### 2.4. 
Subsequent Licenses\r\n\r\nNo Contributor makes additional grants as a result of Your choice to\r\ndistribute the Covered Software under a subsequent version of this\r\nLicense (see Section 10.2) or under the terms of a Secondary License (if\r\npermitted under the terms of Section 3.3).\r\n\r\n#### 2.5. Representation\r\n\r\nEach Contributor represents that the Contributor believes its\r\nContributions are its original creation(s) or it has sufficient rights\r\nto grant the rights to its Contributions conveyed by this License.\r\n\r\n#### 2.6. Fair Use\r\n\r\nThis License is not intended to limit any rights You have under\r\napplicable copyright doctrines of fair use, fair dealing, or other\r\nequivalents.\r\n\r\n#### 2.7. Conditions\r\n\r\nSections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted\r\nin Section 2.1.\r\n\r\n\r\n### 3. Responsibilities\r\n\r\n#### 3.1. Distribution of Source Form\r\n\r\nAll distribution of Covered Software in Source Code Form, including any\r\nModifications that You create or to which You contribute, must be under\r\nthe terms of this License. You must inform recipients that the Source\r\nCode Form of the Covered Software is governed by the terms of this\r\nLicense, and how they can obtain a copy of this License. You may not\r\nattempt to alter or restrict the recipients' rights in the Source Code\r\nForm.\r\n\r\n#### 3.2. 
Distribution of Executable Form\r\n\r\nIf You distribute Covered Software in Executable Form then:\r\n\r\n* **(a)** such Covered Software must also be made available in Source Code\r\n    Form, as described in Section 3.1, and You must inform recipients of\r\n    the Executable Form how they can obtain a copy of such Source Code\r\n    Form by reasonable means in a timely manner, at a charge no more\r\n    than the cost of distribution to the recipient; and\r\n\r\n* **(b)** You may distribute such Executable Form under the terms of this\r\n    License, or sublicense it under different terms, provided that the\r\n    license for the Executable Form does not attempt to limit or alter\r\n    the recipients' rights in the Source Code Form under this License.\r\n\r\n#### 3.3. Distribution of a Larger Work\r\n\r\nYou may create and distribute a Larger Work under terms of Your choice,\r\nprovided that You also comply with the requirements of this License for\r\nthe Covered Software. If the Larger Work is a combination of Covered\r\nSoftware with a work governed by one or more Secondary Licenses, and the\r\nCovered Software is not Incompatible With Secondary Licenses, this\r\nLicense permits You to additionally distribute such Covered Software\r\nunder the terms of such Secondary License(s), so that the recipient of\r\nthe Larger Work may, at their option, further distribute the Covered\r\nSoftware under the terms of either this License or such Secondary\r\nLicense(s).\r\n\r\n#### 3.4. Notices\r\n\r\nYou may not remove or alter the substance of any license notices\r\n(including copyright notices, patent notices, disclaimers of warranty,\r\nor limitations of liability) contained within the Source Code Form of\r\nthe Covered Software, except that You may alter any license notices to\r\nthe extent required to remedy known factual inaccuracies.\r\n\r\n#### 3.5. 
Application of Additional Terms\r\n\r\nYou may choose to offer, and to charge a fee for, warranty, support,\r\nindemnity or liability obligations to one or more recipients of Covered\r\nSoftware. However, You may do so only on Your own behalf, and not on\r\nbehalf of any Contributor. You must make it absolutely clear that any\r\nsuch warranty, support, indemnity, or liability obligation is offered by\r\nYou alone, and You hereby agree to indemnify every Contributor for any\r\nliability incurred by such Contributor as a result of warranty, support,\r\nindemnity or liability terms You offer. You may include additional\r\ndisclaimers of warranty and limitations of liability specific to any\r\njurisdiction.\r\n\r\n\r\n### 4. Inability to Comply Due to Statute or Regulation\r\n\r\nIf it is impossible for You to comply with any of the terms of this\r\nLicense with respect to some or all of the Covered Software due to\r\nstatute, judicial order, or regulation then You must: **(a)** comply with\r\nthe terms of this License to the maximum extent possible; and **(b)**\r\ndescribe the limitations and the code they affect. Such description must\r\nbe placed in a text file included with all distributions of the Covered\r\nSoftware under this License. Except to the extent prohibited by statute\r\nor regulation, such description must be sufficiently detailed for a\r\nrecipient of ordinary skill to be able to understand it.\r\n\r\n\r\n### 5. Termination\r\n\r\n**5.1.** The rights granted under this License will terminate automatically\r\nif You fail to comply with any of its terms. 
However, if You become\r\ncompliant, then the rights granted under this License from a particular\r\nContributor are reinstated **(a)** provisionally, unless and until such\r\nContributor explicitly and finally terminates Your grants, and **(b)** on an\r\nongoing basis, if such Contributor fails to notify You of the\r\nnon-compliance by some reasonable means prior to 60 days after You have\r\ncome back into compliance. Moreover, Your grants from a particular\r\nContributor are reinstated on an ongoing basis if such Contributor\r\nnotifies You of the non-compliance by some reasonable means, this is the\r\nfirst time You have received notice of non-compliance with this License\r\nfrom such Contributor, and You become compliant prior to 30 days after\r\nYour receipt of the notice.\r\n\r\n**5.2.** If You initiate litigation against any entity by asserting a patent\r\ninfringement claim (excluding declaratory judgment actions,\r\ncounter-claims, and cross-claims) alleging that a Contributor Version\r\ndirectly or indirectly infringes any patent, then the rights granted to\r\nYou by any and all Contributors for the Covered Software under Section\r\n2.1 of this License shall terminate.\r\n\r\n**5.3.** In the event of termination under Sections 5.1 or 5.2 above, all\r\nend user license agreements (excluding distributors and resellers) which\r\nhave been validly granted by You or Your distributors under this License\r\nprior to termination shall survive termination.\r\n\r\n\r\n### 6. Disclaimer of Warranty\r\n\r\n> Covered Software is provided under this License on an “as is”\r\n> basis, without warranty of any kind, either expressed, implied, or\r\n> statutory, including, without limitation, warranties that the\r\n> Covered Software is free of defects, merchantable, fit for a\r\n> particular purpose or non-infringing. 
The entire risk as to the\r\n> quality and performance of the Covered Software is with You.\r\n> Should any Covered Software prove defective in any respect, You\r\n> (not any Contributor) assume the cost of any necessary servicing,\r\n> repair, or correction. This disclaimer of warranty constitutes an\r\n> essential part of this License. No use of any Covered Software is\r\n> authorized under this License except under this disclaimer.\r\n\r\n### 7. Limitation of Liability\r\n\r\n> Under no circumstances and under no legal theory, whether tort\r\n> (including negligence), contract, or otherwise, shall any\r\n> Contributor, or anyone who distributes Covered Software as\r\n> permitted above, be liable to You for any direct, indirect,\r\n> special, incidental, or consequential damages of any character\r\n> including, without limitation, damages for lost profits, loss of\r\n> goodwill, work stoppage, computer failure or malfunction, or any\r\n> and all other commercial damages or losses, even if such party\r\n> shall have been informed of the possibility of such damages. This\r\n> limitation of liability shall not apply to liability for death or\r\n> personal injury resulting from such party's negligence to the\r\n> extent applicable law prohibits such limitation. Some\r\n> jurisdictions do not allow the exclusion or limitation of\r\n> incidental or consequential damages, so this exclusion and\r\n> limitation may not apply to You.\r\n\r\n\r\n### 8. Litigation\r\n\r\nAny litigation relating to this License may be brought only in the\r\ncourts of a jurisdiction where the defendant maintains its principal\r\nplace of business and such litigation shall be governed by laws of that\r\njurisdiction, without reference to its conflict-of-law provisions.\r\nNothing in this Section shall prevent a party's ability to bring\r\ncross-claims or counter-claims.\r\n\r\n\r\n### 9. Miscellaneous\r\n\r\nThis License represents the complete agreement concerning the subject\r\nmatter hereof. 
If any provision of this License is held to be\r\nunenforceable, such provision shall be reformed only to the extent\r\nnecessary to make it enforceable. Any law or regulation which provides\r\nthat the language of a contract shall be construed against the drafter\r\nshall not be used to construe this License against a Contributor.\r\n\r\n\r\n### 10. Versions of the License\r\n\r\n#### 10.1. New Versions\r\n\r\nMozilla Foundation is the license steward. Except as provided in Section\r\n10.3, no one other than the license steward has the right to modify or\r\npublish new versions of this License. Each version will be given a\r\ndistinguishing version number.\r\n\r\n#### 10.2. Effect of New Versions\r\n\r\nYou may distribute the Covered Software under the terms of the version\r\nof the License under which You originally received the Covered Software,\r\nor under the terms of any subsequent version published by the license\r\nsteward.\r\n\r\n#### 10.3. Modified Versions\r\n\r\nIf you create software not governed by this License, and you want to\r\ncreate a new license for such software, you may create and use a\r\nmodified version of this License if you rename the license and remove\r\nany references to the name of the license steward (except to note that\r\nsuch modified license differs from this License).\r\n\r\n#### 10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses\r\n\r\nIf You choose to distribute Source Code Form that is Incompatible With\r\nSecondary Licenses under the terms of this version of the License, the\r\nnotice described in Exhibit B of this License must be attached.\r\n\r\n## Exhibit A - Source Code Form License Notice\r\n\r\n    This Source Code Form is subject to the terms of the Mozilla Public\r\n    License, v. 2.0. 
If a copy of the MPL was not distributed with this\r\n    file, You can obtain one at http://mozilla.org/MPL/2.0/.\r\n\r\nIf it is not possible or desirable to put the notice in a particular\r\nfile, then You may include the notice in a location (such as a LICENSE\r\nfile in a relevant directory) where a recipient would be likely to look\r\nfor such a notice.\r\n\r\nYou may add additional accurate notices of copyright ownership.\r\n\r\n## Exhibit B - “Incompatible With Secondary Licenses” Notice\r\n\r\n    This Source Code Form is \"Incompatible With Secondary Licenses\", as\r\n    defined by the Mozilla Public License, v. 2.0."
  },
  {
    "path": "README.md",
    "content": "# `dumpster`: A cycle-tracking garbage collector for Rust\n\n[![Crates.io page](https://img.shields.io/crates/v/dumpster)](https://crates.io/crates/dumpster)\n[![docs.rs](https://img.shields.io/docsrs/dumpster)](https://docs.rs/dumpster)\n\n`dumpster` is a cycle-detecting garbage collector for Rust.\nIt detects unreachable allocations and automatically frees them.\n\n## Why should you use this crate?\n\nIn short, `dumpster` offers a great mix of usability, performance, and flexibility.\n\n- `dumpster`'s API is a drop-in replacement for `std`'s reference-counted shared allocations\n  (`Rc` and `Arc`).\n- It's very performant and has builtin implementations of both thread-local and concurrent\n  garbage collection.\n- There are no restrictions on the reference structure within a garbage-collected allocation\n  (references may point in any way you like).\n- It's trivial to make a custom type Trace using the provided derive macros.\n- You can even store `?Sized` data in a garbage-collected pointer!\n\n## How it works\n\n`dumpster` is unlike most tracing garbage collectors.\nOther GCs keep track of a set of roots, which can then be used to perform a sweep and find out\nwhich allocations are reachable and which are not.\nInstead, `dumpster` extends reference-counted garbage collection (such as `std::rc::Rc`) with a\ncycle-detection algorithm, enabling it to effectively clean up self-referential data structures.\n\nFor a deeper dive, check out this\n[blog post](https://claytonwramsey.github.io/2023/08/14/dumpster.html).\n\n## What this library contains\n\n`dumpster` actually contains two garbage collector implementations: one thread-local, non-`Send`\ngarbage collector in the module `unsync`, and one thread-safe garbage collector in the module\n`sync`.\nThese garbage collectors can be safely mixed and matched.\n\nThis library also comes with a derive macro for creating custom Trace types.\n\n## Examples\n\n```rust\nuse dumpster::{Trace, 
unsync::Gc};\nuse std::cell::RefCell;\n\n#[derive(Trace)]\nstruct Foo {\n    ptr: RefCell<Option<Gc<Foo>>>,\n}\n\n// Create a new garbage-collected Foo.\nlet foo = Gc::new(Foo {\n    ptr: RefCell::new(None),\n});\n\n// Insert a circular reference inside of the foo.\n*foo.ptr.borrow_mut() = Some(foo.clone());\n\n// Render the foo inaccessible.\n// This may trigger a collection, but it's not guaranteed.\n// If we had used `Rc` instead of `Gc`, this would have caused a memory leak.\ndrop(foo);\n\n// Trigger a collection.\n// This isn't necessary, but it guarantees that `foo` will be collected immediately (instead of\n// later).\ndumpster::unsync::collect();\n```\n\n## Installation\n\nTo install, simply add `dumpster` as a dependency to your project.\n\n```toml\n[dependencies]\ndumpster = \"2.1.0\"\n```\n\n## Optional features\n\n### `derive`\n\n`derive` is enabled by default.\nIt enables the derive macro for `Trace`, which makes it easy for users to implement their\nown Trace types.\n\n```rust\nuse dumpster::{unsync::Gc, Trace};\nuse std::cell::RefCell;\n\n#[derive(Trace)] // no manual implementation required\nstruct Foo(RefCell<Option<Gc<Foo>>>);\n\nlet my_foo = Gc::new(Foo(RefCell::new(None)));\n*my_foo.0.borrow_mut() = Some(my_foo.clone());\n\ndrop(my_foo); // my_foo will be automatically cleaned up\n```\n\n### `either`\n\n`either` is disabled by default. 
It adds support for the [`either`](https://crates.io/crates/either) crate,\nspecifically by implementing `Trace` for [`either::Either`](https://docs.rs/either/1.13.0/either/enum.Either.html).\n\n### `coerce-unsized`\n\n`coerce-unsized` is disabled by default.\nThis enables the implementation of `CoerceUnsized` for each garbage collector,\nmaking it possible to use `Gc` with `!Sized` types conveniently.\n\n```rust\nuse dumpster::unsync::Gc;\n\n// this only works with \"coerce-unsized\" enabled while compiling on nightly Rust\nlet gc1: Gc<[u8]> = Gc::new([1, 2, 3]);\n```\n\nTo use `coerce-unsized`, edit your `Cargo.toml` to include the feature.\n\n```toml\n[dependencies]\ndumpster = { version = \"2.1.0\", features = [\"coerce-unsized\"]}\n```\n\n## Loom support\n\n`dumpster` has experimental support for permutation testing under [`loom`](https://github.com/tokio-rs/loom).\nIt is expected to be unstable and buggy.\nTo compile `dumpster` using `loom`, add `--cfg loom` to `RUSTFLAGS` when compiling, for example:\n\n```sh\nRUSTFLAGS='--cfg loom' cargo test\n```\n\n## License\n\nThis code is licensed under the Mozilla Public License, version 2.0.\nFor more information, refer to [LICENSE.md](LICENSE.md).\n\nThis project includes portions of code derived from the Rust standard library,\nwhich is dual-licensed under the MIT and Apache 2.0 licenses.\nCopyright (c) The Rust Project Developers.\n"
  },
  {
    "path": "dumpster/.gitignore",
    "content": "/target\n/Cargo.lock\n"
  },
  {
    "path": "dumpster/Cargo.toml",
    "content": "[package]\nname = \"dumpster\"\nversion = \"2.1.0\"\nedition = \"2021\"\nlicense = \"MPL-2.0\"\nauthors = [\"Clayton Ramsey\"]\ndescription = \"A concurrent cycle-tracking garbage collector.\"\nrepository = \"https://github.com/claytonwramsey/dumpster\"\nreadme = \"../README.md\"\nkeywords = [\"dumpster\", \"garbage_collector\", \"gc\"]\ncategories = [\"memory-management\", \"data-structures\"]\n\n[features]\ndefault = [\"derive\"]\ncoerce-unsized = []\nderive = [\"dep:dumpster_derive\"]\neither = [\"dep:either\"]\n\n[dependencies]\nparking_lot = \"0.12.3\"\ndumpster_derive = { version = \"2.0.0\", path = \"../dumpster_derive\", optional = true }\neither = { version = \"1.13.0\", optional = true }\nfoldhash = { version = \"0.2.0\", default-features = false, features = [\"std\"] }\n\n[dev-dependencies]\nfastrand = \"2.0.0\"\n\n[target.'cfg(loom)'.dependencies]\nloom = { version = \"0.7.2\" }\n\n[package.metadata.playground]\nfeatures = [\"derive\"]\n\n[package.metadata.docs.rs]\nfeatures = [\"derive\"]\ntargets = [\"x86_64-unknown-linux-gnu\"]\nrustdoc-args = [\"--generate-link-to-definition\"]\n\n[lints.rust]\nunexpected_cfgs = { level = \"warn\", check-cfg = ['cfg(loom)'] }\n"
  },
  {
    "path": "dumpster/src/impls.rs",
    "content": "/*\n    dumpster, a cycle-tracking garbage collector for Rust.    Copyright (C) 2023 Clayton Ramsey.\n\n    This Source Code Form is subject to the terms of the Mozilla Public\n    License, v. 2.0. If a copy of the MPL was not distributed with this\n    file, You can obtain one at http://mozilla.org/MPL/2.0/.\n*/\n\n//! Implementations of [`TraceWith<V>`] for common data types.\n\n#![allow(deprecated)]\n\nuse std::{\n    borrow::Cow,\n    cell::{Cell, OnceCell, RefCell},\n    collections::{BTreeMap, BTreeSet, BinaryHeap, HashMap, HashSet, LinkedList, VecDeque},\n    convert::Infallible,\n    hash::{BuildHasher, BuildHasherDefault},\n    marker::PhantomData,\n    num::{\n        NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, NonZeroU128,\n        NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize,\n    },\n    ops::Deref,\n    sync::{\n        atomic::{\n            AtomicI16, AtomicI32, AtomicI64, AtomicI8, AtomicIsize, AtomicU16, AtomicU32,\n            AtomicU64, AtomicU8, AtomicUsize,\n        },\n        Mutex, MutexGuard, OnceLock, RwLock, RwLockReadGuard, TryLockError,\n    },\n};\n\nuse crate::{TraceWith, Visitor};\n\nunsafe impl<V: Visitor> TraceWith<V> for Infallible {\n    fn accept(&self, _: &mut V) -> Result<(), ()> {\n        match *self {}\n    }\n}\n\n#[cfg(feature = \"either\")]\nunsafe impl<V: Visitor, A: TraceWith<V>, B: TraceWith<V>> TraceWith<V> for either::Either<A, B> {\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        match self {\n            either::Either::Left(a) => a.accept(visitor),\n            either::Either::Right(b) => b.accept(visitor),\n        }\n    }\n}\n\n/// Implement `TraceWith<V>` trivially for some parametric `?Sized` type.\nmacro_rules! 
param_trivial_impl_unsized {\n    ($x: ty) => {\n        unsafe impl<V: Visitor, T: ?Sized> TraceWith<V> for $x {\n            #[inline]\n            fn accept(&self, _: &mut V) -> Result<(), ()> {\n                Ok(())\n            }\n        }\n    };\n}\n\nparam_trivial_impl_unsized!(MutexGuard<'static, T>);\nparam_trivial_impl_unsized!(RwLockReadGuard<'static, T>);\nparam_trivial_impl_unsized!(&'static T);\nparam_trivial_impl_unsized!(PhantomData<T>);\n\n/// Implement `TraceWith<V>` trivially for some parametric `Sized` type.\nmacro_rules! param_trivial_impl_sized {\n    ($x: ty) => {\n        unsafe impl<V: Visitor, T> TraceWith<V> for $x {\n            #[inline]\n            fn accept(&self, _: &mut V) -> Result<(), ()> {\n                Ok(())\n            }\n        }\n    };\n}\n\nparam_trivial_impl_sized!(std::future::Pending<T>);\nparam_trivial_impl_sized!(std::mem::Discriminant<T>);\n\nunsafe impl<V: Visitor, T: TraceWith<V> + ?Sized> TraceWith<V> for Box<T> {\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        (**self).accept(visitor)\n    }\n}\n\nunsafe impl<V: Visitor, T> TraceWith<V> for BuildHasherDefault<T> {\n    fn accept(&self, _: &mut V) -> Result<(), ()> {\n        Ok(())\n    }\n}\n\nunsafe impl<V: Visitor, T: ToOwned> TraceWith<V> for Cow<'_, T>\nwhere\n    T::Owned: TraceWith<V>,\n{\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        if let Cow::Owned(ref v) = self {\n            v.accept(visitor)?;\n        }\n        Ok(())\n    }\n}\n\nunsafe impl<V: Visitor, T: TraceWith<V> + ?Sized> TraceWith<V> for RefCell<T> {\n    #[inline]\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        self.try_borrow().map_err(|_| ())?.accept(visitor)\n    }\n}\n\nunsafe impl<V: Visitor, T: TraceWith<V> + ?Sized> TraceWith<V> for Mutex<T> {\n    #[inline]\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        self.try_lock()\n            .map_err(|e| match e {\n                
TryLockError::Poisoned(_) => panic!(),\n                TryLockError::WouldBlock => (),\n            })?\n            .deref()\n            .accept(visitor)\n    }\n}\n\nunsafe impl<V: Visitor, T: TraceWith<V> + ?Sized> TraceWith<V> for RwLock<T> {\n    #[inline]\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        self.try_read()\n            .map_err(|e| match e {\n                TryLockError::Poisoned(_) => panic!(),\n                TryLockError::WouldBlock => (),\n            })?\n            .deref()\n            .accept(visitor)\n    }\n}\n\nunsafe impl<V: Visitor, T: TraceWith<V>> TraceWith<V> for Option<T> {\n    #[inline]\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        match self {\n            Some(x) => x.accept(visitor),\n            None => Ok(()),\n        }\n    }\n}\n\nunsafe impl<V: Visitor, T: TraceWith<V>, E: TraceWith<V>> TraceWith<V> for Result<T, E> {\n    #[inline]\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        match self {\n            Ok(t) => t.accept(visitor),\n            Err(e) => e.accept(visitor),\n        }\n    }\n}\n\nunsafe impl<V: Visitor, T: Copy + TraceWith<V>> TraceWith<V> for Cell<T> {\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        self.get().accept(visitor)\n    }\n}\n\nunsafe impl<V: Visitor, T: TraceWith<V>> TraceWith<V> for OnceCell<T> {\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        self.get().map_or(Ok(()), |x| x.accept(visitor))\n    }\n}\n\nunsafe impl<V: Visitor, T: TraceWith<V>> TraceWith<V> for OnceLock<T> {\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        self.get().map_or(Ok(()), |x| x.accept(visitor))\n    }\n}\n\nunsafe impl<V: Visitor, T: TraceWith<V>> TraceWith<V> for std::cmp::Reverse<T> {\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        self.0.accept(visitor)\n    }\n}\n\nunsafe impl<V: Visitor, T: TraceWith<V> + ?Sized> TraceWith<V> for std::io::BufReader<T> {\n    fn 
accept(&self, visitor: &mut V) -> Result<(), ()> {\n        self.get_ref().accept(visitor)\n    }\n}\n\nunsafe impl<V: Visitor, T: TraceWith<V> + std::io::Write + ?Sized> TraceWith<V>\n    for std::io::BufWriter<T>\n{\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        self.get_ref().accept(visitor)\n    }\n}\n\nunsafe impl<V: Visitor, T: TraceWith<V>, U: TraceWith<V>> TraceWith<V> for std::io::Chain<T, U> {\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        let (t, u) = self.get_ref();\n        t.accept(visitor)?;\n        u.accept(visitor)\n    }\n}\n\nunsafe impl<V: Visitor, T: TraceWith<V>> TraceWith<V> for std::io::Cursor<T> {\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        self.get_ref().accept(visitor)\n    }\n}\n\nunsafe impl<V: Visitor, T: TraceWith<V> + std::io::Write + ?Sized> TraceWith<V>\n    for std::io::LineWriter<T>\n{\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        self.get_ref().accept(visitor)\n    }\n}\n\nunsafe impl<V: Visitor, T: TraceWith<V>> TraceWith<V> for std::io::Take<T> {\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        self.get_ref().accept(visitor)\n    }\n}\n\nunsafe impl<V: Visitor, T: TraceWith<V>> TraceWith<V> for std::mem::ManuallyDrop<T> {\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        (**self).accept(visitor)\n    }\n}\n\nunsafe impl<V: Visitor, T: TraceWith<V>> TraceWith<V> for std::num::Saturating<T> {\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        self.0.accept(visitor)\n    }\n}\n\nunsafe impl<V: Visitor, T: TraceWith<V>> TraceWith<V> for std::num::Wrapping<T> {\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        self.0.accept(visitor)\n    }\n}\n\nunsafe impl<V: Visitor, T: TraceWith<V>> TraceWith<V> for std::ops::Range<T> {\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        self.start.accept(visitor)?;\n        self.end.accept(visitor)\n    }\n}\n\nunsafe 
impl<V: Visitor, T: TraceWith<V>> TraceWith<V> for std::ops::RangeFrom<T> {\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        self.start.accept(visitor)\n    }\n}\n\nunsafe impl<V: Visitor, T: TraceWith<V>> TraceWith<V> for std::ops::RangeInclusive<T> {\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        self.start().accept(visitor)?;\n        self.end().accept(visitor)\n    }\n}\n\nunsafe impl<V: Visitor, T: TraceWith<V>> TraceWith<V> for std::ops::RangeTo<T> {\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        self.end.accept(visitor)\n    }\n}\n\nunsafe impl<V: Visitor, T: TraceWith<V>> TraceWith<V> for std::ops::RangeToInclusive<T> {\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        self.end.accept(visitor)\n    }\n}\n\nunsafe impl<V: Visitor, T: TraceWith<V>> TraceWith<V> for std::ops::Bound<T> {\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        match self {\n            std::ops::Bound::Included(x) | std::ops::Bound::Excluded(x) => x.accept(visitor),\n            std::ops::Bound::Unbounded => Ok(()),\n        }\n    }\n}\n\nunsafe impl<V: Visitor, B: TraceWith<V>, C: TraceWith<V>> TraceWith<V>\n    for std::ops::ControlFlow<B, C>\n{\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        match self {\n            std::ops::ControlFlow::Continue(c) => c.accept(visitor),\n            std::ops::ControlFlow::Break(b) => b.accept(visitor),\n        }\n    }\n}\n\nunsafe impl<V: Visitor, T: TraceWith<V>> TraceWith<V> for std::panic::AssertUnwindSafe<T> {\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        self.0.accept(visitor)\n    }\n}\n\nunsafe impl<V: Visitor, T: TraceWith<V>> TraceWith<V> for std::task::Poll<T> {\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        match self {\n            std::task::Poll::Ready(r) => r.accept(visitor),\n            std::task::Poll::Pending => Ok(()),\n        }\n    }\n}\n\n/// Implement 
[`TraceWith<V>`] for a collection data structure which has some method `iter()` that\n/// iterates over all elements of the data structure and `iter_mut()` which does the same over\n/// mutable references.\nmacro_rules! Trace_collection_impl {\n    ($x: ty) => {\n        unsafe impl<V: Visitor, T: TraceWith<V>> TraceWith<V> for $x {\n            #[inline]\n            fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n                for elem in self {\n                    elem.accept(visitor)?;\n                }\n                Ok(())\n            }\n        }\n    };\n}\n\nTrace_collection_impl!(Vec<T>);\nTrace_collection_impl!(VecDeque<T>);\nTrace_collection_impl!(LinkedList<T>);\nTrace_collection_impl!([T]);\nTrace_collection_impl!(BinaryHeap<T>);\nTrace_collection_impl!(BTreeSet<T>);\n\nunsafe impl<V: Visitor, T: TraceWith<V>> TraceWith<V> for std::vec::IntoIter<T> {\n    #[inline]\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        for elem in self.as_slice() {\n            elem.accept(visitor)?;\n        }\n        Ok(())\n    }\n}\n\nunsafe impl<Z: Visitor, K: TraceWith<Z>, V: TraceWith<Z>, S: BuildHasher + TraceWith<Z>>\n    TraceWith<Z> for HashMap<K, V, S>\n{\n    fn accept(&self, visitor: &mut Z) -> Result<(), ()> {\n        for (k, v) in self {\n            k.accept(visitor)?;\n            v.accept(visitor)?;\n        }\n        self.hasher().accept(visitor)\n    }\n}\n\nunsafe impl<Z: Visitor, T: TraceWith<Z>, S: BuildHasher + TraceWith<Z>> TraceWith<Z>\n    for HashSet<T, S>\n{\n    fn accept(&self, visitor: &mut Z) -> Result<(), ()> {\n        for elem in self {\n            elem.accept(visitor)?;\n        }\n        self.hasher().accept(visitor)\n    }\n}\n\nunsafe impl<Z: Visitor, K: TraceWith<Z>, V: TraceWith<Z>> TraceWith<Z> for BTreeMap<K, V> {\n    fn accept(&self, visitor: &mut Z) -> Result<(), ()> {\n        for (k, v) in self {\n            k.accept(visitor)?;\n            v.accept(visitor)?;\n        }\n        
Ok(())\n    }\n}\n\nunsafe impl<V: Visitor, T: TraceWith<V>, const N: usize> TraceWith<V> for [T; N] {\n    #[inline]\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        for elem in self {\n            elem.accept(visitor)?;\n        }\n        Ok(())\n    }\n}\n\n/// Implement [`TraceWith<V>`] for a trivially-collected type which contains no `Gc`s in its\n/// fields.\nmacro_rules! Trace_trivial_impl {\n    ($x: ty) => {\n        unsafe impl<V: Visitor> TraceWith<V> for $x {\n            #[inline]\n            fn accept(&self, _: &mut V) -> Result<(), ()> {\n                Ok(())\n            }\n        }\n    };\n}\n\nTrace_trivial_impl!(());\n\nTrace_trivial_impl!(u8);\nTrace_trivial_impl!(u16);\nTrace_trivial_impl!(u32);\nTrace_trivial_impl!(u64);\nTrace_trivial_impl!(u128);\nTrace_trivial_impl!(usize);\nTrace_trivial_impl!(i8);\nTrace_trivial_impl!(i16);\nTrace_trivial_impl!(i32);\nTrace_trivial_impl!(i64);\nTrace_trivial_impl!(i128);\nTrace_trivial_impl!(isize);\n\nTrace_trivial_impl!(bool);\nTrace_trivial_impl!(char);\n\nTrace_trivial_impl!(f32);\nTrace_trivial_impl!(f64);\n\nTrace_trivial_impl!(AtomicU8);\nTrace_trivial_impl!(AtomicU16);\nTrace_trivial_impl!(AtomicU32);\nTrace_trivial_impl!(AtomicU64);\nTrace_trivial_impl!(AtomicUsize);\nTrace_trivial_impl!(AtomicI8);\nTrace_trivial_impl!(AtomicI16);\nTrace_trivial_impl!(AtomicI32);\nTrace_trivial_impl!(AtomicI64);\nTrace_trivial_impl!(AtomicIsize);\n\nTrace_trivial_impl!(NonZeroU8);\nTrace_trivial_impl!(NonZeroU16);\nTrace_trivial_impl!(NonZeroU32);\nTrace_trivial_impl!(NonZeroU64);\nTrace_trivial_impl!(NonZeroU128);\nTrace_trivial_impl!(NonZeroUsize);\nTrace_trivial_impl!(NonZeroI8);\nTrace_trivial_impl!(NonZeroI16);\nTrace_trivial_impl!(NonZeroI32);\nTrace_trivial_impl!(NonZeroI64);\nTrace_trivial_impl!(NonZeroI128);\nTrace_trivial_impl!(NonZeroIsize);\n\nTrace_trivial_impl!(std::alloc::Layout);\nTrace_trivial_impl!(std::alloc::LayoutError);\nTrace_trivial_impl!(std::alloc::System);\n\nTra
ce_trivial_impl!(std::any::TypeId);\n\nTrace_trivial_impl!(std::ascii::EscapeDefault);\n\nTrace_trivial_impl!(std::backtrace::Backtrace);\nTrace_trivial_impl!(std::backtrace::BacktraceStatus);\n\nTrace_trivial_impl!(std::cmp::Ordering);\n\nTrace_trivial_impl!(std::char::CharTryFromError);\nTrace_trivial_impl!(std::char::EscapeDebug);\nTrace_trivial_impl!(std::char::EscapeDefault);\nTrace_trivial_impl!(std::char::EscapeUnicode);\nTrace_trivial_impl!(std::char::ToLowercase);\nTrace_trivial_impl!(std::char::ToUppercase);\n\nTrace_trivial_impl!(std::env::Args);\nTrace_trivial_impl!(std::env::ArgsOs);\nTrace_trivial_impl!(std::env::JoinPathsError);\nTrace_trivial_impl!(std::env::Vars);\nTrace_trivial_impl!(std::env::VarsOs);\nTrace_trivial_impl!(std::env::VarError);\n\nTrace_trivial_impl!(std::ffi::CStr);\nTrace_trivial_impl!(std::ffi::CString);\nTrace_trivial_impl!(std::ffi::FromBytesUntilNulError);\nTrace_trivial_impl!(std::ffi::FromVecWithNulError);\nTrace_trivial_impl!(std::ffi::IntoStringError);\nTrace_trivial_impl!(std::ffi::NulError);\nTrace_trivial_impl!(std::ffi::OsStr);\nTrace_trivial_impl!(std::ffi::OsString);\nTrace_trivial_impl!(std::ffi::FromBytesWithNulError);\nTrace_trivial_impl!(std::ffi::c_void);\n\nTrace_trivial_impl!(std::fmt::Error);\nTrace_trivial_impl!(std::fmt::Alignment);\n\nTrace_trivial_impl!(std::fs::DirBuilder);\nTrace_trivial_impl!(std::fs::DirEntry);\nTrace_trivial_impl!(std::fs::File);\nTrace_trivial_impl!(std::fs::FileTimes);\nTrace_trivial_impl!(std::fs::FileType);\nTrace_trivial_impl!(std::fs::Metadata);\nTrace_trivial_impl!(std::fs::OpenOptions);\nTrace_trivial_impl!(std::fs::Permissions);\nTrace_trivial_impl!(std::fs::ReadDir);\nTrace_trivial_impl!(std::fs::TryLockError);\n\nTrace_trivial_impl!(std::hash::DefaultHasher);\nTrace_trivial_impl!(std::hash::RandomState);\nTrace_trivial_impl!(std::hash::SipHasher);\n\nTrace_trivial_impl!(std::io::Empty);\nTrace_trivial_impl!(std::io::Error);\nTrace_trivial_impl!(std::io::PipeReader);\nTrace
_trivial_impl!(std::io::PipeWriter);\nTrace_trivial_impl!(std::io::Repeat);\nTrace_trivial_impl!(std::io::Sink);\nTrace_trivial_impl!(std::io::Stdin);\nTrace_trivial_impl!(std::io::Stdout);\nTrace_trivial_impl!(std::io::WriterPanicked);\nTrace_trivial_impl!(std::io::ErrorKind);\nTrace_trivial_impl!(std::io::SeekFrom);\n\nTrace_trivial_impl!(std::marker::PhantomPinned);\n\nTrace_trivial_impl!(std::net::AddrParseError);\nTrace_trivial_impl!(std::net::Ipv4Addr);\nTrace_trivial_impl!(std::net::Ipv6Addr);\nTrace_trivial_impl!(std::net::SocketAddrV4);\nTrace_trivial_impl!(std::net::SocketAddrV6);\nTrace_trivial_impl!(std::net::TcpListener);\nTrace_trivial_impl!(std::net::TcpStream);\nTrace_trivial_impl!(std::net::UdpSocket);\nTrace_trivial_impl!(std::net::IpAddr);\nTrace_trivial_impl!(std::net::Shutdown);\nTrace_trivial_impl!(std::net::SocketAddr);\n\nTrace_trivial_impl!(std::num::ParseFloatError);\nTrace_trivial_impl!(std::num::ParseIntError);\nTrace_trivial_impl!(std::num::TryFromIntError);\nTrace_trivial_impl!(std::num::FpCategory);\nTrace_trivial_impl!(std::num::IntErrorKind);\n\nTrace_trivial_impl!(std::ops::RangeFull);\n\nTrace_trivial_impl!(std::path::Path);\nTrace_trivial_impl!(std::path::PathBuf);\nTrace_trivial_impl!(std::path::StripPrefixError);\n\nTrace_trivial_impl!(std::process::Child);\nTrace_trivial_impl!(std::process::ChildStderr);\nTrace_trivial_impl!(std::process::ChildStdin);\nTrace_trivial_impl!(std::process::ChildStdout);\nTrace_trivial_impl!(std::process::Command);\nTrace_trivial_impl!(std::process::ExitCode);\nTrace_trivial_impl!(std::process::Output);\nTrace_trivial_impl!(std::process::Stdio);\n\nTrace_trivial_impl!(std::slice::GetDisjointMutError);\n\nTrace_trivial_impl!(str);\nTrace_trivial_impl!(std::rc::Rc<str>);\nTrace_trivial_impl!(std::sync::Arc<str>);\n\nTrace_trivial_impl!(std::string::FromUtf8Error);\nTrace_trivial_impl!(std::string::FromUtf16Error);\nTrace_trivial_impl!(std::string::String);\n\nTrace_trivial_impl!(std::thread::AccessErr
or);\nTrace_trivial_impl!(std::thread::Builder);\nTrace_trivial_impl!(std::thread::Thread);\nTrace_trivial_impl!(std::thread::ThreadId);\n\nTrace_trivial_impl!(std::time::Duration);\nTrace_trivial_impl!(std::time::Instant);\nTrace_trivial_impl!(std::time::SystemTime);\nTrace_trivial_impl!(std::time::SystemTimeError);\nTrace_trivial_impl!(std::time::TryFromFloatSecsError);\n\n/// Implement [`TraceWith<V>`] for a tuple.\nmacro_rules! Trace_tuple {\n    () => {}; // This case is handled above by the trivial case\n    ($($args:ident),*) => {\n        unsafe impl<V: Visitor, $($args: TraceWith<V>),*> TraceWith<V> for ($($args,)*) {\n            fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n                #[expect(clippy::allow_attributes)]\n                #[allow(non_snake_case)]\n                let &($(ref $args,)*) = self;\n                $(($args).accept(visitor)?;)*\n                Ok(())\n            }\n        }\n    }\n}\n\nTrace_tuple!();\nTrace_tuple!(A);\nTrace_tuple!(A, B);\nTrace_tuple!(A, B, C);\nTrace_tuple!(A, B, C, D);\nTrace_tuple!(A, B, C, D, E);\nTrace_tuple!(A, B, C, D, E, F);\nTrace_tuple!(A, B, C, D, E, F, G);\nTrace_tuple!(A, B, C, D, E, F, G, H);\nTrace_tuple!(A, B, C, D, E, F, G, H, I);\nTrace_tuple!(A, B, C, D, E, F, G, H, I, J);\n\n/// Implement `TraceWith<V>` for one function type.\nmacro_rules! Trace_fn {\n    ($ty:ty $(,$args:ident)*) => {\n        unsafe impl<V: Visitor, Ret $(,$args)*> TraceWith<V> for $ty {\n            fn accept(&self, _: &mut V) -> Result<(), ()> { Ok(()) }\n        }\n    }\n}\n\n/// Implement `TraceWith<V>` for all functions with a given set of args.\nmacro_rules! 
Trace_fn_group {\n    () => {\n        Trace_fn!(extern \"Rust\" fn () -> Ret);\n        Trace_fn!(extern \"C\" fn () -> Ret);\n        Trace_fn!(unsafe extern \"Rust\" fn () -> Ret);\n        Trace_fn!(unsafe extern \"C\" fn () -> Ret);\n    };\n    ($($args:ident),*) => {\n        Trace_fn!(extern \"Rust\" fn ($($args),*) -> Ret, $($args),*);\n        Trace_fn!(extern \"C\" fn ($($args),*) -> Ret, $($args),*);\n        Trace_fn!(extern \"C\" fn ($($args),*, ...) -> Ret, $($args),*);\n        Trace_fn!(unsafe extern \"Rust\" fn ($($args),*) -> Ret, $($args),*);\n        Trace_fn!(unsafe extern \"C\" fn ($($args),*) -> Ret, $($args),*);\n        Trace_fn!(unsafe extern \"C\" fn ($($args),*, ...) -> Ret, $($args),*);\n    }\n}\n\nTrace_fn_group!();\nTrace_fn_group!(A);\nTrace_fn_group!(A, B);\nTrace_fn_group!(A, B, C);\nTrace_fn_group!(A, B, C, D);\nTrace_fn_group!(A, B, C, D, E);\nTrace_fn_group!(A, B, C, D, E, F);\nTrace_fn_group!(A, B, C, D, E, F, G);\nTrace_fn_group!(A, B, C, D, E, F, G, H);\nTrace_fn_group!(A, B, C, D, E, F, G, H, I);\nTrace_fn_group!(A, B, C, D, E, F, G, H, I, J);\n"
  },
  {
    "path": "dumpster/src/lib.rs",
    "content": "/*\n    dumpster, a cycle-tracking garbage collector for Rust.    Copyright (C) 2023 Clayton Ramsey.\n\n    This Source Code Form is subject to the terms of the Mozilla Public\n    License, v. 2.0. If a copy of the MPL was not distributed with this\n    file, You can obtain one at http://mozilla.org/MPL/2.0/.\n*/\n\n//! A cycle-tracking concurrent garbage collector with an easy-to-use API.\n//!\n//! Most garbage collectors are _tracing_ garbage collectors, meaning that they keep track of a set\n//! of roots which are directly accessible from the stack, and then use those roots to find the set\n//! of all accessible allocations.\n//! However, because Rust does not allow us to hook into when a value is moved, it's quite difficult\n//! to detect when a garbage-collected value stops being a root.\n//!\n//! `dumpster` takes a different approach.\n//! It begins by using simple reference counting, then automatically detects cycles.\n//! Allocations are freed when their reference count reaches zero or when they are only accessible\n//! via their descendants.\n//!\n//! Garbage-collected pointers can be created and destroyed in _O(1)_ amortized time, but destroying\n//! a garbage-collected pointer may take _O(r)_, where _r_ is the number of existing\n//! garbage-collected references, on occasion.\n//! However, the cleanups that require _O(r)_ performance are performed once every _O(1/r)_ times\n//! a reference is dropped, yielding an amortized _O(1)_ runtime.\n//!\n//! # Why should you use this crate?\n//!\n//! In short, `dumpster` offers a great mix of usability, performance, and flexibility.\n//!\n//! - `dumpster`'s API is a drop-in replacement for `std`'s reference-counted shared allocations\n//!   (`Rc` and `Arc`).\n//! - It's very performant and has builtin implementations of both thread-local and concurrent\n//!   garbage collection.\n//! - There are no restrictions on the reference structure within a garbage-collected allocation\n//!   
(references may point in any way you like).\n//! - It's trivial to make a custom type Trace using the provided derive macros.\n//! - You can even store `?Sized` data in a garbage-collected pointer!\n//!\n//! # Module structure\n//!\n//! `dumpster` contains 3 core modules: the root (this module), as well as [`sync`] and [`unsync`].\n//! `sync` contains an implementation of thread-safe garbage-collected pointers, while `unsync`\n//! contains an implementation of thread-local garbage-collected pointers which cannot be shared\n//! across threads.\n//! Thread-safety requires some synchronization overhead, so for a single-threaded application,\n//! it is recommended to use `unsync`.\n//!\n//! The project root contains common definitions across both `sync` and `unsync`.\n//! Types which implement [`Trace`] can immediately be used in `unsync`, but in order to use\n//! `sync`'s garbage collector, the types must also implement [`Sync`].\n//!\n//! # Examples\n//!\n//! If your code is meant to run as a single thread, or if your data doesn't need to be shared\n//! across threads, you should use [`unsync::Gc`] to store your allocations.\n//!\n//! ```\n//! use dumpster::unsync::Gc;\n//! use std::cell::Cell;\n//!\n//! let my_gc = Gc::new(Cell::new(0451));\n//!\n//! let other_gc = my_gc.clone(); // shallow copy\n//! other_gc.set(512);\n//!\n//! assert_eq!(my_gc.get(), 512);\n//! ```\n//!\n//! For data which is shared across threads, you can use [`sync::Gc`] with the exact same API.\n//!\n//! ```\n//! use dumpster::sync::Gc;\n//! use std::sync::Mutex;\n//!\n//! let my_shared_gc = Gc::new(Mutex::new(25));\n//! let other_shared_gc = my_shared_gc.clone();\n//!\n//! std::thread::scope(|s| {\n//!     s.spawn(move || {\n//!         *other_shared_gc.lock().unwrap() = 35;\n//!     });\n//! });\n//!\n//! println!(\"{}\", *my_shared_gc.lock().unwrap());\n//! ```\n//!\n//! It's trivial to use custom data structures with the provided derive macro.\n//!\n//! ```\n//! 
use dumpster::{unsync::Gc, Trace};\n//! use std::cell::RefCell;\n//!\n//! #[derive(Trace)]\n//! struct Foo {\n//!     refs: RefCell<Vec<Gc<Foo>>>,\n//! }\n//!\n//! let foo = Gc::new(Foo {\n//!     refs: RefCell::new(Vec::new()),\n//! });\n//!\n//! foo.refs.borrow_mut().push(foo.clone());\n//!\n//! drop(foo);\n//!\n//! // even though foo had a self reference, it still got collected!\n//! ```\n//!\n//! # Installation\n//!\n//! To use `dumpster`, add the following lines to your `Cargo.toml`.\n//!\n//! ```toml\n//! [dependencies]\n//! dumpster = \"2.1.0\"\n//! ```\n//!\n//! # Optional features\n//!\n//! ## `derive`\n//!\n//! `derive` is enabled by default.\n//! It enables the derive macro for `Trace`, which makes it easy for users to implement their\n//! own Trace types.\n//!\n//! ```\n//! use dumpster::{unsync::Gc, Trace};\n//! use std::cell::RefCell;\n//!\n//! #[derive(Trace)] // no manual implementation required\n//! struct Foo(RefCell<Option<Gc<Foo>>>);\n//!\n//! let my_foo = Gc::new(Foo(RefCell::new(None)));\n//! *my_foo.0.borrow_mut() = Some(my_foo.clone());\n//!\n//! drop(my_foo); // my_foo will be automatically cleaned up\n//! ```\n//!\n//! ## `either`\n//!\n//! `either` is disabled by default. It adds support for the [`either`](https://crates.io/crates/either) crate,\n//! specifically by implementing [`Trace`] for [`either::Either`](https://docs.rs/either/1.13.0/either/enum.Either.html).\n//!\n//! ## `coerce-unsized`\n//!\n//! `coerce-unsized` is disabled by default.\n//! This enables the implementation of [`std::ops::CoerceUnsized`] for each garbage collector,\n//! making it possible to use `Gc` with `!Sized` types conveniently.\n#![cfg_attr(\n    feature = \"coerce-unsized\",\n    doc = r#\"\n```\n// this only works with \"coerce-unsized\" enabled while compiling on nightly Rust\nuse dumpster::unsync::Gc;\n\nlet gc1: Gc<[u8]> = Gc::new([1, 2, 3]);\n```\n\"#\n)]\n//! 
To use `coerce-unsized`, edit your installation to `Cargo.toml` to include the feature.\n//!\n//! ```toml\n//! [dependencies]\n//! dumpster = { version = \"2.1.0\", features = [\"coerce-unsized\"]}\n//! ```\n//!\n//! ## Loom support\n//!\n//! `dumpster` has experimental support for permutation testing under [`loom`](https://github.com/tokio-rs/loom).\n//! It is expected to be unstable and buggy.\n//! To compile `dumpster` using `loom`, add `--cfg loom` to `RUSTFLAGS` when compiling, for example:\n//!\n//! ```sh\n//! RUSTFLAGS='--cfg loom' cargo test\n//! ```\n//!\n//! # License\n//!\n//! `dumpster` is licensed under the Mozilla Public License, version 2.0.\n//! For more details, refer to\n//! [LICENSE.md](https://github.com/claytonwramsey/dumpster/blob/master/LICENSE.md).\n//!\n//! This project includes portions of code derived from the Rust standard library,\n//! which is dual-licensed under the MIT and Apache 2.0 licenses.\n//! Copyright (c) The Rust Project Developers.\n\n#![warn(clippy::pedantic)]\n#![warn(clippy::cargo)]\n#![warn(missing_docs)]\n#![warn(clippy::missing_docs_in_private_items)]\n#![warn(clippy::allow_attributes, reason = \"prefer expect over allow\")]\n#![allow(clippy::multiple_crate_versions, clippy::result_unit_err)]\n#![cfg_attr(feature = \"coerce-unsized\", feature(coerce_unsized))]\n#![cfg_attr(feature = \"coerce-unsized\", feature(unsize))]\n\nmod impls;\n\nmod ptr;\npub mod sync;\npub mod unsync;\n\n/// Contains the sealed trait for [`Trace`].\nmod trace {\n    use crate::{sync::TraceSync, unsync::TraceUnsync, ContainsGcs, TraceWith};\n\n    /// The sealed trait for [`Trace`](crate::Trace),\n    /// hiding away the implementation details and making it\n    /// impossible to manually implement `Trace`.\n    #[expect(clippy::missing_safety_doc)]\n    #[expect(private_bounds)]\n    pub unsafe trait TraceWithV: TraceWith<ContainsGcs> + TraceSync + TraceUnsync {}\n\n    unsafe impl<T> TraceWithV for T where T: ?Sized + TraceWith<ContainsGcs> + 
TraceSync + TraceUnsync {}\n}\n\n/// The trait that any garbage-collected data must implement.\n///\n/// This trait should usually be implemented by using `#[derive(Trace)]`, using the provided\n/// macro.\n/// Only data structures using raw pointers or other magic should manually implement `Trace`.\n///\n/// To manually implement `Trace` you need to implement [`TraceWith<V>`].\n/// Any type that implements `TraceWith` for all <code>V: [Visitor]</code>\n/// automatically implements `Trace`.\n///\n/// # Examples\n///\n/// Implementing `Trace` for a scalar type which contains no garbage-collected references\n/// is very easy.\n/// Accepting a visitor is simply a no-op.\n///\n/// ```\n/// use dumpster::{TraceWith, Visitor};\n///\n/// struct Foo(u8);\n///\n/// unsafe impl<V: Visitor> TraceWith<V> for Foo {\n///     fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n///         Ok(())\n///     }\n/// }\n/// ```\n///\n/// However, if a data structure contains a garbage collected pointer, it must delegate to its\n/// fields in `accept`.\n///\n/// ```\n/// use dumpster::{unsync::Gc, TraceWith, Visitor};\n///\n/// struct Bar(Gc<Bar>);\n///\n/// unsafe impl<V: Visitor> TraceWith<V> for Bar {\n///     fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n///         self.0.accept(visitor)\n///     }\n/// }\n/// ```\n///\n/// A data structure with two or more fields which could own a garbage-collected pointer should\n/// delegate to both fields in a consistent order:\n///\n/// ```\n/// use dumpster::{unsync::Gc, TraceWith, Visitor};\n///\n/// struct Baz {\n///     a: Gc<Baz>,\n///     b: Gc<Baz>,\n/// }\n///\n/// unsafe impl<V: Visitor> TraceWith<V> for Baz {\n///     fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n///         self.a.accept(visitor)?;\n///         self.b.accept(visitor)?;\n///         Ok(())\n///     }\n/// }\n/// ```\n///\n/// `Trace` is dyn-compatible, so you can use it as a subtrait\n/// to allocate your own trait object.\n///\n/// ```\n/// use 
dumpster::{\n///     unsync::{coerce_gc, Gc},\n///     Trace,\n/// };\n///\n/// trait MyTrait: Trace {}\n/// impl<T: Trace> MyTrait for T {}\n///\n/// let gc: Gc<i32> = Gc::new(5);\n/// let gc: Gc<dyn MyTrait> = coerce_gc!(gc);\n/// ```\npub trait Trace: trace::TraceWithV {}\n\nimpl<T> Trace for T where T: trace::TraceWithV + ?Sized {}\n\n/// The underlying tracing implementation powering the [`Trace`] trait.\n///\n/// # Safety\n///\n/// If the implementation of this trait is incorrect, this will result in undefined behavior,\n/// typically double-frees or use-after-frees.\n/// This includes [`TraceWith::accept`], even though it is a safe function, since its correctness\n/// is required for safety.\n///\n/// The garbage collector in `dumpster` requires strong assumptions about the values inside of a\n/// `Gc`; by implementing `TraceWith`, you are responsible for these assumptions.\n/// Specifically, in order to be `TraceWith`, a value must have a _tree-like_ ownership structure.\n/// If some type `T` implements `TraceWith`, it means that no references to a value inside `T` will\n/// remain valid while `T` is moved. 
For instance, this means that `Rc` can never be `Trace`, as\n/// moving one `Rc` will not invalidate other `Rc`s pointing to the same allocation.\n/// We allow exceptions for fields of `T` that are not visited by the implementation of\n/// [`TraceWith::accept`], such as borrows (see the implementation of `TraceWith` for `&T`) and\n/// naturally for [`unsync::Gc`] and [`sync::Gc`].\n///\n/// Any structure whose implementation of `TraceWith` comes from `#[derive(Trace)]` satisfies the\n/// tree-like requirement.\npub unsafe trait TraceWith<V: Visitor> {\n    /// Accept a visitor to this garbage-collected value.\n    ///\n    /// Implementors of this function need only delegate to all fields owned by this value which\n    /// may contain a garbage-collected reference (either a [`sync::Gc`] or a [`unsync::Gc`]).\n    /// This delegation must be done in a consistent order.\n    ///\n    /// For structures which have more than one field, they should return immediately after the\n    /// first `Err` is returned from one of its fields.\n    /// To do so efficiently, we recommend using the try operator (`?`) on each field and then\n    /// returning `Ok(())` after delegating to each field.\n    ///\n    /// # Errors\n    ///\n    /// Errors are returned from this function whenever a field of this object returns an error\n    /// after delegating acceptance to it, or if this value's data is inaccessible (such as\n    /// attempting to borrow from a [`RefCell`](std::cell::RefCell) which has already been\n    /// mutably borrowed).\n    fn accept(&self, visitor: &mut V) -> Result<(), ()>;\n}\n\n/// A visitor for a garbage collected value.\n///\n/// This visitor allows us to hide details of the implementation of the garbage-collection procedure\n/// from implementors of [`Trace`].\n///\n/// When accepted by a `Trace`, this visitor will be delegated down until it reaches a\n/// garbage-collected pointer.\n/// Then, the garbage-collected pointer will call one of `visit_sync` or 
`visit_unsync`, depending\n/// on which type of pointer it is.\n///\n/// In general, it's not expected for consumers of this library to write their own visitors.\npub trait Visitor {\n    /// Visit a synchronized garbage-collected pointer.\n    ///\n    /// This function is called for every [`sync::Gc`] owned by the value that accepted this\n    /// visitor.\n    fn visit_sync<T>(&mut self, gc: &sync::Gc<T>)\n    where\n        T: Trace + Send + Sync + ?Sized;\n\n    /// Visit a thread-local garbage-collected pointer.\n    ///\n    /// This function is called for every [`unsync::Gc`] owned by the value that accepted this\n    /// visitor.\n    fn visit_unsync<T>(&mut self, gc: &unsync::Gc<T>)\n    where\n        T: Trace + ?Sized;\n}\n\n// Re-export #[derive(Trace)].\n//\n// The reason re-exporting is not enabled by default is that disabling it would\n// be annoying for crates that provide handwritten impls or data formats. They\n// would need to disable default features and then explicitly re-enable std.\n#[cfg(feature = \"derive\")]\nextern crate dumpster_derive;\n\n#[cfg(feature = \"derive\")]\n/// The derive macro for implementing `Trace`.\n///\n/// This enables users of `dumpster` to easily store custom types inside a `Gc`.\n/// To do so, simply annotate your type with `#[derive(Trace)]`.\n///\n/// # Examples\n///\n/// ```\n/// use dumpster::Trace;\n///\n/// #[derive(Trace)]\n/// struct Foo {\n///     bar: Option<Box<Foo>>,\n/// }\n/// ```\n///\n/// You can specify the crate path for the `dumpster` crate using the `dumpster` attribute:\n///\n/// ```\n/// use dumpster as dumpster_renamed;\n/// use dumpster_renamed::Trace;\n///\n/// #[derive(Trace)]\n/// #[dumpster(crate = dumpster_renamed)]\n/// struct Foo {\n///     bar: Option<Box<Foo>>,\n/// }\n/// ```\npub use dumpster_derive::Trace;\n\n/// Determine whether some value contains a garbage-collected pointer.\n///\n/// This function will return one of three values:\n/// - `Ok(true)`: The data structure 
contains a garbage-collected pointer.\n/// - `Ok(false)`: The data structure contains no garbage-collected pointers.\n/// - `Err(())`: The data structure was accessed while we checked it for garbage-collected pointers.\nfn contains_gcs<T: Trace + ?Sized>(x: &T) -> Result<bool, ()> {\n    let mut visit = ContainsGcs(false);\n    x.accept(&mut visit)?;\n    Ok(visit.0)\n}\n\n/// A visitor structure used for determining whether some garbage-collected pointer contains a\n/// `Gc` in its pointed-to value.\nstruct ContainsGcs(bool);\n\nimpl Visitor for ContainsGcs {\n    fn visit_sync<T>(&mut self, _: &sync::Gc<T>)\n    where\n        T: Trace + Send + Sync + ?Sized,\n    {\n        self.0 = true;\n    }\n\n    fn visit_unsync<T>(&mut self, _: &unsync::Gc<T>)\n    where\n        T: Trace + ?Sized,\n    {\n        self.0 = true;\n    }\n}\n\n/// Panics with a message that explains that the gc object has already been collected.\n#[cold]\n#[inline(never)]\nfn panic_deref_of_collected_object() -> ! {\n    panic!(\n        \"Attempt to dereference Gc to already-collected object. \\\n    This means a Gc escaped from a Drop implementation, likely implying a bug in your code.\",\n    );\n}\n"
  },
  {
    "path": "dumpster/src/ptr.rs",
    "content": "/*\n    dumpster, a cycle-tracking garbage collector for Rust.    Copyright (C) 2023 Clayton Ramsey.\n\n    This Source Code Form is subject to the terms of the Mozilla Public\n    License, v. 2.0. If a copy of the MPL was not distributed with this\n    file, You can obtain one at http://mozilla.org/MPL/2.0/.\n*/\n\n//! Custom pointer types used by this garbage collector.\n\nuse std::{\n    fmt,\n    mem::{size_of, MaybeUninit},\n    ptr::{addr_of, addr_of_mut, copy_nonoverlapping, null, NonNull},\n};\n\n#[repr(C)]\n#[derive(Clone, Copy)]\n/// A pointer for an allocation, extracted out as raw data.\n/// This contains both the pointer and all the pointer's metadata, but hidden behind an unknown\n/// interpretation.\n/// We trust that all pointers (even to `?Sized` or `dyn` types) are 2 words or fewer in size.\n/// This is a hack! Like, a big hack!\npub(crate) struct Erased([*const u8; 2]);\n\nunsafe impl Send for Erased {}\nunsafe impl Sync for Erased {}\n\nimpl Erased {\n    /// Construct a new erased pointer to some data from a reference\n    ///\n    /// # Panics\n    ///\n    /// This function will panic if the size of a reference is larger than the size of an\n    /// `Erased`.\n    /// To my knowledge, there are no pointer types with this property.\n    pub fn new<T: ?Sized>(reference: NonNull<T>) -> Erased {\n        let mut ptr = Erased([null(); 2]);\n        let ptr_size = size_of::<NonNull<T>>();\n        // Extract out the pointer as raw memory\n        assert!(\n            ptr_size <= size_of::<Erased>(),\n            \"pointers to T are too big for storage\"\n        );\n        unsafe {\n            // SAFETY: We know that `ptr.0` has at least as much space as `ptr_size`, and that\n            // `reference` has size equal to `ptr_size`.\n            copy_nonoverlapping(\n                addr_of!(reference).cast::<u8>(),\n                addr_of_mut!(ptr.0).cast::<u8>(),\n                ptr_size,\n            );\n        }\n\n      
  ptr\n    }\n\n    /// Specify this pointer into a pointer of a particular type.\n    ///\n    /// # Safety\n    ///\n    /// This function must only be specified to the type that the pointer was constructed with\n    /// via [`Erased::new`].\n    pub unsafe fn specify<T: ?Sized>(self) -> NonNull<T> {\n        let mut box_ref: MaybeUninit<NonNull<T>> = MaybeUninit::zeroed();\n\n        // For some reason, switching the ordering of casts causes this to create wacky undefined\n        // behavior. Why? I don't know. I have better things to do than pontificate on this on a\n        // Sunday afternoon.\n        copy_nonoverlapping(\n            addr_of!(self.0).cast::<u8>(),\n            addr_of_mut!(box_ref).cast::<u8>(),\n            size_of::<NonNull<T>>(),\n        );\n\n        box_ref.assume_init()\n    }\n}\n\nimpl fmt::Debug for Erased {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        write!(f, \"ErasedPtr({:x?})\", self.0)\n    }\n}\n\n/// A nullable pointer to an `?Sized` type.\n///\n/// We need this because it's actually impossible to create a null `*mut T` if `T` is `?Sized`.\npub(crate) struct Nullable<T: ?Sized>(*mut T);\n\nimpl<T: ?Sized> Nullable<T> {\n    /// Create a new nullable pointer from a non-null pointer.\n    pub fn new(ptr: NonNull<T>) -> Nullable<T> {\n        Nullable(ptr.as_ptr())\n    }\n\n    /// Convert this pointer to a null pointer.\n    pub fn as_null(self) -> Nullable<T> {\n        Nullable(self.0.with_addr(0))\n    }\n\n    /// Determine whether this pointer is null.\n    pub fn is_null(self) -> bool {\n        self.as_option().is_none()\n    }\n\n    /// Convert this pointer to an `Option<NonNull<T>>`.\n    pub fn as_option(self) -> Option<NonNull<T>> {\n        NonNull::new(self.0)\n    }\n\n    /// Convert this pointer to a `*mut T`.\n    pub fn as_ptr(self) -> *mut T {\n        self.0\n    }\n\n    /// Create a new nullable pointer from a pointer.\n    pub fn from_ptr(ptr: *mut T) -> Self {\n        
Self(ptr)\n    }\n\n    /// Convert this pointer to a `NonNull<T>`, panicking if this pointer is null with message\n    /// `msg`.\n    pub fn expect(self, msg: &str) -> NonNull<T> {\n        self.as_option().expect(msg)\n    }\n\n    /// Convert this pointer to a `NonNull<T>`, panicking if this pointer is null.\n    pub fn unwrap(self) -> NonNull<T> {\n        self.as_option().unwrap()\n    }\n\n    /// Convert this pointer to a `NonNull<T>`.\n    ///\n    /// # Safety\n    ///\n    /// The pointer must not be null.\n    pub unsafe fn unwrap_unchecked(self) -> NonNull<T> {\n        self.as_option().unwrap_unchecked()\n    }\n}\n\nimpl<T: ?Sized> Clone for Nullable<T> {\n    fn clone(&self) -> Self {\n        *self\n    }\n}\nimpl<T: ?Sized> Copy for Nullable<T> {}\n\n#[cfg(feature = \"coerce-unsized\")]\nimpl<T, U> std::ops::CoerceUnsized<Nullable<U>> for Nullable<T>\nwhere\n    T: std::marker::Unsize<U> + ?Sized,\n    U: ?Sized,\n{\n}\n\nimpl<T: ?Sized> fmt::Debug for Nullable<T> {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        write!(f, \"Nullable({:x?})\", self.0)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use core::any::Any;\n    use std::alloc::{dealloc, Layout};\n\n    use super::*;\n\n    #[test]\n    fn erased_alloc() {\n        let orig_ptr: &mut u8 = Box::leak(Box::new(7));\n        let erased_ptr = Erased::new(NonNull::from(orig_ptr));\n\n        unsafe {\n            let remade_ptr = erased_ptr.specify::<u8>();\n            assert_eq!(*remade_ptr.as_ref(), 7);\n            dealloc(remade_ptr.as_ptr(), Layout::for_value(remade_ptr.as_ref()));\n        }\n    }\n\n    #[test]\n    fn erased_alloc_slice() {\n        let orig_ptr: &mut [u8] = Box::leak(Box::new([7, 8, 9]));\n        let erased_ptr = Erased::new(NonNull::from(orig_ptr));\n\n        unsafe {\n            let remade_ptr = erased_ptr.specify::<[u8]>();\n            assert_eq!(remade_ptr.as_ref(), [7, 8, 9]);\n            dealloc(\n                
remade_ptr.as_ptr().cast(),\n                Layout::for_value(remade_ptr.as_ref()),\n            );\n        }\n    }\n\n    #[test]\n    fn erased_alloc_dyn() {\n        let orig_ptr: &mut dyn Any = Box::leak(Box::new(7u8));\n        let erased_ptr = Erased::new(NonNull::from(orig_ptr));\n\n        unsafe {\n            let remade_ptr = erased_ptr.specify::<dyn Any>();\n            assert_eq!(*remade_ptr.as_ref().downcast_ref::<u8>().unwrap(), 7);\n            dealloc(\n                remade_ptr.as_ptr().cast(),\n                Layout::for_value(remade_ptr.as_ref()),\n            );\n        }\n    }\n}\n"
  },
  {
    "path": "dumpster/src/sync/cell.rs",
    "content": "/*\n    dumpster, a cycle-tracking garbage collector for Rust.    Copyright (C) 2023 Clayton Ramsey.\n\n    This Source Code Form is subject to the terms of the Mozilla Public\n    License, v. 2.0. If a copy of the MPL was not distributed with this\n    file, You can obtain one at http://mozilla.org/MPL/2.0/.\n*/\n\n//! A shim for using either Loom or the standard library in garbage-collected environments.\n\n#[cfg(loom)]\nuse loom::cell::UnsafeCell;\n\n#[cfg(not(loom))]\nuse std::cell::UnsafeCell;\n\n#[derive(Debug)]\n/// An unsafe cell that is agnostic over using `std` or `loom` as its backing implementation.\n/// It is intended to only be used with [`Copy`] data.\npub struct UCell<T>(UnsafeCell<T>);\n\nimpl<T> UCell<T> {\n    /// Construct a `UCell` containing the value.\n    pub fn new(x: T) -> Self {\n        Self(UnsafeCell::new(x))\n    }\n\n    /// Get the value inside the `UCell`.\n    ///\n    /// # Safety\n    ///\n    /// This function can only be called when no other code is calling [`UCell::set`].\n    pub unsafe fn get(&self) -> T\n    where\n        T: Copy,\n    {\n        #[cfg(loom)]\n        {\n            *self.0.get().deref()\n        }\n        #[cfg(not(loom))]\n        {\n            *self.0.get()\n        }\n    }\n\n    /// Overwrite the value inside this cell.\n    ///\n    /// # Safety\n    ///\n    /// This function can only be called when no other code is calling [`UCell::set`] or\n    /// [`UCell::get`].\n    pub unsafe fn set(&self, x: T) {\n        #[cfg(loom)]\n        {\n            *self.0.get_mut().deref() = x;\n        }\n        #[cfg(not(loom))]\n        {\n            *self.0.get() = x;\n        }\n    }\n}\n\n#[cfg(not(loom))]\n#[cfg(feature = \"coerce-unsized\")]\nimpl<T, U> std::ops::CoerceUnsized<UCell<crate::ptr::Nullable<U>>>\n    for UCell<crate::ptr::Nullable<T>>\nwhere\n    T: std::marker::Unsize<U> + ?Sized,\n    U: ?Sized,\n{\n}\n"
  },
  {
    "path": "dumpster/src/sync/collect.rs",
    "content": "/*\n    dumpster, a cycle-tracking garbage collector for Rust.    Copyright (C) 2023 Clayton Ramsey.\n\n    This Source Code Form is subject to the terms of the Mozilla Public\n    License, v. 2.0. If a copy of the MPL was not distributed with this\n    file, You can obtain one at http://mozilla.org/MPL/2.0/.\n*/\n\n//! A synchronized collection algorithm.\n\nuse std::{\n    alloc::{dealloc, Layout},\n    cell::{Cell, LazyCell, RefCell},\n    collections::hash_map::Entry,\n    hash::Hash,\n    mem::{replace, swap, take, transmute},\n    ptr::{drop_in_place, NonNull},\n};\n\n#[cfg(not(loom))]\nuse std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};\n\nuse foldhash::{HashMap, HashMapExt};\n\n#[cfg(loom)]\nuse loom::{\n    lazy_static,\n    sync::atomic::{AtomicPtr, AtomicUsize, Ordering},\n    thread_local,\n};\n\n#[cfg(not(loom))]\nuse parking_lot::{Mutex, RwLock};\n\n#[cfg(loom)]\nuse crate::sync::loom_ext::{Mutex, RwLock};\n\nuse crate::{ptr::Erased, Trace, Visitor};\n\nuse super::{default_collect_condition, CollectCondition, CollectInfo, Gc, GcBox, CURRENT_TAG};\n\n/// The garbage truck, which is a global data structure containing information about allocations\n/// which might need to be collected.\nstruct GarbageTruck {\n    /// The contents of the garbage truck, containing all the allocations which need to be\n    /// collected and have already been delivered by a [`Dumpster`].\n    contents: Mutex<LazyCell<HashMap<AllocationId, TrashCan>>>,\n    /// A lock used for synchronizing threads that are awaiting completion of a collection process.\n    /// This lock should be acquired for reads by threads running a collection and for writes by\n    /// threads awaiting collection completion.\n    collecting_lock: RwLock<()>,\n    /// The number of [`Gc`]s dropped since the last time [`GarbageTruck::collect_all()`] was\n    /// called.\n    n_gcs_dropped: AtomicUsize,\n    /// The number of [`Gc`]s currently existing (which have not had their 
internals replaced with\n    /// `None`).\n    n_gcs_existing: AtomicUsize,\n    /// The function which determines whether a collection should be triggered.\n    /// This pointer value should always be cast to a [`CollectCondition`], but since `AtomicPtr`\n    /// doesn't handle function pointers correctly, we just cast to `*mut ()`.\n    collect_condition: AtomicPtr<()>,\n}\n\n/// A structure containing the global information for the garbage collector.\npub(super) struct Dumpster {\n    /// A lookup table for the allocations which may need to be cleaned up later.\n    pub contents: RefCell<HashMap<AllocationId, TrashCan>>,\n    /// The number of times an allocation on this thread has been dropped.\n    n_drops: Cell<usize>,\n}\n\n#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)]\n/// A unique identifier for an allocation.\npub(super) struct AllocationId(NonNull<GcBox<()>>);\n\n#[derive(Debug)]\n/// The information which describes an allocation that may need to be cleaned up later.\npub(super) struct TrashCan {\n    /// A pointer to the allocation to be cleaned up.\n    ptr: Erased,\n    /// The function which can be used to build a reference graph.\n    /// This function is safe to call on `ptr`.\n    dfs_fn: unsafe fn(Erased, &mut HashMap<AllocationId, AllocationInfo>),\n}\n\n#[derive(Debug)]\n/// A node in the reference graph, which is constructed while searching for unreachable allocations.\nstruct AllocationInfo {\n    /// An erased pointer to the allocation.\n    ptr: Erased,\n    /// Function for dropping the allocation when its weak and strong count hits zero.\n    /// Should have the same behavior as dropping a Gc normally to a reference count of zero.\n    weak_drop_fn: unsafe fn(Erased),\n    /// Information about this allocation's reachability.\n    reachability: Reachability,\n}\n\n#[derive(Debug)]\n/// The state of whether an allocation is reachable or of unknown reachability.\nenum Reachability {\n    /// The information describing an allocation 
whose accessibility is unknown.\n    Unknown {\n        /// The IDs for the allocations directly accessible from this allocation.\n        children: Vec<AllocationId>,\n        /// The number of references in the reference count for this allocation which are\n        /// \"unaccounted,\" which have not been found while constructing the graph.\n        /// It is the difference between the allocation's indegree in the \"true\" reference graph vs\n        /// the one we are currently building.\n        n_unaccounted: usize,\n        /// A function used to destroy the allocation.\n        destroy_fn: unsafe fn(Erased, &HashMap<AllocationId, AllocationInfo>),\n    },\n    /// The allocation here is reachable.\n    /// No further information is needed.\n    Reachable,\n}\n\n#[cfg(not(loom))]\n/// The global garbage truck.\n/// All [`TrashCan`]s should eventually end up in here.\nstatic GARBAGE_TRUCK: GarbageTruck = GarbageTruck::new();\n\n#[cfg(loom)]\nlazy_static! {\n    static ref GARBAGE_TRUCK: GarbageTruck = GarbageTruck::new();\n}\n\nthread_local! {\n    /// The dumpster for this thread.\n    /// Allocations which are \"dirty\" will be transferred to this dumpster before being moved into\n    /// the garbage truck for final collection.\n    pub(super) static DUMPSTER: Dumpster = Dumpster {\n        contents: RefCell::new(HashMap::new()),\n        n_drops: Cell::new(0),\n    };\n}\n\n#[cfg(not(loom))]\nthread_local! {\n    /// Whether the currently-running thread is doing a cleanup.\n    /// This cannot be stored in `DUMPSTER` because otherwise it would cause weird use-after-drop\n    /// behavior.\n    static CLEANING: Cell<bool> = const { Cell::new(false) };\n}\n\n#[cfg(loom)]\nthread_local! 
{\n    /// Whether the currently-running thread is doing a cleanup.\n    /// This cannot be stored in `DUMPSTER` because otherwise it would cause weird use-after-drop\n    /// behavior.\n    static CLEANING: Cell<bool> = Cell::new(false);\n}\n\n/// Collect all allocations in the garbage truck (but not necessarily the dumpster), then await\n/// completion of the collection.\n/// Ensures that all allocations dropped on the calling thread are cleaned up\npub fn collect_all_await() {\n    _ = DUMPSTER.try_with(|d| d.deliver_to(&GARBAGE_TRUCK));\n    GARBAGE_TRUCK.collect_all();\n    drop(GARBAGE_TRUCK.collecting_lock.read());\n}\n\n/// Notify that a `Gc` was destroyed, and update the tracking count for the number of dropped and\n/// existing `Gc`s.\n///\n/// This may trigger a linear-time cleanup of all allocations, but this will be guaranteed to\n/// occur with less-than-linear frequency, so it's always O(1).\npub fn notify_dropped_gc() {\n    GARBAGE_TRUCK.n_gcs_existing.fetch_sub(1, Ordering::Relaxed);\n    GARBAGE_TRUCK.n_gcs_dropped.fetch_add(1, Ordering::Relaxed);\n\n    // Do not do deliver or collect if we are currently cleaning or this thread is dying.\n    // This prevents deadlocks.\n    if !CLEANING.try_with(Cell::get).is_ok_and(|x| !x) {\n        return;\n    }\n\n    _ = DUMPSTER.try_with(|dumpster| {\n        dumpster.n_drops.set(dumpster.n_drops.get() + 1);\n        if dumpster.is_full() {\n            dumpster.deliver_to(&GARBAGE_TRUCK);\n        }\n    });\n\n    let collect_cond = unsafe {\n        // SAFETY: we only ever store collection conditions in the collect-condition box\n        transmute::<*mut (), CollectCondition>(\n            GARBAGE_TRUCK.collect_condition.load(Ordering::Relaxed),\n        )\n    };\n    if collect_cond(&CollectInfo { _private: () }) {\n        GARBAGE_TRUCK.collect_all();\n    }\n}\n\n/// Notify that a [`Gc`] was created, and increment the number of total existing `Gc`s.\npub fn notify_created_gc() {\n    
GARBAGE_TRUCK.n_gcs_existing.fetch_add(1, Ordering::Relaxed);\n}\n\n/// Mark an allocation as \"dirty,\" implying that it may or may not be inaccessible and need to\n/// be cleaned up.\n///\n/// # Safety\n///\n/// When calling this method, you have to ensure that `allocation`\n/// is [convertible to a reference](core::ptr#pointer-to-reference-conversion).\npub(super) unsafe fn mark_dirty<T>(allocation: NonNull<GcBox<T>>)\nwhere\n    T: Trace + Send + Sync + ?Sized,\n{\n    _ = DUMPSTER.try_with(|dumpster| {\n        if dumpster\n            .contents\n            .borrow_mut()\n            .insert(\n                AllocationId::from(allocation),\n                TrashCan {\n                    ptr: Erased::new(allocation),\n                    dfs_fn: dfs::<T>,\n                },\n            )\n            .is_none()\n        {\n            // SAFETY: the caller must guarantee that `allocation` meets all the\n            // requirements for a reference.\n            unsafe { allocation.as_ref() }\n                .weak\n                .fetch_add(1, Ordering::Acquire);\n        }\n    });\n}\n\n/// Mark an allocation as \"clean,\" implying that it has already been cleaned up and does not\n/// need to be cleaned again.\npub(super) fn mark_clean<T>(allocation: &GcBox<T>)\nwhere\n    T: Trace + Send + Sync + ?Sized,\n{\n    _ = DUMPSTER.try_with(|dumpster| {\n        if dumpster\n            .contents\n            .borrow_mut()\n            .remove(&AllocationId::from(allocation))\n            .is_some()\n        {\n            allocation.weak.fetch_sub(1, Ordering::Release);\n        }\n    });\n}\n\n#[cfg(test)]\n/// Deliver all [`TrashCan`]s from this thread's dumpster into the garbage truck.\n///\n/// This function is available to support testing, but currently is not part of the public API.\npub(super) fn deliver_dumpster() {\n    _ = DUMPSTER.try_with(|d| d.deliver_to(&GARBAGE_TRUCK));\n}\n\n/// Set the function which determines whether the garbage 
collector should be run.\n///\n/// `f` will be periodically called by the garbage collector to determine whether it should perform\n/// a full traversal of the heap.\n/// When `f` returns true, a traversal will begin.\n///\n/// # Examples\n///\n/// ```\n/// use dumpster::sync::{set_collect_condition, CollectInfo};\n///\n/// /// This function will make sure a GC traversal never happens unless directly activated.\n/// fn never_collect(_: &CollectInfo) -> bool {\n///     false\n/// }\n///\n/// set_collect_condition(never_collect);\n/// ```\npub fn set_collect_condition(f: CollectCondition) {\n    GARBAGE_TRUCK\n        .collect_condition\n        .store(f as *mut (), Ordering::Relaxed);\n}\n\n/// Get the number of [`Gc`]s dropped since the last collection.\npub fn n_gcs_dropped() -> usize {\n    GARBAGE_TRUCK.n_gcs_dropped.load(Ordering::Relaxed)\n}\n\n/// Get the number of [`Gc`]s currently existing in the entire program.\npub fn n_gcs_existing() -> usize {\n    GARBAGE_TRUCK.n_gcs_existing.load(Ordering::Relaxed)\n}\n\nimpl Dumpster {\n    /// Deliver all [`TrashCan`]s contained by this dumpster to the garbage truck, removing them\n    /// from the local dumpster storage and adding them to the global truck.\n    fn deliver_to(&self, garbage_truck: &GarbageTruck) {\n        let mut guard = garbage_truck.contents.lock();\n        self.n_drops.set(0);\n        self.deliver_to_contents(&mut guard);\n    }\n\n    /// Deliver the entries in this dumpster to `contents`.\n    fn deliver_to_contents(&self, contents: &mut HashMap<AllocationId, TrashCan>) {\n        for (id, can) in self.contents.borrow_mut().drain() {\n            if contents.insert(id, can).is_some() {\n                unsafe {\n                    // SAFETY: an allocation can only be in the dumpster if it still exists and its\n                    // header is valid\n                    id.0.as_ref()\n                }\n                .weak\n                .fetch_sub(1, Ordering::Release);\n            
}\n        }\n    }\n\n    /// Determine whether this dumpster is full (and therefore should have its contents delivered to\n    /// the garbage truck).\n    fn is_full(&self) -> bool {\n        self.contents.borrow().len() > 100_000 || self.n_drops.get() > 100_000\n    }\n}\n\nimpl GarbageTruck {\n    /// Construct a new, empty garbage truck.\n    ///\n    /// Since the `GarbageTruck` is meant to be a single global value, this function should only be\n    /// called once in the initialization of `GARBAGE_TRUCK`.\n    #[cfg(not(loom))]\n    const fn new() -> Self {\n        Self {\n            contents: Mutex::new(LazyCell::new(HashMap::new)),\n            collecting_lock: RwLock::new(()),\n            n_gcs_dropped: AtomicUsize::new(0),\n            n_gcs_existing: AtomicUsize::new(0),\n            collect_condition: AtomicPtr::new(default_collect_condition as *mut ()),\n        }\n    }\n\n    /// Construct a new, empty garbage truck.\n    ///\n    /// Since the `GarbageTruck` is meant to be a single global value, this function should only be\n    /// called once in the initialization of `GARBAGE_TRUCK`.\n    #[cfg(loom)]\n    fn new() -> Self {\n        Self {\n            contents: Mutex::new(LazyCell::new(HashMap::new)),\n            collecting_lock: RwLock::new(()),\n            n_gcs_dropped: AtomicUsize::new(0),\n            n_gcs_existing: AtomicUsize::new(0),\n            collect_condition: AtomicPtr::new(default_collect_condition as *mut ()),\n        }\n    }\n\n    /// Search through the set of existing allocations which have been marked inaccessible, and see\n    /// if they are inaccessible.\n    /// If so, drop those allocations.\n    fn collect_all(&self) {\n        let collecting_guard = self.collecting_lock.write();\n        self.n_gcs_dropped.store(0, Ordering::Relaxed);\n\n        let to_collect = take(&mut **self.contents.lock());\n\n        let mut ref_graph = HashMap::with_capacity(to_collect.len());\n\n        CURRENT_TAG.fetch_add(1, 
Ordering::Release);\n\n        for (_, TrashCan { ptr, dfs_fn }) in to_collect {\n            unsafe {\n                // SAFETY: `ptr` may only be in `to_collect` if it was a valid pointer\n                // and `dfs_fn` must have been created with the intent of referring to\n                // the erased type of `ptr`.\n                dfs_fn(ptr, &mut ref_graph);\n            }\n        }\n\n        let root_ids = ref_graph\n            .iter()\n            .filter_map(|(&k, v)| match v.reachability {\n                Reachability::Reachable => Some(k),\n                Reachability::Unknown { n_unaccounted, .. } => (n_unaccounted > 0\n                    || unsafe {\n                        // SAFETY: we found `k` in the reference graph,\n                        // so it must still be an extant allocation\n                        k.0.as_ref().weak.load(Ordering::Acquire) > 1\n                    })\n                .then_some(k),\n            })\n            .collect::<Vec<_>>();\n        for root_id in root_ids {\n            mark(root_id, &mut ref_graph);\n        }\n\n        CLEANING.with(|c| c.set(true));\n        // set of allocations which must be destroyed because we were the last weak pointer to it\n        let mut weak_destroys = Vec::new();\n        for (id, node) in &ref_graph {\n            let header_ref = unsafe { id.0.as_ref() };\n            match node.reachability {\n                Reachability::Unknown { destroy_fn, .. 
} => unsafe {\n                    // SAFETY: `destroy_fn` must have been created with `node.ptr` in mind,\n                    // and we have proven that no other references to `node.ptr` exist\n                    destroy_fn(node.ptr, &ref_graph);\n                },\n                Reachability::Reachable => {\n                    if header_ref.weak.fetch_sub(1, Ordering::Release) == 1\n                        && header_ref.strong.load(Ordering::Acquire) == 0\n                    {\n                        // we are the last reference to the allocation.\n                        // mark to be cleaned up later\n                        // no real synchronization loss to storing the guard because we had the last\n                        // reference anyway\n                        weak_destroys.push((node.weak_drop_fn, node.ptr));\n                    }\n                }\n            }\n        }\n        CLEANING.with(|c| c.set(false));\n        for (drop_fn, ptr) in weak_destroys {\n            unsafe {\n                // SAFETY: we have proven (via header_ref.weak = 1) that the cleaning\n                // process had the last reference to the allocation.\n                // `drop_fn` must have been created with the true value of `ptr` in mind.\n                drop_fn(ptr);\n            };\n        }\n        drop(collecting_guard);\n    }\n}\n\n/// Build out a part of the reference graph, making note of all allocations which are reachable from\n/// the one described in `ptr`.\n///\n/// # Inputs\n///\n/// - `ptr`: A pointer to the allocation that we should start constructing from.\n/// - `ref_graph`: A lookup from allocation IDs to node information about that allocation.\n///\n/// # Effects\n///\n/// `ref_graph` will be expanded to include all allocations reachable from `ptr`.\n///\n/// # Safety\n///\n/// `ptr` must have been created as a pointer to a `GcBox<T>`.\nunsafe fn dfs<T: Trace + Send + Sync + ?Sized>(\n    ptr: Erased,\n    ref_graph: &mut 
HashMap<AllocationId, AllocationInfo>,\n) {\n    let box_ref = unsafe {\n        // SAFETY: We require `ptr` to be a an erased pointer to `GcBox<T>`.\n        ptr.specify::<GcBox<T>>().as_ref()\n    };\n    let starting_id = AllocationId::from(box_ref);\n    let Entry::Vacant(v) = ref_graph.entry(starting_id) else {\n        // the weak count was incremented by another DFS operation elsewhere.\n        // Decrement it to have only one from us.\n        box_ref.weak.fetch_sub(1, Ordering::Release);\n        return;\n    };\n    let strong_count = box_ref.strong.load(Ordering::Acquire);\n    v.insert(AllocationInfo {\n        ptr,\n        weak_drop_fn: drop_weak_zero::<T>,\n        reachability: Reachability::Unknown {\n            children: Vec::new(),\n            n_unaccounted: strong_count,\n            destroy_fn: destroy_erased::<T>,\n        },\n    });\n\n    if box_ref\n        .value\n        .accept(&mut Dfs {\n            ref_graph,\n            current_id: starting_id,\n        })\n        .is_err()\n        || box_ref.generation.load(Ordering::Acquire) >= CURRENT_TAG.load(Ordering::Relaxed)\n    {\n        // box_ref.value was accessed while we worked\n        // mark this allocation as reachable\n        mark(starting_id, ref_graph);\n    }\n}\n\n#[derive(Debug)]\n/// The visitor structure used for building the found-reference-graph of allocations.\npub(super) struct Dfs<'a> {\n    /// The reference graph.\n    /// Each allocation is assigned a node.\n    ref_graph: &'a mut HashMap<AllocationId, AllocationInfo>,\n    /// The allocation ID currently being visited.\n    /// Used for knowing which node is the parent of another.\n    current_id: AllocationId,\n}\n\nimpl Visitor for Dfs<'_> {\n    fn visit_sync<T>(&mut self, gc: &Gc<T>)\n    where\n        T: Trace + Send + Sync + ?Sized,\n    {\n        if Gc::is_dead(gc) {\n            return;\n        }\n        // must not use deref operators since we don't want to update the generation\n        let 
ptr = unsafe {\n            // SAFETY: This is the same as the deref implementation, but avoids\n            // incrementing the generation count.\n            gc.ptr.get().unwrap()\n        };\n        let box_ref = unsafe {\n            // SAFETY: same as above.\n            ptr.as_ref()\n        };\n        let current_tag = CURRENT_TAG.load(Ordering::Relaxed);\n        if gc.tag.swap(current_tag, Ordering::Relaxed) >= current_tag\n            || box_ref.generation.load(Ordering::Acquire) >= current_tag\n        {\n            // This pointer was already tagged by this sweep, so it must have been moved by\n            mark(self.current_id, self.ref_graph);\n            return;\n        }\n\n        let mut new_id = AllocationId::from(box_ref);\n\n        let Reachability::Unknown {\n            ref mut children, ..\n        } = self\n            .ref_graph\n            .get_mut(&self.current_id)\n            .unwrap()\n            .reachability\n        else {\n            // this node has been proven reachable by something higher up. 
No need to keep building\n            // its ref graph\n            return;\n        };\n        children.push(new_id);\n\n        match self.ref_graph.entry(new_id) {\n            Entry::Occupied(mut o) => match o.get_mut().reachability {\n                Reachability::Unknown {\n                    ref mut n_unaccounted,\n                    ..\n                } => {\n                    *n_unaccounted -= 1;\n                }\n                Reachability::Reachable => (),\n            },\n            Entry::Vacant(v) => {\n                // This allocation has never been visited by the reference graph builder\n                let strong_count = box_ref.strong.load(Ordering::Acquire);\n                box_ref.weak.fetch_add(1, Ordering::Acquire);\n                v.insert(AllocationInfo {\n                    ptr: Erased::new(ptr),\n                    weak_drop_fn: drop_weak_zero::<T>,\n                    reachability: Reachability::Unknown {\n                        children: Vec::new(),\n                        n_unaccounted: strong_count - 1,\n                        destroy_fn: destroy_erased::<T>,\n                    },\n                });\n\n                // Save the previously visited ID, then carry on to the next one\n                swap(&mut new_id, &mut self.current_id);\n\n                if box_ref.value.accept(self).is_err()\n                    || box_ref.generation.load(Ordering::Acquire) >= current_tag\n                {\n                    // On failure, this means `**gc` is accessible, and should be marked\n                    // as such\n                    mark(self.current_id, self.ref_graph);\n                }\n\n                // Restore current_id and carry on\n                swap(&mut new_id, &mut self.current_id);\n            }\n        }\n    }\n\n    fn visit_unsync<T>(&mut self, _: &crate::unsync::Gc<T>)\n    where\n        T: Trace + ?Sized,\n    {\n        unreachable!(\"sync Gc cannot own an unsync Gc\");\n    
}\n}\n\n/// Traverse the reference graph, marking `root` and any allocations reachable from `root` as\n/// reachable.\nfn mark(root: AllocationId, graph: &mut HashMap<AllocationId, AllocationInfo>) {\n    let node = graph.get_mut(&root).unwrap();\n    if let Reachability::Unknown { children, .. } =\n        replace(&mut node.reachability, Reachability::Reachable)\n    {\n        for child in children {\n            mark(child, graph);\n        }\n    }\n}\n\n/// A visitor for decrementing the reference count of pointees.\npub(super) struct PrepareForDestruction<'a> {\n    /// The reference graph.\n    /// Must have been populated with reachability already.\n    graph: &'a HashMap<AllocationId, AllocationInfo>,\n}\n\nimpl Visitor for PrepareForDestruction<'_> {\n    fn visit_sync<T>(&mut self, gc: &crate::sync::Gc<T>)\n    where\n        T: Trace + Send + Sync + ?Sized,\n    {\n        if Gc::is_dead(gc) {\n            return;\n        }\n        let id = AllocationId::from(unsafe {\n            // SAFETY: This is the same as dereferencing the GC.\n            gc.ptr.get().unwrap()\n        });\n        if matches!(self.graph[&id].reachability, Reachability::Reachable) {\n            unsafe {\n                // SAFETY: This is the same as dereferencing the GC.\n                id.0.as_ref().strong.fetch_sub(1, Ordering::Release);\n            }\n        }\n        unsafe {\n            // SAFETY: we have a unique reference to `gc` as we are destroying the structure.\n            gc.kill();\n        }\n    }\n\n    fn visit_unsync<T>(&mut self, _: &crate::unsync::Gc<T>)\n    where\n        T: Trace + ?Sized,\n    {\n        unreachable!(\"no unsync members of sync Gc possible!\");\n    }\n}\n\n/// Destroy an allocation, obliterating its GCs, dropping it, and deallocating it.\n///\n/// # Safety\n///\n/// `ptr` must have been created from a pointer to a `GcBox<T>`.\nunsafe fn destroy_erased<T: Trace + Send + Sync + ?Sized>(\n    ptr: Erased,\n    graph: 
&HashMap<AllocationId, AllocationInfo>,\n) {\n    let specified = ptr.specify::<GcBox<T>>().as_mut();\n    specified\n        .value\n        .accept(&mut PrepareForDestruction { graph })\n        .expect(\"allocation assumed to be unreachable but somehow was accessed\");\n    let layout = Layout::for_value(specified);\n    drop_in_place(specified);\n    dealloc(std::ptr::from_mut::<GcBox<T>>(specified).cast(), layout);\n}\n\n/// Function for handling dropping an allocation when its weak and strong reference count reach\n/// zero.\n///\n/// # Safety\n///\n/// `ptr` must have been created as a pointer to a `GcBox<T>`.\nunsafe fn drop_weak_zero<T: Trace + Send + Sync + ?Sized>(ptr: Erased) {\n    let mut specified = ptr.specify::<GcBox<T>>();\n    assert_eq!(specified.as_ref().weak.load(Ordering::Relaxed), 0);\n    assert_eq!(specified.as_ref().strong.load(Ordering::Relaxed), 0);\n\n    let layout = Layout::for_value(specified.as_ref());\n    drop_in_place(specified.as_mut());\n    dealloc(specified.as_ptr().cast(), layout);\n}\n\nunsafe impl Send for AllocationId {}\nunsafe impl Sync for AllocationId {}\n\nimpl<T> From<&GcBox<T>> for AllocationId\nwhere\n    T: Trace + Send + Sync + ?Sized,\n{\n    fn from(value: &GcBox<T>) -> Self {\n        AllocationId(NonNull::from(value).cast())\n    }\n}\n\nimpl<T> From<NonNull<GcBox<T>>> for AllocationId\nwhere\n    T: Trace + Send + Sync + ?Sized,\n{\n    fn from(value: NonNull<GcBox<T>>) -> Self {\n        AllocationId(value.cast())\n    }\n}\n\n#[cfg(not(loom))] // cannot access lazy static in drop\nimpl Drop for Dumpster {\n    fn drop(&mut self) {\n        self.deliver_to(&GARBAGE_TRUCK);\n        // collect_all();\n    }\n}\n"
  },
  {
    "path": "dumpster/src/sync/loom_ext.rs",
    "content": "/*\n    dumpster, a cycle-tracking garbage collector for Rust.    Copyright (C) 2023 Clayton Ramsey.\n\n    This Source Code Form is subject to the terms of the Mozilla Public\n    License, v. 2.0. If a copy of the MPL was not distributed with this\n    file, You can obtain one at http://mozilla.org/MPL/2.0/.\n*/\n\n//! Tests for running under loom.\n\n#![cfg_attr(not(test), allow(dead_code))]\n\nuse std::{\n    mem::MaybeUninit,\n    ops::Deref,\n    sync::{PoisonError, TryLockError},\n};\n\nuse loom::{\n    cell::UnsafeCell,\n    sync::{\n        Mutex as MutexImpl, MutexGuard, RwLock as RwLockImpl, RwLockReadGuard, RwLockWriteGuard,\n    },\n};\n\nuse crate::{TraceWith, Visitor};\n\n/// Simple wrapper mutex type.\npub struct Mutex<T: ?Sized>(MutexImpl<T>);\n\nunsafe impl<V: Visitor, T: TraceWith<V> + ?Sized> TraceWith<V> for Mutex<T> {\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        self.0\n            .try_lock()\n            .map_err(|e| match e {\n                TryLockError::Poisoned(_) => panic!(),\n                TryLockError::WouldBlock => (),\n            })?\n            .deref()\n            .accept(visitor)\n    }\n}\n\nimpl<T> Mutex<T> {\n    /// Construct a new mutex.\n    pub fn new(value: T) -> Self {\n        Self(MutexImpl::new(value))\n    }\n\n    /// Lock the mutex.\n    pub fn lock(&self) -> MutexGuard<'_, T> {\n        self.0.lock().unwrap_or_else(PoisonError::into_inner)\n    }\n\n    #[expect(dead_code)]\n    /// Is the mutex locked?\n    pub fn is_locked(&self) -> bool {\n        !matches!(self.0.try_lock(), Err(TryLockError::WouldBlock))\n    }\n}\n\n/// A read-write lock\npub struct RwLock<T>(RwLockImpl<T>);\n\nimpl<T> RwLock<T> {\n    /// Construct a rwlock.\n    pub fn new(value: T) -> Self {\n        Self(RwLockImpl::new(value))\n    }\n\n    /// Get a read guard.\n    pub fn read(&self) -> RwLockReadGuard<'_, T> {\n        self.0.read().unwrap_or_else(PoisonError::into_inner)\n    }\n\n    /// 
Get a write guard.\n    pub fn write(&self) -> RwLockWriteGuard<'_, T> {\n        self.0.write().unwrap_or_else(PoisonError::into_inner)\n    }\n}\n\n/// A once-object.\nstruct Once {\n    /// Completed?\n    is_completed: Mutex<bool>,\n}\n\nimpl Once {\n    /// Construct a once.\n    fn new() -> Self {\n        Self {\n            is_completed: Mutex::new(false),\n        }\n    }\n\n    /// Call a function once.\n    fn call_once(&self, f: impl FnOnce()) {\n        let mut is_completed = self.is_completed.lock();\n\n        if *is_completed {\n            return;\n        }\n\n        f();\n        *is_completed = true;\n    }\n\n    /// Determine if we are completed.\n    fn is_completed(&self) -> bool {\n        *self.is_completed.lock()\n    }\n}\n\n/// A once-lock.\npub struct OnceLock<T> {\n    /// A thing that does it once.\n    once: Once,\n    /// The data.\n    value: UnsafeCell<MaybeUninit<T>>,\n}\n\nunsafe impl<T: Sync + Send> Sync for OnceLock<T> {}\nunsafe impl<T: Send> Send for OnceLock<T> {}\n\nunsafe impl<V: Visitor, T: TraceWith<V>> TraceWith<V> for OnceLock<T> {\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        self.with(|value| value.accept(visitor)).unwrap_or(Ok(()))\n    }\n}\n\nimpl<T> OnceLock<T> {\n    /// Construct a once-lock.\n    pub fn new() -> Self {\n        Self {\n            once: Once::new(),\n            value: UnsafeCell::new(MaybeUninit::uninit()),\n        }\n    }\n\n    /// Call a function uncheckedly.\n    unsafe fn with_unchecked<R>(&self, f: impl FnOnce(&T) -> R) -> R {\n        self.value\n            .with(|ptr| f(unsafe { (*ptr).assume_init_ref() }))\n    }\n\n    /// Apply a function.\n    pub fn with<R>(&self, f: impl FnOnce(&T) -> R) -> Option<R> {\n        if self.once.is_completed() {\n            Some(unsafe { self.with_unchecked(f) })\n        } else {\n            None\n        }\n    }\n\n    /// Apply or initialize.\n    pub fn with_or_init<R>(&self, init: impl FnOnce() -> T, f: impl 
FnOnce(&T) -> R) -> R {\n        self.once.call_once(|| {\n            self.value.with_mut(|ptr| unsafe {\n                (*ptr).write(init());\n            });\n        });\n\n        unsafe { self.with_unchecked(f) }\n    }\n\n    /// Set the value.\n    pub fn set(&self, value: T) {\n        self.with_or_init(|| value, |_| {});\n    }\n}\n\n#[test]\nfn test_once() {\n    use loom::sync::{\n        atomic::{AtomicUsize, Ordering},\n        Arc,\n    };\n\n    loom::model(|| {\n        let once = Arc::new(Once::new());\n        let counter = Arc::new(AtomicUsize::new(0));\n\n        let mut join_handles = vec![];\n\n        for _ in 0..2 {\n            let once = once.clone();\n            let counter = counter.clone();\n\n            join_handles.push(loom::thread::spawn(move || {\n                once.call_once(|| {\n                    counter.fetch_add(1, Ordering::Relaxed);\n                });\n            }));\n        }\n\n        for join_handle in join_handles {\n            join_handle.join().unwrap();\n        }\n\n        assert_eq!(counter.load(Ordering::Relaxed), 1);\n    });\n}\n\n#[test]\nfn test_once_lock() {\n    use loom::sync::{\n        atomic::{AtomicUsize, Ordering},\n        Arc,\n    };\n\n    loom::model(|| {\n        let once_lock = Arc::new(OnceLock::<String>::new());\n        let counter = Arc::new(AtomicUsize::new(0));\n\n        let mut join_handles = vec![];\n\n        for _ in 0..2 {\n            let once_lock = once_lock.clone();\n            let counter = counter.clone();\n\n            join_handles.push(loom::thread::spawn({\n                move || {\n                    once_lock.with_or_init(\n                        || {\n                            counter.fetch_add(1, Ordering::Relaxed);\n                            String::from(\"test\")\n                        },\n                        |value| {\n                            assert_eq!(value, \"test\");\n                        },\n                    );\n            
    }\n            }));\n        }\n\n        for join_handle in join_handles {\n            join_handle.join().unwrap();\n        }\n\n        assert_eq!(counter.load(Ordering::Relaxed), 1);\n    });\n}\n"
  },
  {
    "path": "dumpster/src/sync/loom_tests.rs",
    "content": "/*\n    dumpster, a cycle-tracking garbage collector for Rust.    Copyright (C) 2023 Clayton Ramsey.\n\n    This Source Code Form is subject to the terms of the Mozilla Public\n    License, v. 2.0. If a copy of the MPL was not distributed with this\n    file, You can obtain one at http://mozilla.org/MPL/2.0/.\n*/\n\nuse loom::{\n    lazy_static,\n    sync::atomic::{AtomicUsize, Ordering},\n};\n\nuse loom_ext::{Mutex, OnceLock};\n\nuse crate::Visitor;\n\nuse super::*;\n\nstruct DropCount<'a>(&'a AtomicUsize);\n\nimpl Drop for DropCount<'_> {\n    fn drop(&mut self) {\n        self.0.fetch_add(1, Ordering::Release);\n    }\n}\n\nunsafe impl<V: Visitor> TraceWith<V> for DropCount<'_> {\n    fn accept(&self, _: &mut V) -> Result<(), ()> {\n        Ok(())\n    }\n}\n\nstruct MultiRef {\n    refs: Mutex<Vec<Gc<MultiRef>>>,\n    #[expect(unused)]\n    count: DropCount<'static>,\n}\n\nunsafe impl<V: Visitor> TraceWith<V> for MultiRef {\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        self.refs.accept(visitor)\n    }\n}\n\n#[test]\nfn loom_single_alloc() {\n    lazy_static! {\n        static ref DROP_COUNT: AtomicUsize = AtomicUsize::new(0);\n    }\n\n    loom::model(|| {\n        let gc1 = Gc::new(DropCount(&DROP_COUNT));\n\n        collect();\n        assert_eq!(DROP_COUNT.load(Ordering::Acquire), 0);\n        drop(gc1);\n        collect();\n        assert_eq!(DROP_COUNT.load(Ordering::Acquire), 1);\n    });\n}\n\n#[test]\nfn loom_self_referential() {\n    struct Foo(Mutex<Option<Gc<Foo>>>);\n\n    lazy_static! 
{\n        static ref DROP_COUNT: AtomicUsize = AtomicUsize::new(0);\n    }\n\n    unsafe impl<V: Visitor> TraceWith<V> for Foo {\n        fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n            self.0.accept(visitor)\n        }\n    }\n\n    impl Drop for Foo {\n        fn drop(&mut self) {\n            // println!(\"begin increment of the drop count!\");\n            DROP_COUNT.fetch_add(1, Ordering::Release);\n        }\n    }\n\n    loom::model(|| {\n        let gc1 = Gc::new(Foo(Mutex::new(None)));\n        *gc1.0.lock() = Some(Gc::clone(&gc1));\n\n        assert_eq!(DROP_COUNT.load(Ordering::Acquire), 0);\n        drop(gc1);\n        collect();\n        assert_eq!(DROP_COUNT.load(Ordering::Acquire), 1);\n    });\n}\n\n#[test]\nfn loom_two_cycle() {\n    lazy_static! {\n        static ref DROP_0: AtomicUsize = AtomicUsize::new(0);\n        static ref DROP_1: AtomicUsize = AtomicUsize::new(0);\n    }\n\n    loom::model(|| {\n        let gc0 = Gc::new(MultiRef {\n            refs: Mutex::new(Vec::new()),\n            count: DropCount(&DROP_0),\n        });\n        let gc1 = Gc::new(MultiRef {\n            refs: Mutex::new(vec![Gc::clone(&gc0)]),\n            count: DropCount(&DROP_1),\n        });\n        gc0.refs.lock().push(Gc::clone(&gc1));\n\n        collect();\n        assert_eq!(DROP_0.load(Ordering::Acquire), 0);\n        assert_eq!(DROP_0.load(Ordering::Acquire), 0);\n        drop(gc0);\n        collect();\n        assert_eq!(DROP_0.load(Ordering::Acquire), 0);\n        assert_eq!(DROP_0.load(Ordering::Acquire), 0);\n        drop(gc1);\n        collect();\n        assert_eq!(DROP_0.load(Ordering::Acquire), 1);\n        assert_eq!(DROP_0.load(Ordering::Acquire), 1);\n    });\n}\n\n#[test]\n#[ignore = \"not going to fix this for now\"]\n/// Test that creating a `Gc` during a `Drop` implementation will still not leak the `Gc`.\nfn loom_sync_leak_by_creation_in_drop() {\n    lazy_static! 
{\n        static ref BAR_DROP_COUNT: [AtomicUsize; 2] = [AtomicUsize::new(0), AtomicUsize::new(0)];\n    }\n\n    struct Foo(OnceLock<Gc<Self>>, usize);\n    struct Bar(OnceLock<Gc<Self>>, usize);\n\n    unsafe impl<V: Visitor> TraceWith<V> for Foo {\n        fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n            self.0.accept(visitor)\n        }\n    }\n\n    unsafe impl<V: Visitor> TraceWith<V> for Bar {\n        fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n            self.0.accept(visitor)\n        }\n    }\n\n    impl Drop for Foo {\n        fn drop(&mut self) {\n            println!(\"calling drop for foo\");\n            let gcbar = Gc::new(Bar(OnceLock::new(), self.1));\n            gcbar.0.set(gcbar.clone());\n            drop(gcbar);\n\n            // MUST be included for the test to succeed (in case Foo is collected on separate\n            // thread)\n            crate::sync::collect::deliver_dumpster();\n            println!(\"drop for foo done\");\n        }\n    }\n\n    impl Drop for Bar {\n        fn drop(&mut self) {\n            println!(\"drop Bar\");\n            BAR_DROP_COUNT[self.1].fetch_add(1, Ordering::Relaxed);\n        }\n    }\n\n    loom::model(|| {\n        println!(\"=========== NEW MODEL ITERATION ===============\");\n\n        let mut join_handles = vec![];\n\n        for i in 0..2 {\n            join_handles.push(loom::thread::spawn(move || {\n                let foo = Gc::new(Foo(OnceLock::new(), i));\n                foo.0.set(foo.clone());\n                drop(foo);\n\n                println!(\"===== collect from {i} number 1\");\n                collect(); // causes Bar to be created and then leaked\n                println!(\"===== collect from {i} number 2\");\n                collect(); // cleans up Bar (eventually)\n\n                assert_eq!(\n                    BAR_DROP_COUNT[i].load(Ordering::Relaxed),\n                    1,\n                    \"failed to collect on thread 0\"\n           
     );\n                collect::DUMPSTER.with(|d| println!(\"{:?}\", d.contents));\n                assert!(collect::DUMPSTER.with(|d| d.contents.borrow().is_empty()));\n            }));\n        }\n\n        for join_handle in join_handles {\n            join_handle.join().unwrap();\n        }\n    });\n}\n"
  },
  {
    "path": "dumpster/src/sync/mod.rs",
    "content": "/*\n    dumpster, a cycle-tracking garbage collector for Rust.    Copyright (C) 2023 Clayton Ramsey.\n\n    This Source Code Form is subject to the terms of the Mozilla Public\n    License, v. 2.0. If a copy of the MPL was not distributed with this\n    file, You can obtain one at http://mozilla.org/MPL/2.0/.\n*/\n\n//! Thread-safe shared garbage collection.\n//!\n//! Most users of this module will be interested in using [`Gc`] directly out of the box - this will\n//! just work.\n//! Those with more particular needs (such as benchmarking) should turn toward\n//! [`set_collect_condition`] in order to tune exactly when the garbage collector does cleanups.\n//!\n//! # Examples\n//!\n//! ```\n//! use dumpster::sync::Gc;\n//!\n//! let my_gc = Gc::new(100);\n//! let other_gc = my_gc.clone();\n//!\n//! drop(my_gc);\n//! drop(other_gc);\n//!\n//! // contents of the Gc are automatically freed\n//! ```\n\nmod cell;\nmod collect;\n#[cfg(loom)]\nmod loom_ext;\n#[cfg(all(loom, test))]\nmod loom_tests;\n#[cfg(all(test, not(loom)))]\nmod tests;\n\n#[cfg(loom)]\nuse loom::{\n    lazy_static,\n    sync::atomic::{fence, AtomicUsize, Ordering},\n};\nuse std::fmt::Display;\n#[cfg(not(loom))]\nuse std::sync::atomic::{fence, AtomicUsize, Ordering};\nuse std::{\n    alloc::{dealloc, handle_alloc_error, Layout},\n    any::TypeId,\n    borrow::{Borrow, Cow},\n    fmt::Debug,\n    mem::{self, ManuallyDrop, MaybeUninit},\n    num::NonZeroUsize,\n    ops::Deref,\n    ptr::{self, addr_of, addr_of_mut, drop_in_place, NonNull},\n    slice,\n};\n\nuse crate::{\n    contains_gcs, panic_deref_of_collected_object,\n    ptr::Nullable,\n    sync::{\n        cell::UCell,\n        collect::{Dfs, PrepareForDestruction},\n    },\n    Trace, TraceWith, Visitor,\n};\n\nuse self::collect::{\n    collect_all_await, mark_clean, mark_dirty, n_gcs_dropped, n_gcs_existing, notify_created_gc,\n    notify_dropped_gc,\n};\n\n/// A soft limit on the amount of references that may be made to a 
`Gc`.\n///\n/// Going above this limit will abort your program (although not\n/// necessarily) at _exactly_ `MAX_REFCOUNT + 1` references.\n///\n/// See comment in `Gc::clone`.\nconst MAX_STRONG_COUNT: usize = (isize::MAX) as usize;\n\n/// Allows tracing with all sync visitors.\n#[expect(private_bounds)]\npub(crate) trait TraceSync:\n    for<'a> TraceWith<Dfs<'a>> + for<'a> TraceWith<PrepareForDestruction<'a>> + TraceWith<Rehydrate>\n{\n}\n\nimpl<T> TraceSync for T where\n    T: ?Sized\n        + for<'a> TraceWith<Dfs<'a>>\n        + for<'a> TraceWith<PrepareForDestruction<'a>>\n        + TraceWith<Rehydrate>\n{\n}\n\n/// A thread-safe garbage-collected pointer.\n///\n/// This pointer can be duplicated and then shared across threads.\n/// Garbage collection is performed concurrently.\n///\n/// # Examples\n///\n/// ```\n/// use dumpster::sync::Gc;\n/// use std::sync::atomic::{AtomicUsize, Ordering};\n///\n/// let shared = Gc::new(AtomicUsize::new(0));\n///\n/// std::thread::scope(|s| {\n///     s.spawn(|| {\n///         let other_gc = shared.clone();\n///         other_gc.store(1, Ordering::Relaxed);\n///     });\n///\n///     shared.store(2, Ordering::Relaxed);\n/// });\n///\n/// println!(\"{}\", shared.load(Ordering::Relaxed));\n/// ```\n///\n/// # Interaction with `Drop`\n///\n/// While collecting cycles, it's possible for a `Gc` to exist that points to some deallocated\n/// object.\n/// To prevent undefined behavior, these `Gc`s are marked as dead during collection and rendered\n/// inaccessible.\n/// Dereferencing or cloning a `Gc` during the `Drop` implementation of a `Trace` type could\n/// result in the program panicking to keep the program from accessing memory after freeing it.\n/// If you're accessing a `Gc` during a `Drop` implementation, make sure to use the fallible\n/// operations [`Gc::try_deref`] and [`Gc::try_clone`].\npub struct Gc<T: Trace + Send + Sync + ?Sized + 'static> {\n    /// The pointer to the allocation.\n    ptr: 
UCell<Nullable<GcBox<T>>>,\n    /// The tag information of this pointer, used for mutation detection when marking.\n    tag: AtomicUsize,\n}\n\n#[cfg(not(loom))]\n/// The tag of the current sweep operation.\n/// All new allocations are minted with the current tag.\nstatic CURRENT_TAG: AtomicUsize = AtomicUsize::new(0);\n\n#[cfg(loom)]\nlazy_static! {\n    static ref CURRENT_TAG: AtomicUsize = AtomicUsize::new(0);\n}\n\n#[repr(C)]\n// This is only public to make the `sync_coerce_gc` macro work.\n#[doc(hidden)]\n/// The backing allocation for a [`Gc`].\npub struct GcBox<T>\nwhere\n    T: Trace + Send + Sync + ?Sized,\n{\n    /// The \"strong\" count, which is the number of extant `Gc`s to this allocation.\n    /// If the strong count is zero, a value contained in the allocation may be dropped, but the\n    /// allocation itself must still be valid.\n    strong: AtomicUsize,\n    /// The \"weak\" count, which is the number of references to this allocation stored in to-collect\n    /// buffers by the collection algorithm.\n    /// If the weak count is zero, the allocation may be destroyed.\n    weak: AtomicUsize,\n    /// The current generation number of the allocation.\n    /// The generation number is assigned to the global generation every time a strong reference is\n    /// created or destroyed or a `Gc` pointing to this allocation is dereferenced.\n    generation: AtomicUsize,\n    /// The actual data stored in the allocation.\n    value: T,\n}\n\nunsafe impl<T> Send for Gc<T> where T: Trace + Send + Sync + ?Sized {}\nunsafe impl<T> Sync for Gc<T> where T: Trace + Send + Sync + ?Sized {}\n\n/// Begin a collection operation of the allocations on the heap.\n///\n/// Due to concurrency issues, this might not collect every single unreachable allocation that\n/// currently exists, but often calling `collect()` will get allocations made by this thread.\n///\n/// # Examples\n///\n/// ```\n/// use dumpster::sync::{collect, Gc};\n///\n/// let gc = Gc::new(vec![1, 2, 
3]);\n/// drop(gc);\n///\n/// collect(); // the vector originally in `gc` _might_ be dropped now, but could be dropped later\n/// ```\npub fn collect() {\n    collect_all_await();\n}\n\n#[derive(Debug)]\n/// Information passed to a [`CollectCondition`] used to determine whether the garbage collector\n/// should start collecting.\n///\n/// A `CollectInfo` is exclusively created by being passed as an argument to the collection\n/// condition.\n/// To set a custom collection condition, refer to [`set_collect_condition`].\n///\n/// # Examples\n///\n/// ```\n/// use dumpster::sync::{set_collect_condition, CollectInfo};\n///\n/// fn my_collect_condition(info: &CollectInfo) -> bool {\n///     (info.n_gcs_dropped_since_last_collect() + info.n_gcs_existing()) % 2 == 0\n/// }\n///\n/// set_collect_condition(my_collect_condition);\n/// ```\npub struct CollectInfo {\n    /// Dummy value so this is a private structure.\n    _private: (),\n}\n\n/// A function which determines whether the garbage collector should start collecting.\n/// This type primarily exists so that it can be used with [`set_collect_condition`].\n///\n/// # Examples\n///\n/// ```rust\n/// use dumpster::sync::{set_collect_condition, CollectInfo};\n///\n/// fn always_collect(_: &CollectInfo) -> bool {\n///     true\n/// }\n///\n/// set_collect_condition(always_collect);\n/// ```\npub type CollectCondition = fn(&CollectInfo) -> bool;\n\n#[must_use]\n/// The default collection condition used by the garbage collector.\n///\n/// There are no guarantees about what this function returns, other than that it will return `true`\n/// with sufficient frequency to ensure that all `Gc` operations are amortized _O(1)_ in runtime.\n///\n/// This function isn't really meant to be called by users, but rather it's supposed to be handed\n/// off to [`set_collect_condition`] to return to the default operating mode of the library.\n///\n/// This collection condition applies globally, i.e. 
to every thread.\n///\n/// # Examples\n///\n/// ```rust\n/// use dumpster::sync::{default_collect_condition, set_collect_condition, CollectInfo};\n///\n/// fn other_collect_condition(info: &CollectInfo) -> bool {\n///     info.n_gcs_existing() >= 25 || default_collect_condition(info)\n/// }\n///\n/// // Use my custom collection condition.\n/// set_collect_condition(other_collect_condition);\n///\n/// // I'm sick of the custom collection condition.\n/// // Return to the original.\n/// set_collect_condition(default_collect_condition);\n/// ```\npub fn default_collect_condition(info: &CollectInfo) -> bool {\n    info.n_gcs_dropped_since_last_collect() > info.n_gcs_existing()\n}\n\npub use collect::set_collect_condition;\n\nimpl<T> Gc<T>\nwhere\n    T: Trace + Send + Sync + ?Sized,\n{\n    /// Construct a new garbage-collected value.\n    ///\n    /// # Examples\n    ///\n    /// ```\n    /// use dumpster::sync::Gc;\n    ///\n    /// let _ = Gc::new(0);\n    /// ```\n    pub fn new(value: T) -> Gc<T>\n    where\n        T: Sized,\n    {\n        notify_created_gc();\n        Gc {\n            ptr: UCell::new(Nullable::new(NonNull::from(Box::leak(Box::new(GcBox {\n                strong: AtomicUsize::new(1),\n                weak: AtomicUsize::new(0),\n                generation: AtomicUsize::new(CURRENT_TAG.load(Ordering::Acquire)),\n                value,\n            }))))),\n            tag: AtomicUsize::new(0),\n        }\n    }\n\n    /// Construct a self-referencing `Gc`.\n    ///\n    /// `new_cyclic` first allocates memory for `T`, then constructs a dead `Gc`.\n    ///  The dead `Gc` is then passed to `data_fn` to construct a value of `T`, which\n    /// is stored in the allocation. 
Finally, `new_cyclic` will update the dead self-referential\n    /// `Gc`s and rehydrate them to produce the final value.\n    ///\n    /// # Panics\n    ///\n    /// If `data_fn` panics, the panic is propagated to the caller.\n    /// The allocation is cleaned up normally.\n    ///\n    /// Additionally, if, when attempting to rehydrate the `Gc` members of `F`, the visitor fails to\n    /// reach a `Gc`, this function will panic and reserve the allocation to be cleaned up\n    /// later.\n    ///\n    /// # Notes on safety\n    ///\n    /// Incorrect implementations of `data_fn` may have unusual or strange results.\n    /// Although `dumpster` guarantees that it will be safe, and will do its best to ensure correct\n    /// results, it is generally unwise to allow dead `Gc`s to exist for long.\n    /// If you implement `data_fn` wrong, this may cause panics later on inside of the collection\n    /// process.\n    ///\n    /// # Examples\n    ///\n    /// ```\n    /// use dumpster::{sync::Gc, Trace};\n    ///\n    /// #[derive(Trace)]\n    /// struct Cycle {\n    ///     this: Gc<Self>,\n    /// }\n    ///\n    /// let gc = Gc::new_cyclic(|this| Cycle { this });\n    /// assert!(Gc::ptr_eq(&gc, &gc.this));\n    /// ```\n    pub fn new_cyclic<F: FnOnce(Self) -> T>(data_fn: F) -> Self\n    where\n        T: Sized,\n    {\n        /// A struct containing an uninitialized value of `T`.\n        /// May only be used inside `new_cyclic`.\n        #[repr(transparent)]\n        struct Uninitialized<T>(MaybeUninit<T>);\n\n        unsafe impl<V: Visitor, T> TraceWith<V> for Uninitialized<T> {\n            fn accept(&self, _: &mut V) -> Result<(), ()> {\n                Ok(())\n            }\n        }\n\n        /// Data structure for cleaning up the allocation in case we panic along the way.\n        struct CleanUp<T: Trace + Send + Sync + 'static> {\n            /// Is `true` if the [`GcBox::value`] is initialized.\n            initialized: bool,\n            /// Pointer to 
the `GcBox` with a maybe uninitialized value.\n            ptr: NonNull<GcBox<T>>,\n        }\n\n        impl<T: Trace + Send + Sync + 'static> Drop for CleanUp<T> {\n            fn drop(&mut self) {\n                if self.initialized {\n                    // push this `Gc` into the destruction queue\n                    unsafe { mark_dirty(self.ptr) };\n                } else {\n                    // deallocate because this `Gc` is not initialized\n                    unsafe {\n                        dealloc(\n                            self.ptr.as_ptr().cast::<u8>(),\n                            Layout::for_value(self.ptr.as_ref()),\n                        );\n                    }\n                }\n            }\n        }\n\n        // make an uninitialized allocation\n        notify_created_gc();\n        let mut gcbox = NonNull::from(Box::leak(Box::new(GcBox {\n            strong: AtomicUsize::new(1),\n            weak: AtomicUsize::new(0),\n            generation: AtomicUsize::new(CURRENT_TAG.load(Ordering::Acquire)),\n            value: Uninitialized(MaybeUninit::<T>::uninit()),\n        })));\n        let mut cleanup = CleanUp {\n            ptr: gcbox,\n            initialized: false,\n        };\n\n        // nilgc is a dead Gc\n        let nilgc = Gc {\n            tag: AtomicUsize::new(0),\n            ptr: UCell::new(Nullable::new(gcbox.cast::<GcBox<T>>()).as_null()),\n        };\n        assert!(Gc::is_dead(&nilgc));\n        unsafe {\n            // SAFETY: `gcbox` is a valid pointer to an uninitialized datum that we have allocated.\n            gcbox.as_mut().value = Uninitialized(MaybeUninit::new(data_fn(nilgc)));\n        }\n        cleanup.initialized = true;\n\n        let gcbox = gcbox.cast::<GcBox<T>>();\n        let res = unsafe {\n            // SAFETY: the above unsafe block correctly constructed the Uninitialized value, so it\n            // is safe to cast `gcbox` and then construct a reference.\n            
gcbox.as_ref().value.accept(&mut Rehydrate {\n                ptr: Nullable::new(gcbox.cast()),\n                type_id: TypeId::of::<T>(),\n            })\n        };\n\n        assert!(\n            res.is_ok(),\n            \"visitor must be able to access all Gc fields of structure when rehydrating dead Gcs\"\n        );\n        let gc = Gc {\n            ptr: UCell::new(Nullable::new(gcbox)),\n            tag: AtomicUsize::new(CURRENT_TAG.load(Ordering::Acquire)),\n        };\n\n        let _ = ManuallyDrop::new(cleanup);\n        gc\n    }\n\n    /// Attempt to dereference this `Gc`.\n    ///\n    /// This function will return `None` if `self` is a \"dead\" `Gc`, which points to an\n    /// already-deallocated object.\n    /// This can only occur if a `Gc` is accessed during the `Drop` implementation of a\n    /// [`Trace`] object.\n    ///\n    /// For a version which panics instead of returning `None`, consider using [`Deref`].\n    ///\n    /// # Examples\n    ///\n    /// For a still-living `Gc`, this always returns `Some`.\n    ///\n    /// ```\n    /// use dumpster::sync::Gc;\n    ///\n    /// let gc1 = Gc::new(0);\n    /// assert!(Gc::try_deref(&gc1).is_some());\n    /// ```\n    ///\n    /// The only way to get a `Gc` that fails on `try_deref` is by accessing a `Gc` during its\n    /// `Drop` implementation.\n    ///\n    /// ```\n    /// use dumpster::{sync::Gc, Trace};\n    /// use std::sync::Mutex;\n    ///\n    /// #[derive(Trace)]\n    /// struct Cycle(Mutex<Option<Gc<Self>>>);\n    ///\n    /// impl Drop for Cycle {\n    ///     fn drop(&mut self) {\n    ///         let guard = self.0.lock().unwrap();\n    ///         let maybe_ref = Gc::try_deref(guard.as_ref().unwrap());\n    ///         assert!(maybe_ref.is_none());\n    ///     }\n    /// }\n    ///\n    /// let gc1 = Gc::new(Cycle(Mutex::new(None)));\n    /// *gc1.0.lock().unwrap() = Some(gc1.clone());\n    /// # drop(gc1);\n    /// # dumpster::sync::collect();\n    /// ```\n    pub fn 
try_deref(gc: &Gc<T>) -> Option<&T> {\n        unsafe { (!gc.ptr.get().is_null()).then(|| &**gc) }\n    }\n\n    /// Attempt to clone this `Gc`.\n    ///\n    /// This function will return `None` if `self` is a \"dead\" `Gc`, which does not point to an\n    /// existing object. For details on dead `Gc`s, refer to [`Gc::is_dead`].\n    ///\n    /// For a version that simply clones the dead `Gc`, use [`Clone`].\n    ///\n    /// # Examples\n    ///\n    /// For a still-living `Gc`, this always returns `Some`.\n    ///\n    /// ```\n    /// use dumpster::sync::Gc;\n    ///\n    /// let gc1 = Gc::new(0);\n    /// let gc2 = Gc::try_clone(&gc1).unwrap();\n    /// ```\n    ///\n    /// The only way to get a `Gc` which fails on `try_clone` is by accessing a `Gc` during its\n    /// `Drop` implementation.\n    ///\n    /// ```\n    /// use dumpster::{sync::Gc, Trace};\n    ///\n    /// #[derive(Trace)]\n    /// struct Cycle(Gc<Self>);\n    ///\n    /// impl Drop for Cycle {\n    ///     fn drop(&mut self) {\n    ///         let cloned = Gc::try_clone(&self.0);\n    ///         assert!(cloned.is_none());\n    ///     }\n    /// }\n    ///\n    /// let gc1 = Gc::new_cyclic(|gc| Cycle(gc));\n    /// # drop(gc1);\n    /// # dumpster::sync::collect();\n    /// ```\n    pub fn try_clone(gc: &Gc<T>) -> Option<Gc<T>> {\n        unsafe { (!gc.ptr.get().is_null()).then(|| gc.clone()) }\n    }\n\n    /// Provides a raw pointer to the data.\n    ///\n    /// Panics if `self` is a \"dead\" `Gc`,\n    /// which points to an already-deallocated object.\n    /// This can only occur if a `Gc` is accessed during the `Drop` implementation of a\n    /// [`Trace`] object.\n    ///\n    /// # Examples\n    ///\n    /// ```\n    /// use dumpster::sync::Gc;\n    /// let x = Gc::new(\"hello\".to_owned());\n    /// let y = Gc::clone(&x);\n    /// let x_ptr = Gc::as_ptr(&x);\n    /// assert_eq!(x_ptr, Gc::as_ptr(&x));\n    /// assert_eq!(unsafe { &*x_ptr }, \"hello\");\n    /// ```\n    pub fn 
as_ptr(gc: &Gc<T>) -> *const T {\n        unsafe {\n            let ptr = NonNull::as_ptr(gc.ptr.get().unwrap());\n            addr_of_mut!((*ptr).value)\n        }\n    }\n\n    /// Determine whether two `Gc`s are equivalent by reference.\n    /// Returns `true` if both `this` and `other` point to the same value, in the same style as\n    /// [`std::ptr::eq`].\n    ///\n    /// # Examples\n    ///\n    /// ```\n    /// use dumpster::sync::Gc;\n    ///\n    /// let gc1 = Gc::new(0);\n    /// let gc2 = Gc::clone(&gc1); // points to same spot as `gc1`\n    /// let gc3 = Gc::new(0); // same value, but points to a different object than `gc1`\n    ///\n    /// assert!(Gc::ptr_eq(&gc1, &gc2));\n    /// assert!(!Gc::ptr_eq(&gc1, &gc3));\n    /// ```\n    pub fn ptr_eq(this: &Gc<T>, other: &Gc<T>) -> bool {\n        unsafe { this.ptr.get() }.as_option() == unsafe { other.ptr.get() }.as_option()\n    }\n\n    /// Get the number of references to the value pointed to by this `Gc`.\n    ///\n    /// This does not include internal references generated by the garbage collector.\n    ///\n    /// # Panics\n    ///\n    /// This function may panic if the `Gc` whose reference count we are loading is \"dead\" (i.e.\n    /// generated through a `Drop` implementation). For further reference, take a look at\n    /// [`Gc::is_dead`].\n    ///\n    /// # Examples\n    ///\n    /// ```\n    /// use dumpster::sync::Gc;\n    ///\n    /// let gc = Gc::new(());\n    /// assert_eq!(Gc::ref_count(&gc).get(), 1);\n    /// let gc2 = gc.clone();\n    /// assert_eq!(Gc::ref_count(&gc).get(), 2);\n    /// drop(gc);\n    /// drop(gc2);\n    /// ```\n    pub fn ref_count(gc: &Self) -> NonZeroUsize {\n        let box_ptr = unsafe { gc.ptr.get() }.expect(\n            \"Attempt to dereference Gc to already-collected object. 
\\\n    This means a Gc escaped from a Drop implementation, likely implying a bug in your code.\",\n        );\n        let box_ref = unsafe { box_ptr.as_ref() };\n        NonZeroUsize::new(box_ref.strong.load(Ordering::Relaxed))\n            .expect(\"strong count to a GcBox may never be zero while a Gc to it exists\")\n    }\n\n    /// Determine whether this is a dead `Gc`.\n    ///\n    /// A `Gc` is dead if it is not usable as a reference to any value.\n    /// Currently, a dead `Gc` may only be produced by accessing a `Gc` inside of the `Drop`\n    /// implementation of a garbage-collected value or by using the `Gc` provided to the\n    /// construction function in [`Gc::new_cyclic`].\n    ///\n    /// # Examples\n    ///\n    /// ```\n    /// use dumpster::{sync::Gc, Trace};\n    ///\n    /// #[derive(Trace)]\n    /// struct Cycle(Gc<Self>);\n    ///\n    /// impl Drop for Cycle {\n    ///     fn drop(&mut self) {\n    ///         assert!(Gc::is_dead(&self.0));\n    ///     }\n    /// }\n    ///\n    /// let gc1 = Gc::new_cyclic(|gc| Cycle(gc));\n    /// # drop(gc1);\n    /// # dumpster::sync::collect();\n    /// ```\n    #[inline]\n    pub fn is_dead(gc: &Self) -> bool {\n        unsafe { gc.ptr.get() }.is_null()\n    }\n\n    /// Consumes the `Gc<T>`, returning the inner `GcBox<T>` pointer and tag.\n    #[inline]\n    #[must_use]\n    fn into_ptr(this: Self) -> (*const GcBox<T>, usize) {\n        let this = ManuallyDrop::new(this);\n        let tag = &raw const this.tag;\n        let ptr = unsafe { this.ptr.get().as_ptr() };\n        let tag = unsafe { tag.read() }.into_inner();\n        (ptr, tag)\n    }\n\n    /// Constructs a `Gc<T>` from the innner `GcBox<T>` pointer and tag.\n    #[inline]\n    #[must_use]\n    unsafe fn from_ptr(ptr: *const GcBox<T>, tag: usize) -> Self {\n        Self {\n            ptr: UCell::new(Nullable::from_ptr(ptr.cast_mut())),\n            tag: AtomicUsize::new(tag),\n        }\n    }\n\n    /// Kill this `Gc`, making it 
dead.\n    ///\n    /// # Safety\n    ///\n    /// The caller is responsible for making sure that no other code can access this `Gc` while\n    /// `kill` is running.\n    unsafe fn kill(&self) {\n        self.ptr.set(self.ptr.get().as_null());\n    }\n\n    /// Exists solely for the [`coerce_gc`] macro.\n    #[inline]\n    #[must_use]\n    #[doc(hidden)]\n    pub fn __private_into_ptr(this: Self) -> (*const GcBox<T>, usize) {\n        Self::into_ptr(this)\n    }\n\n    /// Exists solely for the [`coerce_gc`] macro.\n    #[inline]\n    #[must_use]\n    #[doc(hidden)]\n    pub unsafe fn __private_from_ptr(ptr: *const GcBox<T>, tag: usize) -> Self {\n        Self::from_ptr(ptr, tag)\n    }\n}\n\n/// A struct for converting dead `Gc`s into live ones.\n///\n/// This is used in [`Gc::new_cyclic`].\npub(super) struct Rehydrate {\n    /// The pointer to the currently hydrating [`GcBox`].\n    ptr: Nullable<GcBox<()>>,\n    /// The [`TypeId`] of `T` in `Gc<T>` to be hydrated.\n    type_id: TypeId,\n}\n\nimpl Visitor for Rehydrate {\n    fn visit_sync<T>(&mut self, gc: &Gc<T>)\n    where\n        T: Trace + Send + Sync + ?Sized,\n    {\n        if Gc::is_dead(gc) && TypeId::of::<T>() == self.type_id {\n            unsafe {\n                // SAFETY: it is safe to transmute these pointers because we have checked\n                // that they are of the same type.\n                // Additionally, the `GcBox` has been fully initialized, so it is safe to\n                // create a reference here.\n                let cell_ptr = (&raw const gc.ptr).cast::<UCell<Nullable<GcBox<()>>>>();\n                (*cell_ptr).set(self.ptr);\n\n                let box_ref = &*self.ptr.as_ptr();\n                let old_strong = box_ref.strong.fetch_add(1, Ordering::Relaxed);\n                // Check for overflow. 
See implementation of clone for details.\n                if old_strong > MAX_STRONG_COUNT {\n                    std::process::abort();\n                }\n                box_ref\n                    .generation\n                    .store(CURRENT_TAG.load(Ordering::Acquire), Ordering::Release);\n                notify_created_gc();\n            }\n        }\n    }\n\n    fn visit_unsync<T>(&mut self, _: &crate::unsync::Gc<T>)\n    where\n        T: Trace + ?Sized,\n    {\n    }\n}\n\nimpl<T: Trace + Send + Sync + Clone> Gc<T> {\n    /// Makes a mutable reference to the given `Gc`.\n    ///\n    /// If there are other `Gc` pointers to the same allocation, then `make_mut` will\n    /// [`clone`] the inner value to a new allocation to ensure unique ownership. This is also\n    /// referred to as clone-on-write.\n    ///\n    /// [`clone`]: Clone::clone\n    ///\n    /// # Panics\n    ///\n    /// This function may panic if the `Gc` whose reference count we are loading is \"dead\" (i.e.\n    /// generated through a `Drop` implementation). 
For further reference, take a look at\n    /// [`Gc::is_dead`].\n    ///\n    /// # Examples\n    ///\n    /// ```\n    /// use dumpster::sync::Gc;\n    ///\n    /// let mut data = Gc::new(5);\n    ///\n    /// *Gc::make_mut(&mut data) += 1; // Won't clone anything\n    /// let mut other_data = Gc::clone(&data); // Won't clone inner data\n    /// *Gc::make_mut(&mut data) += 1; // Clones inner data\n    /// *Gc::make_mut(&mut data) += 1; // Won't clone anything\n    /// *Gc::make_mut(&mut other_data) *= 2; // Won't clone anything\n    ///\n    /// // Now `data` and `other_data` point to different allocations.\n    /// assert_eq!(*data, 8);\n    /// assert_eq!(*other_data, 12);\n    /// ```\n    #[inline]\n    pub fn make_mut(this: &mut Self) -> &mut T {\n        if Gc::is_dead(this) {\n            panic_deref_of_collected_object();\n        }\n\n        // SAFETY: we checked above that the object is alive (not null)\n        let box_ref = unsafe { this.ptr.get().unwrap_unchecked().as_ref() };\n\n        let strong = box_ref.strong.load(Ordering::Acquire);\n        let weak = box_ref.weak.load(Ordering::Acquire);\n\n        if strong != 1 || weak != 0 {\n            // We don't have unique access to the value so we need to clone it.\n            *this = Gc::new(box_ref.value.clone());\n        }\n\n        // SAFETY: we have exclusive access to this `GcBox` because we ensured\n        // that we hold the only reference to this allocation.\n        // No other `Gc`s point to this allocation because the strong count is 1, and there are no\n        // loose pointers internal to the collector because the weak count is 0.\n        unsafe { &mut (*this.ptr.get().as_ptr()).value }\n    }\n}\n\n/// Allows coercing `T` of [`Gc<T>`](Gc).\n///\n/// This means that you can convert a `Gc` containing a strictly-sized type (such as `[T; N]`) into\n/// a `Gc` containing its unsized version (such as `[T]`), all without using nightly-only features.\n///\n/// This is one of two easy 
ways to create a `Gc<[T]>`; the other method is to use [`FromIterator`].\n///\n/// # Examples\n///\n/// ```\n/// use dumpster::sync::{coerce_gc, Gc};\n///\n/// let gc1: Gc<[u8; 3]> = Gc::new([7, 8, 9]);\n/// let gc2: Gc<[u8]> = coerce_gc!(gc1);\n/// assert_eq!(&gc2[..], &[7, 8, 9]);\n/// ```\n///\n/// Note that although this macro allows for type conversion, it _cannot_ be used for converting\n/// between incompatible types.\n///\n/// ```compile_fail\n/// // This program is incorrect!\n/// use dumpster::sync::{Gc, coerce_gc};\n///\n/// let gc1: Gc<u8> = Gc::new(1);\n/// let gc2: Gc<i8> = coerce_gc!(gc1);\n/// ```\n#[doc(hidden)]\n#[macro_export]\nmacro_rules! __sync_coerce_gc {\n    ($gc:expr) => {{\n        // Temporarily convert the `Gc` into a raw pointer to allow for coercion to occur.\n        let (ptr, tag): (*const _, usize) = $crate::sync::Gc::__private_into_ptr($gc);\n        unsafe { $crate::sync::Gc::__private_from_ptr(ptr, tag) }\n    }};\n}\n\n#[doc(inline)]\npub use crate::__sync_coerce_gc as coerce_gc;\n\nimpl<T> Clone for Gc<T>\nwhere\n    T: Trace + Send + Sync + ?Sized,\n{\n    /// Clone a garbage-collected reference.\n    /// This does not clone the underlying data.\n    /// If this `Gc` is [dead](`Gc::is_dead`), this will produce another dead `Gc`.\n    ///\n    /// For a fallible version, refer to [`Gc::try_clone`].\n    ///\n    /// # Examples\n    ///\n    /// ```\n    /// use dumpster::sync::Gc;\n    /// use std::sync::atomic::{AtomicU8, Ordering};\n    ///\n    /// let gc1 = Gc::new(AtomicU8::new(0));\n    /// let gc2 = gc1.clone();\n    ///\n    /// gc1.store(1, Ordering::Relaxed);\n    /// assert_eq!(gc2.load(Ordering::Relaxed), 1);\n    /// ```\n    ///\n    /// Note that you can also clone a dead `Gc`.\n    ///\n    /// ```\n    /// use dumpster::{sync::Gc, Trace};\n    /// use std::sync::Mutex;\n    ///\n    /// #[derive(Trace)]\n    /// struct Cycle(Gc<Self>);\n    ///\n    /// impl Drop for Cycle {\n    ///     fn drop(&mut self) {\n 
   ///         let gc = self.0.clone();\n    ///         assert!(Gc::is_dead(&gc));\n    ///     }\n    /// }\n    ///\n    /// let gc1 = Gc::new_cyclic(|gc| Cycle(gc));\n    /// # drop(gc1);\n    /// # dumpster::sync::collect();\n    /// ```\n    fn clone(&self) -> Gc<T> {\n        if Gc::is_dead(self) {\n            // Clone dead Gcs by doing a naive copy.\n            return unsafe { ptr::read(self) };\n        }\n        let box_ref = unsafe { self.ptr.get().unwrap().as_ref() };\n\n        // increment strong count before generation to ensure cleanup never underestimates ref count\n        let old_strong = box_ref.strong.fetch_add(1, Ordering::Acquire);\n\n        // We need to guard against massive refcounts in case someone is `mem::forget`ing\n        // Gcs. If we don't do this the count can overflow and users will use-after free. This\n        // branch will never be taken in any realistic program. We abort because such a program is\n        // incredibly degenerate, and we don't care to support it.\n        //\n        // This check is not 100% water-proof: we error when the refcount grows beyond `isize::MAX`.\n        // But we do that check *after* having done the increment, so there is a chance here that\n        // the worst already happened and we actually do overflow the `usize` counter. 
However, that\n        // requires the counter to grow from `isize::MAX` to `usize::MAX` between the increment\n        // above and the `abort` below, which seems exceedingly unlikely.\n        if old_strong > MAX_STRONG_COUNT {\n            std::process::abort();\n        }\n\n        box_ref\n            .generation\n            .store(CURRENT_TAG.load(Ordering::Acquire), Ordering::Release);\n\n        notify_created_gc();\n        // mark_clean(box_ref); // causes performance drops\n\n        Gc {\n            ptr: UCell::new(unsafe { self.ptr.get() }),\n            tag: AtomicUsize::new(CURRENT_TAG.load(Ordering::Acquire)),\n        }\n    }\n}\n\nimpl<T> Drop for Gc<T>\nwhere\n    T: Trace + Send + Sync + ?Sized,\n{\n    fn drop(&mut self) {\n        let Some(mut ptr) = unsafe { self.ptr.get() }.as_option() else {\n            return;\n        };\n        let box_ref = unsafe { ptr.as_ref() };\n        box_ref.weak.fetch_add(1, Ordering::AcqRel); // ensures that this allocation wasn't freed\n                                                     // while we weren't looking\n        box_ref\n            .generation\n            .store(CURRENT_TAG.load(Ordering::Relaxed), Ordering::Release);\n        match box_ref.strong.fetch_sub(1, Ordering::AcqRel) {\n            0 => unreachable!(\"strong cannot reach zero while a Gc to it exists\"),\n            1 => {\n                mark_clean(box_ref);\n                if box_ref.weak.fetch_sub(1, Ordering::Release) == 1 {\n                    // destroyed the last weak reference! 
we can safely deallocate this\n                    let layout = Layout::for_value(box_ref);\n                    fence(Ordering::Acquire);\n                    unsafe {\n                        drop_in_place(ptr.as_mut());\n                        dealloc(ptr.as_ptr().cast(), layout);\n                    }\n                }\n            }\n            _ => {\n                if contains_gcs(&box_ref.value).unwrap_or(true) {\n                    // SAFETY: `ptr` is convertible to a reference\n                    // We don't use `box_ref` here because that pointer\n                    // only has `SharedReadOnly` permissions under the stacked borrows model\n                    // when we need `Unique` for the `TrashCan`.\n                    unsafe { mark_dirty(ptr) };\n                }\n                box_ref.weak.fetch_sub(1, Ordering::Release);\n            }\n        }\n        notify_dropped_gc();\n    }\n}\n\nimpl CollectInfo {\n    #[must_use]\n    /// Get the number of times that a [`Gc`] has been dropped since the last time a collection\n    /// operation was performed.\n    ///\n    /// # Examples\n    ///\n    /// ```\n    /// use dumpster::sync::{set_collect_condition, CollectInfo};\n    ///\n    /// // Collection condition for whether many Gc's have been dropped.\n    /// fn have_many_gcs_dropped(info: &CollectInfo) -> bool {\n    ///     info.n_gcs_dropped_since_last_collect() > 100\n    /// }\n    ///\n    /// set_collect_condition(have_many_gcs_dropped);\n    /// ```\n    pub fn n_gcs_dropped_since_last_collect(&self) -> usize {\n        n_gcs_dropped()\n    }\n\n    #[must_use]\n    /// Get the total number of [`Gc`]s which currently exist.\n    ///\n    /// # Examples\n    ///\n    /// ```\n    /// use dumpster::sync::{set_collect_condition, CollectInfo};\n    ///\n    /// // Collection condition for whether many Gc's currently exist.\n    /// fn do_many_gcs_exist(info: &CollectInfo) -> bool {\n    ///     info.n_gcs_existing() > 100\n    /// 
}\n    ///\n    /// set_collect_condition(do_many_gcs_exist);\n    /// ```\n    pub fn n_gcs_existing(&self) -> usize {\n        n_gcs_existing()\n    }\n}\n\nimpl<T: Trace + Send + Sync + ?Sized> Gc<T> {\n    /// Allocates an `GcBox<T>` with sufficient space for\n    /// a value of the provided layout.\n    ///\n    /// The function `mem_to_gc_box` is called with the data pointer\n    /// and must return back a pointer for the `GcBox<T>`.\n    unsafe fn allocate_for_layout(\n        value_layout: Layout,\n        mem_to_gc_box: impl FnOnce(*mut u8) -> *mut GcBox<T>,\n    ) -> *mut GcBox<T> {\n        let layout = Layout::new::<GcBox<()>>()\n            .extend(value_layout)\n            .unwrap()\n            .0\n            .pad_to_align();\n\n        Self::allocate_for_layout_of_box(layout, mem_to_gc_box)\n    }\n\n    /// Allocates an `GcBox<T>` with the given layout.\n    ///\n    /// The function `mem_to_gc_box` is called with the data pointer\n    /// and must return back a pointer for the `GcBox<T>`.\n    unsafe fn allocate_for_layout_of_box(\n        layout: Layout,\n        mem_to_gc_box: impl FnOnce(*mut u8) -> *mut GcBox<T>,\n    ) -> *mut GcBox<T> {\n        // SAFETY: layout has non-zero size because of the `ref_count` field\n        let ptr = unsafe { std::alloc::alloc(layout) };\n\n        if ptr.is_null() {\n            handle_alloc_error(layout);\n        }\n\n        let inner = mem_to_gc_box(ptr);\n\n        unsafe {\n            (&raw mut (*inner).strong).write(AtomicUsize::new(1));\n            (&raw mut (*inner).weak).write(AtomicUsize::new(0));\n            (&raw mut (*inner).generation).write(AtomicUsize::new(0));\n        }\n\n        inner\n    }\n}\n\nimpl<T: Trace + Send + Sync> Gc<[T]> {\n    /// Allocates an `GcBox<[T]>` with the given length.\n    fn allocate_for_slice(len: usize) -> *mut GcBox<[T]> {\n        unsafe {\n            Self::allocate_for_layout(Layout::array::<T>(len).unwrap(), |mem| {\n                
ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len) as *mut GcBox<[T]>\n            })\n        }\n    }\n}\n\nunsafe impl<V: Visitor, T: Trace + Send + Sync + ?Sized> TraceWith<V> for Gc<T> {\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        visitor.visit_sync(self);\n        Ok(())\n    }\n}\n\nimpl<T: Trace + Send + Sync + ?Sized> Deref for Gc<T> {\n    type Target = T;\n\n    /// Dereference this pointer, creating a reference to the contained value `T`.\n    ///\n    /// # Panics\n    ///\n    /// This function may panic if it is called from within the implementation of `std::ops::Drop`\n    /// of its owning value, since returning such a reference could cause a use-after-free.\n    /// It is not guaranteed to panic.\n    ///\n    /// # Examples\n    ///\n    /// The following is a correct time to dereference a `Gc`.\n    ///\n    /// ```\n    /// use dumpster::sync::Gc;\n    ///\n    /// let my_gc = Gc::new(0u8);\n    /// let my_ref: &u8 = &my_gc;\n    /// ```\n    ///\n    /// Dereferencing a `Gc` while dropping is not correct.\n    ///\n    /// ```should_panic\n    /// // This is wrong!\n    /// use dumpster::{sync::Gc, Trace};\n    /// use std::sync::Mutex;\n    ///\n    /// #[derive(Trace)]\n    /// struct Bad {\n    ///     s: String,\n    ///     cycle: Mutex<Option<Gc<Bad>>>,\n    /// }\n    ///\n    /// impl Drop for Bad {\n    ///     fn drop(&mut self) {\n    ///         println!(\"{}\", self.cycle.lock().unwrap().as_ref().unwrap().s)\n    ///     }\n    /// }\n    ///\n    /// let foo = Gc::new(Bad {\n    ///     s: \"foo\".to_string(),\n    ///     cycle: Mutex::new(None),\n    /// });\n    /// ```\n    fn deref(&self) -> &Self::Target {\n        let box_ref = unsafe {\n            self.ptr.get().expect(\n            \"Attempting to dereference Gc to already-deallocated object.\\\n            This is caused by accessing a Gc during a Drop implementation, likely implying a bug in your code.\"\n        ).as_ref()\n        };\n       
 let current_tag = CURRENT_TAG.load(Ordering::Acquire);\n        self.tag.store(current_tag, Ordering::Release);\n        box_ref.generation.store(current_tag, Ordering::Release);\n        &box_ref.value\n    }\n}\n\nimpl<T> PartialEq<Gc<T>> for Gc<T>\nwhere\n    T: Trace + Send + Sync + ?Sized + PartialEq,\n{\n    /// Test for equality on two `Gc`s.\n    ///\n    /// Two `Gc`s are equal if their inner values are equal, even if they are stored in different\n    /// allocations.\n    /// Because `PartialEq` does not imply reflexivity, and there is no current path for trait\n    /// specialization, this function does not do a \"fast-path\" check for reference equality.\n    /// Therefore, if two `Gc`s point to the same allocation, the implementation of `eq` will still\n    /// require a direct call to `eq` on the values.\n    ///\n    /// # Panics\n    ///\n    /// This function may panic if it is called from within the implementation of `std::ops::Drop`\n    /// of its owning value, since returning such a reference could cause a use-after-free.\n    /// It is not guaranteed to panic.\n    /// Additionally, if this `Gc` is moved out of an allocation during a `Drop` implementation, it\n    /// could later cause a panic.\n    /// For further details, refer to the main documentation for `Gc`.\n    ///\n    /// ```\n    /// use dumpster::sync::Gc;\n    ///\n    /// let gc = Gc::new(6);\n    /// assert!(gc == Gc::new(6));\n    /// ```\n    fn eq(&self, other: &Gc<T>) -> bool {\n        self.as_ref() == other.as_ref()\n    }\n}\n\nimpl<T> Eq for Gc<T> where T: Trace + Send + Sync + ?Sized + PartialEq {}\n\nimpl<T: Trace + Send + Sync + ?Sized> AsRef<T> for Gc<T> {\n    fn as_ref(&self) -> &T {\n        self\n    }\n}\n\nimpl<T: Trace + Send + Sync + ?Sized> Borrow<T> for Gc<T> {\n    fn borrow(&self) -> &T {\n        self\n    }\n}\n\nimpl<T: Trace + Send + Sync + Default> Default for Gc<T> {\n    fn default() -> Self {\n        Gc::new(T::default())\n    }\n}\n\nimpl<T: 
Trace + Send + Sync + ?Sized> std::fmt::Pointer for Gc<T> {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        std::fmt::Pointer::fmt(&addr_of!(**self), f)\n    }\n}\n\n#[cfg(not(loom))]\n#[cfg(feature = \"coerce-unsized\")]\nimpl<T, U> std::ops::CoerceUnsized<Gc<U>> for Gc<T>\nwhere\n    T: std::marker::Unsize<U> + Trace + Send + Sync + ?Sized,\n    U: Trace + Send + Sync + ?Sized,\n{\n}\n\nimpl<T: Trace + Send + Sync + ?Sized> Debug for Gc<T> {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        write!(\n            f,\n            \"Gc({:?}, {})\",\n            self.ptr,\n            self.tag.load(Ordering::Acquire)\n        )\n    }\n}\n\nimpl<T: Trace + Send + Sync + Display + ?Sized> Display for Gc<T> {\n    /// Formats the value using its `Display` implementation.\n    ///\n    /// # Note\n    ///\n    /// If `T` contains cyclic references through `Gc` pointers and its `Display` implementation\n    /// attempts to traverse them, this may cause infinite recursion. 
Types with potential cycles\n    /// should implement `Display` to avoid following cyclic references.\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        Display::fmt(&**self, f)\n    }\n}\n\nimpl<T: Trace + Send + Sync> From<T> for Gc<T> {\n    /// Converts a generic type `T` into an `Gc<T>`\n    ///\n    /// The conversion allocates on the heap and moves `t`\n    /// from the stack into it.\n    ///\n    /// # Example\n    /// ```rust\n    /// # use dumpster::sync::Gc;\n    /// let x = 5;\n    /// let rc = Gc::new(5);\n    ///\n    /// assert_eq!(Gc::from(x), rc);\n    /// ```\n    fn from(value: T) -> Self {\n        Gc::new(value)\n    }\n}\n\nimpl<T: Trace + Send + Sync, const N: usize> From<[T; N]> for Gc<[T]> {\n    /// Converts a [`[T; N]`](prim@array) into an `Gc<[T]>`.\n    ///\n    /// The conversion moves the array into a newly allocated `Gc`.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # use dumpster::sync::Gc;\n    /// let original: [i32; 3] = [1, 2, 3];\n    /// let shared: Gc<[i32]> = Gc::from(original);\n    /// assert_eq!(&[1, 2, 3], &shared[..]);\n    /// ```\n    #[inline]\n    fn from(v: [T; N]) -> Gc<[T]> {\n        coerce_gc!(Gc::<[T; N]>::from(v))\n    }\n}\n\nimpl<T: Trace + Send + Sync + Clone> From<&[T]> for Gc<[T]> {\n    /// Allocates a garbage-collected slice and fills it by cloning `slice`'s items.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # use dumpster::sync::Gc;\n    /// let original: &[i32] = &[1, 2, 3];\n    /// let shared: Gc<[i32]> = Gc::from(original);\n    /// assert_eq!(&[1, 2, 3], &shared[..]);\n    /// ```\n    #[inline]\n    fn from(slice: &[T]) -> Gc<[T]> {\n        // Panic guard while cloning T elements.\n        // In the event of a panic, elements that have been written\n        // into the new GcBox will be dropped, then the memory freed.\n        struct Guard<T> {\n            /// pointer to `GcBox` to deallocate on panic\n            mem: *mut u8,\n    
        /// layout of the `GcBox` to deallocate on panic\n            layout: Layout,\n            /// pointer to the `GcBox`'s value\n            elems: *mut T,\n            /// the number of elements cloned so far\n            n_elems: usize,\n        }\n\n        impl<T> Drop for Guard<T> {\n            fn drop(&mut self) {\n                unsafe {\n                    let slice = slice::from_raw_parts_mut(self.elems, self.n_elems);\n                    ptr::drop_in_place(slice);\n\n                    dealloc(self.mem, self.layout);\n                }\n            }\n        }\n\n        unsafe {\n            let value_layout = Layout::array::<T>(slice.len()).unwrap();\n\n            let layout = Layout::new::<GcBox<()>>()\n                .extend(value_layout)\n                .unwrap()\n                .0\n                .pad_to_align();\n\n            let ptr = Self::allocate_for_layout_of_box(layout, |mem| {\n                ptr::slice_from_raw_parts_mut(mem.cast::<T>(), slice.len()) as *mut GcBox<[T]>\n            });\n\n            // Pointer to first element\n            let elems = (&raw mut (*ptr).value).cast::<T>();\n\n            let mut guard = Guard {\n                mem: ptr.cast::<u8>(),\n                layout,\n                elems,\n                n_elems: 0,\n            };\n\n            for (i, item) in slice.iter().enumerate() {\n                ptr::write(elems.add(i), item.clone());\n                guard.n_elems += 1;\n            }\n\n            // All clear. 
Forget the guard so it doesn't free the new GcBox.\n            mem::forget(guard);\n\n            notify_created_gc();\n\n            Self {\n                ptr: UCell::new(Nullable::from_ptr(ptr)),\n                tag: AtomicUsize::new(0),\n            }\n        }\n    }\n}\n\nimpl<T: Trace + Send + Sync + Clone> From<&mut [T]> for Gc<[T]> {\n    /// Allocates a garbage-collected slice and fills it by cloning `v`'s items.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # use dumpster::sync::Gc;\n    /// let mut original = [1, 2, 3];\n    /// let original: &mut [i32] = &mut original;\n    /// let shared: Gc<[i32]> = Gc::from(original);\n    /// assert_eq!(&[1, 2, 3], &shared[..]);\n    /// ```\n    #[inline]\n    fn from(value: &mut [T]) -> Self {\n        Gc::from(&*value)\n    }\n}\n\nimpl From<&str> for Gc<str> {\n    /// Allocates a garbage-collected string slice and copies `v` into it.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # use dumpster::sync::Gc;\n    /// let shared: Gc<str> = Gc::from(\"statue\");\n    /// assert_eq!(\"statue\", &shared[..]);\n    /// ```\n    #[inline]\n    fn from(v: &str) -> Self {\n        let bytes = Gc::<[u8]>::from(v.as_bytes());\n        let (ptr, tag) = Gc::into_ptr(bytes);\n        unsafe { Gc::from_ptr(ptr as *const GcBox<str>, tag) }\n    }\n}\n\nimpl From<&mut str> for Gc<str> {\n    /// Allocates a garbage-collected string slice and copies `v` into it.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # use dumpster::sync::Gc;\n    /// let mut original = String::from(\"statue\");\n    /// let original: &mut str = &mut original;\n    /// let shared: Gc<str> = Gc::from(original);\n    /// assert_eq!(\"statue\", &shared[..]);\n    /// ```\n    #[inline]\n    fn from(v: &mut str) -> Self {\n        Gc::from(&*v)\n    }\n}\n\nimpl From<Gc<str>> for Gc<[u8]> {\n    /// Converts a garbage-collected string slice into a byte slice.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # 
use dumpster::sync::Gc;\n    /// let string: Gc<str> = Gc::from(\"eggplant\");\n    /// let bytes: Gc<[u8]> = Gc::from(string);\n    /// assert_eq!(\"eggplant\".as_bytes(), bytes.as_ref());\n    /// ```\n    #[inline]\n    fn from(value: Gc<str>) -> Self {\n        let (ptr, tag) = Gc::into_ptr(value);\n        unsafe { Gc::from_ptr(ptr as *const GcBox<[u8]>, tag) }\n    }\n}\n\nimpl From<String> for Gc<str> {\n    /// Allocates a garbage-collected string slice and copies `v` into it.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # use dumpster::sync::Gc;\n    /// let original: String = \"statue\".to_owned();\n    /// let shared: Gc<str> = Gc::from(original);\n    /// assert_eq!(\"statue\", &shared[..]);\n    /// ```\n    #[inline]\n    fn from(value: String) -> Self {\n        Self::from(&value[..])\n    }\n}\n\nimpl<T: Trace + Send + Sync> From<Box<T>> for Gc<T> {\n    /// Move a boxed object to a new, garbage collected, allocation.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # use dumpster::sync::Gc;\n    /// let original: Box<i32> = Box::new(1);\n    /// let shared: Gc<i32> = Gc::from(original);\n    /// assert_eq!(1, *shared);\n    /// ```\n    #[inline]\n    fn from(src: Box<T>) -> Self {\n        unsafe {\n            let layout = Layout::for_value(&*src);\n            let gc_ptr = Gc::allocate_for_layout(layout, <*mut u8>::cast::<GcBox<T>>);\n\n            // Copy value as bytes\n            ptr::copy_nonoverlapping(\n                (&raw const *src).cast::<u8>(),\n                (&raw mut (*gc_ptr).value).cast::<u8>(),\n                layout.size(),\n            );\n\n            // Free the allocation without dropping its contents\n            let bptr = Box::into_raw(src);\n            let src = Box::from_raw(bptr.cast::<mem::ManuallyDrop<T>>());\n            drop(src);\n\n            notify_created_gc();\n            Self::from_ptr(gc_ptr, 0)\n        }\n    }\n}\n\nimpl<T: Trace + Send + Sync> From<Vec<T>> for Gc<[T]> {\n 
   /// Allocates a garbage-collected slice and moves `vec`'s items into it.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # use dumpster::sync::Gc;\n    /// let unique: Vec<i32> = vec![1, 2, 3];\n    /// let shared: Gc<[i32]> = Gc::from(unique);\n    /// assert_eq!(&[1, 2, 3], &shared[..]);\n    /// ```\n    #[inline]\n    fn from(vec: Vec<T>) -> Self {\n        let mut vec = ManuallyDrop::new(vec);\n        let vec_cap = vec.capacity();\n        let vec_len = vec.len();\n        let vec_ptr = vec.as_mut_ptr();\n\n        let gc_ptr = Self::allocate_for_slice(vec_len);\n\n        unsafe {\n            let dst_ptr = (&raw mut (*gc_ptr).value).cast::<T>();\n            ptr::copy_nonoverlapping(vec_ptr, dst_ptr, vec_len);\n\n            let _ = Vec::from_raw_parts(vec_ptr, 0, vec_cap);\n\n            notify_created_gc();\n            Self::from_ptr(gc_ptr, 0)\n        }\n    }\n}\n\nimpl<'a, B: Trace + Send + Sync> From<Cow<'a, B>> for Gc<B>\nwhere\n    B: ToOwned + ?Sized,\n    Gc<B>: From<&'a B> + From<B::Owned>,\n{\n    /// Creates a garbage-collected pointer from a clone-on-write pointer by\n    /// copying its content.\n    ///\n    /// # Example\n    ///\n    /// ```rust\n    /// # use dumpster::sync::Gc;\n    /// # use std::borrow::Cow;\n    /// let cow: Cow<'_, str> = Cow::Borrowed(\"eggplant\");\n    /// let shared: Gc<str> = Gc::from(cow);\n    /// assert_eq!(\"eggplant\", &shared[..]);\n    /// ```\n    #[inline]\n    fn from(cow: Cow<'a, B>) -> Gc<B> {\n        match cow {\n            Cow::Borrowed(s) => Gc::from(s),\n            Cow::Owned(s) => Gc::from(s),\n        }\n    }\n}\n\nimpl<T> FromIterator<T> for Gc<[T]>\nwhere\n    T: Trace + Send + Sync,\n{\n    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {\n        // Collect into a `Vec` for O(n) performance.\n        // TODO: this could be slightly optimized by using the `Gc<[]>` layout for perf, but this is\n        // a later problem.\n        
Self::from(iter.into_iter().collect::<Vec<_>>())\n    }\n}\n"
  },
  {
    "path": "dumpster/src/sync/tests.rs",
    "content": "/*\n    dumpster, a cycle-tracking garbage collector for Rust.    Copyright (C) 2023 Clayton Ramsey.\n\n    This Source Code Form is subject to the terms of the Mozilla Public\n    License, v. 2.0. If a copy of the MPL was not distributed with this\n    file, You can obtain one at http://mozilla.org/MPL/2.0/.\n*/\n\nuse std::{\n    collections::hash_map::Entry,\n    mem::{swap, take, transmute, MaybeUninit},\n    ptr::NonNull,\n    sync::{\n        atomic::{AtomicUsize, Ordering},\n        Mutex, OnceLock,\n    },\n};\n\nuse foldhash::{HashMap, HashMapExt};\n\nuse crate::{sync::coerce_gc, Visitor};\n\nuse super::*;\n\nstruct DropCount<'a>(&'a AtomicUsize);\n\nimpl Drop for DropCount<'_> {\n    fn drop(&mut self) {\n        self.0.fetch_add(1, Ordering::Release);\n    }\n}\n\nunsafe impl<V: Visitor> TraceWith<V> for DropCount<'_> {\n    fn accept(&self, _: &mut V) -> Result<(), ()> {\n        Ok(())\n    }\n}\n\nstruct MultiRef {\n    refs: Mutex<Vec<Gc<MultiRef>>>,\n    #[expect(unused)]\n    count: DropCount<'static>,\n}\n\nunsafe impl<V: Visitor> TraceWith<V> for MultiRef {\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        self.refs.accept(visitor)\n    }\n}\n\n#[test]\nfn single_alloc() {\n    static DROP_COUNT: AtomicUsize = AtomicUsize::new(0);\n    let gc1 = Gc::new(DropCount(&DROP_COUNT));\n\n    collect();\n    assert_eq!(DROP_COUNT.load(Ordering::Acquire), 0);\n    drop(gc1);\n    collect();\n    assert_eq!(DROP_COUNT.load(Ordering::Acquire), 1);\n}\n\n#[test]\nfn ref_count() {\n    static DROP_COUNT: AtomicUsize = AtomicUsize::new(0);\n    let gc1 = Gc::new(DropCount(&DROP_COUNT));\n    let gc2 = Gc::clone(&gc1);\n\n    assert_eq!(DROP_COUNT.load(Ordering::Acquire), 0);\n    drop(gc1);\n    assert_eq!(DROP_COUNT.load(Ordering::Acquire), 0);\n    drop(gc2);\n    assert_eq!(DROP_COUNT.load(Ordering::Acquire), 1);\n}\n\n#[test]\nfn self_referential() {\n    struct Foo(Mutex<Option<Gc<Foo>>>);\n    static DROP_COUNT: 
AtomicUsize = AtomicUsize::new(0);\n\n    unsafe impl<V: Visitor> TraceWith<V> for Foo {\n        fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n            self.0.accept(visitor)\n        }\n    }\n\n    impl Drop for Foo {\n        fn drop(&mut self) {\n            println!(\"begin increment of the drop count!\");\n            DROP_COUNT.fetch_add(1, Ordering::Release);\n        }\n    }\n\n    let gc1 = Gc::new(Foo(Mutex::new(None)));\n    *gc1.0.lock().unwrap() = Some(Gc::clone(&gc1));\n\n    assert_eq!(DROP_COUNT.load(Ordering::Acquire), 0);\n    drop(gc1);\n    collect();\n    assert_eq!(DROP_COUNT.load(Ordering::Acquire), 1);\n}\n\n#[test]\nfn two_cycle() {\n    static DROP_0: AtomicUsize = AtomicUsize::new(0);\n    static DROP_1: AtomicUsize = AtomicUsize::new(0);\n\n    let gc0 = Gc::new(MultiRef {\n        refs: Mutex::new(Vec::new()),\n        count: DropCount(&DROP_0),\n    });\n    let gc1 = Gc::new(MultiRef {\n        refs: Mutex::new(vec![Gc::clone(&gc0)]),\n        count: DropCount(&DROP_1),\n    });\n    gc0.refs.lock().unwrap().push(Gc::clone(&gc1));\n\n    collect();\n    assert_eq!(DROP_0.load(Ordering::Acquire), 0);\n    assert_eq!(DROP_0.load(Ordering::Acquire), 0);\n    drop(gc0);\n    collect();\n    assert_eq!(DROP_0.load(Ordering::Acquire), 0);\n    assert_eq!(DROP_0.load(Ordering::Acquire), 0);\n    drop(gc1);\n    collect();\n    assert_eq!(DROP_0.load(Ordering::Acquire), 1);\n    assert_eq!(DROP_0.load(Ordering::Acquire), 1);\n}\n\n#[test]\nfn self_ref_two_cycle() {\n    static DROP_0: AtomicUsize = AtomicUsize::new(0);\n    static DROP_1: AtomicUsize = AtomicUsize::new(0);\n\n    let gc0 = Gc::new(MultiRef {\n        refs: Mutex::new(Vec::new()),\n        count: DropCount(&DROP_0),\n    });\n    let gc1 = Gc::new(MultiRef {\n        refs: Mutex::new(vec![Gc::clone(&gc0)]),\n        count: DropCount(&DROP_1),\n    });\n    gc0.refs.lock().unwrap().extend([gc0.clone(), gc1.clone()]);\n    
gc1.refs.lock().unwrap().push(gc1.clone());\n\n    collect();\n    assert_eq!(DROP_0.load(Ordering::Acquire), 0);\n    assert_eq!(DROP_0.load(Ordering::Acquire), 0);\n    drop(gc0);\n    collect();\n    assert_eq!(DROP_0.load(Ordering::Acquire), 0);\n    assert_eq!(DROP_0.load(Ordering::Acquire), 0);\n    drop(gc1);\n    collect();\n    assert_eq!(DROP_0.load(Ordering::Acquire), 1);\n    assert_eq!(DROP_0.load(Ordering::Acquire), 1);\n}\n\n#[test]\nfn parallel_loop() {\n    static COUNT_1: AtomicUsize = AtomicUsize::new(0);\n    static COUNT_2: AtomicUsize = AtomicUsize::new(0);\n    static COUNT_3: AtomicUsize = AtomicUsize::new(0);\n    static COUNT_4: AtomicUsize = AtomicUsize::new(0);\n\n    let gc1 = Gc::new(MultiRef {\n        count: DropCount(&COUNT_1),\n        refs: Mutex::new(Vec::new()),\n    });\n    let gc2 = Gc::new(MultiRef {\n        count: DropCount(&COUNT_2),\n        refs: Mutex::new(vec![Gc::clone(&gc1)]),\n    });\n    let gc3 = Gc::new(MultiRef {\n        count: DropCount(&COUNT_3),\n        refs: Mutex::new(vec![Gc::clone(&gc1)]),\n    });\n    let gc4 = Gc::new(MultiRef {\n        count: DropCount(&COUNT_4),\n        refs: Mutex::new(vec![Gc::clone(&gc2), Gc::clone(&gc3)]),\n    });\n    gc1.refs.lock().unwrap().push(Gc::clone(&gc4));\n\n    assert_eq!(COUNT_1.load(Ordering::Acquire), 0);\n    assert_eq!(COUNT_2.load(Ordering::Acquire), 0);\n    assert_eq!(COUNT_3.load(Ordering::Acquire), 0);\n    assert_eq!(COUNT_4.load(Ordering::Acquire), 0);\n    drop(gc1);\n    collect();\n    assert_eq!(COUNT_1.load(Ordering::Acquire), 0);\n    assert_eq!(COUNT_2.load(Ordering::Acquire), 0);\n    assert_eq!(COUNT_3.load(Ordering::Acquire), 0);\n    assert_eq!(COUNT_4.load(Ordering::Acquire), 0);\n    drop(gc2);\n    collect();\n    assert_eq!(COUNT_1.load(Ordering::Acquire), 0);\n    assert_eq!(COUNT_2.load(Ordering::Acquire), 0);\n    assert_eq!(COUNT_3.load(Ordering::Acquire), 0);\n    assert_eq!(COUNT_4.load(Ordering::Acquire), 0);\n    drop(gc3);\n  
  collect();\n    assert_eq!(COUNT_1.load(Ordering::Acquire), 0);\n    assert_eq!(COUNT_2.load(Ordering::Acquire), 0);\n    assert_eq!(COUNT_3.load(Ordering::Acquire), 0);\n    assert_eq!(COUNT_4.load(Ordering::Acquire), 0);\n    drop(gc4);\n    collect();\n    assert_eq!(COUNT_1.load(Ordering::Acquire), 1);\n    assert_eq!(COUNT_2.load(Ordering::Acquire), 1);\n    assert_eq!(COUNT_3.load(Ordering::Acquire), 1);\n    assert_eq!(COUNT_4.load(Ordering::Acquire), 1);\n}\n\n#[test]\n/// Test that we can drop a Gc which points to some allocation with a locked Mutex inside it\n// note: I tried using `ntest::timeout` but for some reason that caused this test to trivially pass.\nfn deadlock() {\n    let gc1 = Gc::new(Mutex::new(()));\n    let gc2 = gc1.clone();\n\n    let guard = gc1.lock();\n    drop(gc2);\n    collect();\n    drop(guard);\n}\n\n#[test]\nfn open_drop() {\n    static COUNT_1: AtomicUsize = AtomicUsize::new(0);\n    let gc1 = Gc::new(MultiRef {\n        refs: Mutex::new(Vec::new()),\n        count: DropCount(&COUNT_1),\n    });\n\n    gc1.refs.lock().unwrap().push(gc1.clone());\n    let guard = gc1.refs.lock();\n    collect();\n    assert_eq!(COUNT_1.load(Ordering::Acquire), 0);\n    drop(guard);\n    drop(gc1);\n    collect();\n\n    assert_eq!(COUNT_1.load(Ordering::Acquire), 1);\n}\n\n#[test]\n#[cfg_attr(miri, ignore = \"miri is too slow\")]\nfn eventually_collect() {\n    static COUNT_1: AtomicUsize = AtomicUsize::new(0);\n    static COUNT_2: AtomicUsize = AtomicUsize::new(0);\n\n    let gc1 = Gc::new(MultiRef {\n        refs: Mutex::new(Vec::new()),\n        count: DropCount(&COUNT_1),\n    });\n    let gc2 = Gc::new(MultiRef {\n        refs: Mutex::new(vec![gc1.clone()]),\n        count: DropCount(&COUNT_2),\n    });\n    gc1.refs.lock().unwrap().push(gc2.clone());\n\n    assert_eq!(COUNT_1.load(Ordering::Acquire), 0);\n    assert_eq!(COUNT_2.load(Ordering::Acquire), 0);\n\n    drop(gc1);\n    drop(gc2);\n\n    for _ in 0..200_000 {\n        let gc = 
Gc::new(());\n        drop(gc);\n    }\n\n    // after enough time, gc1 and gc2 should have been collected\n    assert_eq!(COUNT_1.load(Ordering::Acquire), 1);\n    assert_eq!(COUNT_2.load(Ordering::Acquire), 1);\n}\n\n#[test]\n#[cfg(feature = \"coerce-unsized\")]\nfn coerce_array() {\n    let gc1: Gc<[u8; 3]> = Gc::new([0, 0, 0]);\n    let gc2: Gc<[u8]> = gc1;\n    assert_eq!(gc2.len(), 3);\n    assert_eq!(\n        std::mem::size_of::<Gc<[u8]>>(),\n        3 * std::mem::size_of::<usize>()\n    );\n}\n\n#[test]\nfn coerce_array_using_macro() {\n    let gc1: Gc<[u8; 3]> = Gc::new([0, 0, 0]);\n    let gc2: Gc<[u8]> = coerce_gc!(gc1);\n    assert_eq!(gc2.len(), 3);\n    assert_eq!(\n        std::mem::size_of::<Gc<[u8]>>(),\n        3 * std::mem::size_of::<usize>()\n    );\n}\n\n#[test]\nfn malicious() {\n    static EVIL: AtomicUsize = AtomicUsize::new(0);\n    static A_DROP_DETECT: AtomicUsize = AtomicUsize::new(0);\n    struct A {\n        x: Gc<X>,\n        y: Gc<Y>,\n    }\n    struct X {\n        a: Mutex<Option<Gc<A>>>,\n        y: NonNull<Y>,\n    }\n    struct Y {\n        a: Mutex<Option<Gc<A>>>,\n    }\n\n    unsafe impl Send for X {}\n\n    unsafe impl<V: Visitor> TraceWith<V> for A {\n        fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n            self.x.accept(visitor)?;\n            self.y.accept(visitor)\n        }\n    }\n\n    unsafe impl<V: Visitor> TraceWith<V> for X {\n        fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n            self.a.accept(visitor)?;\n\n            if EVIL.fetch_add(1, Ordering::Relaxed) == 1 {\n                println!(\"committing evil...\");\n                // simulates a malicious thread\n                let y = unsafe { self.y.as_ref() };\n                *y.a.lock().unwrap() = (*self.a.lock().unwrap()).take();\n            }\n\n            Ok(())\n        }\n    }\n\n    unsafe impl<V: Visitor> TraceWith<V> for Y {\n        fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n            
self.a.accept(visitor)\n        }\n    }\n\n    unsafe impl Sync for X {}\n\n    impl Drop for A {\n        fn drop(&mut self) {\n            A_DROP_DETECT.fetch_add(1, Ordering::Relaxed);\n        }\n    }\n\n    let y = Gc::new(Y {\n        a: Mutex::new(None),\n    });\n    let x = Gc::new(X {\n        a: Mutex::new(None),\n        y: NonNull::from(y.as_ref()),\n    });\n    let a = Gc::new(A { x, y });\n    *a.x.a.lock().unwrap() = Some(a.clone());\n\n    collect();\n    drop(a.clone());\n    EVIL.store(1, Ordering::Relaxed);\n    collect();\n    assert_eq!(A_DROP_DETECT.load(Ordering::Relaxed), 0);\n    drop(a);\n    collect();\n    assert_eq!(A_DROP_DETECT.load(Ordering::Relaxed), 1);\n}\n\n#[test]\n#[cfg_attr(miri, ignore = \"miri is too slow\")]\n#[expect(clippy::too_many_lines)]\nfn fuzz() {\n    const N: usize = 20_000;\n    static DROP_DETECTORS: [AtomicUsize; N] = {\n        let mut detectors: [MaybeUninit<AtomicUsize>; N] =\n            unsafe { transmute(MaybeUninit::<[AtomicUsize; N]>::uninit()) };\n\n        let mut i = 0;\n        while i < N {\n            detectors[i] = MaybeUninit::new(AtomicUsize::new(0));\n            i += 1;\n        }\n\n        unsafe { transmute(detectors) }\n    };\n\n    #[derive(Debug)]\n    struct Alloc {\n        refs: Mutex<Vec<Gc<Alloc>>>,\n        id: usize,\n    }\n\n    impl Drop for Alloc {\n        fn drop(&mut self) {\n            DROP_DETECTORS[self.id].fetch_add(1, Ordering::Relaxed);\n        }\n    }\n\n    unsafe impl<V: Visitor> TraceWith<V> for Alloc {\n        fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n            self.refs.accept(visitor)\n        }\n    }\n\n    fn dfs(alloc: &Gc<Alloc>, graph: &mut HashMap<usize, Vec<usize>>) {\n        if let Entry::Vacant(v) = graph.entry(alloc.id) {\n            if alloc.id == 2822 || alloc.id == 2814 {\n                println!(\"{} - {alloc:?}\", alloc.id);\n            }\n            v.insert(Vec::new());\n            
alloc.refs.lock().unwrap().iter().for_each(|a| {\n                graph.get_mut(&alloc.id).unwrap().push(a.id);\n                dfs(a, graph);\n            });\n        }\n    }\n\n    fastrand::seed(12345);\n    let mut gcs = (0..50)\n        .map(|i| {\n            Gc::new(Alloc {\n                refs: Mutex::new(Vec::new()),\n                id: i,\n            })\n        })\n        .collect::<Vec<_>>();\n\n    let mut next_detector = 50;\n    for _ in 0..N {\n        if gcs.is_empty() {\n            gcs.push(Gc::new(Alloc {\n                refs: Mutex::new(Vec::new()),\n                id: next_detector,\n            }));\n            next_detector += 1;\n        }\n        match fastrand::u8(0..4) {\n            0 => {\n                println!(\"add gc {next_detector}\");\n                gcs.push(Gc::new(Alloc {\n                    refs: Mutex::new(Vec::new()),\n                    id: next_detector,\n                }));\n                next_detector += 1;\n            }\n            1 => {\n                if gcs.len() > 1 {\n                    let from = fastrand::usize(0..gcs.len());\n                    let to = fastrand::usize(0..gcs.len());\n                    println!(\"add ref {} -> {}\", gcs[from].id, gcs[to].id);\n                    let new_gc = gcs[to].clone();\n                    let mut guard = gcs[from].refs.lock().unwrap();\n                    guard.push(new_gc);\n                }\n            }\n            2 => {\n                let idx = fastrand::usize(0..gcs.len());\n                println!(\"remove gc {}\", gcs[idx].id);\n                gcs.swap_remove(idx);\n            }\n            3 => {\n                let from = fastrand::usize(0..gcs.len());\n                let mut guard = gcs[from].refs.lock().unwrap();\n                if !guard.is_empty() {\n                    let to = fastrand::usize(0..guard.len());\n                    println!(\"drop ref {} -> {}\", gcs[from].id, guard[to].id);\n                    
guard.swap_remove(to);\n                }\n            }\n            _ => unreachable!(),\n        }\n    }\n\n    let mut graph = HashMap::new();\n    graph.insert(9999, Vec::new());\n    for alloc in &gcs {\n        graph.get_mut(&9999).unwrap().push(alloc.id);\n        dfs(alloc, &mut graph);\n    }\n    println!(\"{graph:#?}\");\n\n    drop(gcs);\n    collect();\n\n    let mut n_missing = 0;\n    for (id, count) in DROP_DETECTORS[..next_detector].iter().enumerate() {\n        let num = count.load(Ordering::Relaxed);\n        if num != 1 {\n            println!(\"expected 1 for id {id} but got {num}\");\n            n_missing += 1;\n        }\n    }\n    assert_eq!(n_missing, 0);\n}\n\n#[test]\nfn root_canal() {\n    struct A {\n        b: Gc<B>,\n    }\n\n    struct B {\n        a0: Mutex<Option<Gc<A>>>,\n        a1: Mutex<Option<Gc<A>>>,\n        a2: Mutex<Option<Gc<A>>>,\n        a3: Mutex<Option<Gc<A>>>,\n    }\n\n    unsafe impl<V: Visitor> TraceWith<V> for A {\n        fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n            self.b.accept(visitor)\n        }\n    }\n\n    unsafe impl<V: Visitor> TraceWith<V> for B {\n        fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n            let n_prior_visits = B_VISIT_COUNT.fetch_add(1, Ordering::Relaxed);\n            self.a0.accept(visitor)?;\n            self.a1.accept(visitor)?;\n\n            // simulate a malicious thread swapping things around\n            if n_prior_visits == 1 {\n                println!(\"committing evil...\");\n                swap(\n                    &mut *SMUGGLED_POINTERS[0].lock().unwrap(),\n                    &mut *SMUGGLED_POINTERS[1]\n                        .lock()\n                        .unwrap()\n                        .as_ref()\n                        .unwrap()\n                        .b\n                        .a0\n                        .lock()\n                        .unwrap(),\n                );\n                swap(&mut 
*self.a0.lock().unwrap(), &mut *self.a2.lock().unwrap());\n                swap(\n                    &mut *SMUGGLED_POINTERS[0].lock().unwrap(),\n                    &mut *SMUGGLED_POINTERS[1]\n                        .lock()\n                        .unwrap()\n                        .as_ref()\n                        .unwrap()\n                        .b\n                        .a1\n                        .lock()\n                        .unwrap(),\n                );\n                swap(&mut *self.a1.lock().unwrap(), &mut *self.a3.lock().unwrap());\n            }\n\n            self.a2.accept(visitor)?;\n            self.a3.accept(visitor)?;\n\n            // smuggle out some pointers\n            if n_prior_visits == 0 {\n                println!(\"smuggling...\");\n                *SMUGGLED_POINTERS[0].lock().unwrap() = take(&mut *self.a2.lock().unwrap());\n                *SMUGGLED_POINTERS[1].lock().unwrap() = take(&mut *self.a3.lock().unwrap());\n            }\n\n            Ok(())\n        }\n    }\n\n    impl Drop for B {\n        fn drop(&mut self) {\n            B_DROP_DETECT.fetch_add(1, Ordering::Relaxed);\n        }\n    }\n\n    static SMUGGLED_POINTERS: [Mutex<Option<Gc<A>>>; 2] = [Mutex::new(None), Mutex::new(None)];\n    static B_VISIT_COUNT: AtomicUsize = AtomicUsize::new(0);\n    static B_DROP_DETECT: AtomicUsize = AtomicUsize::new(0);\n\n    let a = Gc::new(A {\n        b: Gc::new(B {\n            a0: Mutex::new(None),\n            a1: Mutex::new(None),\n            a2: Mutex::new(None),\n            a3: Mutex::new(None),\n        }),\n    });\n    *a.b.a0.lock().unwrap() = Some(a.clone());\n    *a.b.a1.lock().unwrap() = Some(a.clone());\n    *a.b.a2.lock().unwrap() = Some(a.clone());\n    *a.b.a3.lock().unwrap() = Some(a.clone());\n\n    drop(a.clone());\n    collect();\n    println!(\"{}\", CURRENT_TAG.load(Ordering::Relaxed));\n\n    assert!(dbg!(SMUGGLED_POINTERS[0].lock().unwrap().as_ref()).is_some());\n    
assert!(SMUGGLED_POINTERS[1].lock().unwrap().as_ref().is_some());\n    println!(\"{}\", B_VISIT_COUNT.load(Ordering::Relaxed));\n\n    assert_eq!(B_DROP_DETECT.load(Ordering::Relaxed), 0);\n    drop(a);\n    assert_eq!(B_DROP_DETECT.load(Ordering::Relaxed), 0);\n    collect();\n    println!(\"{}\", CURRENT_TAG.load(Ordering::Relaxed));\n\n    assert_eq!(B_DROP_DETECT.load(Ordering::Relaxed), 0);\n\n    *SMUGGLED_POINTERS[0].lock().unwrap() = None;\n    *SMUGGLED_POINTERS[1].lock().unwrap() = None;\n    collect();\n\n    assert_eq!(B_DROP_DETECT.load(Ordering::Relaxed), 1);\n}\n\n#[test]\n#[should_panic = \"Attempting to dereference Gc to already-deallocated object.This is caused by accessing a Gc during a Drop implementation, likely implying a bug in your code.\"]\nfn escape_dead_pointer() {\n    static ESCAPED: Mutex<Option<Gc<Escape>>> = Mutex::new(None);\n\n    struct Escape {\n        x: u8,\n        ptr: Mutex<Option<Gc<Escape>>>,\n    }\n\n    impl Drop for Escape {\n        fn drop(&mut self) {\n            let mut escaped_guard = ESCAPED.lock().unwrap();\n            if escaped_guard.is_none() {\n                *escaped_guard = self.ptr.lock().unwrap().take();\n            }\n        }\n    }\n\n    unsafe impl<V: Visitor> TraceWith<V> for Escape {\n        fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n            self.ptr.accept(visitor)\n        }\n    }\n\n    let esc = Gc::new(Escape {\n        x: 0,\n        ptr: Mutex::new(None),\n    });\n\n    *(*esc).ptr.lock().unwrap() = Some(esc.clone());\n    drop(esc);\n    collect();\n    println!(\"{}\", ESCAPED.lock().unwrap().as_ref().unwrap().x);\n}\n\n#[test]\nfn from_box() {\n    let gc: Gc<String> = Gc::from(Box::new(String::from(\"hello\")));\n\n    // The `From<Box<T>>` implementation executes a different code path to\n    // construct the `Gc`.\n    //\n    // Here we ensure that the metadata is initialized to a valid state.\n    unsafe {\n        let gc_box = 
gc.ptr.get().unwrap().as_ref();\n        assert_eq!(gc_box.strong.load(Ordering::SeqCst), 1);\n        assert_eq!(gc_box.weak.load(Ordering::SeqCst), 0);\n    }\n\n    assert_eq!(&*gc, \"hello\");\n}\n\n#[test]\nfn from_slice() {\n    let gc: Gc<[String]> = Gc::from(&[String::from(\"hello\"), String::from(\"world\")][..]);\n\n    // The `From<&[T]>` implementation executes a different code path to\n    // construct the `Gc`.\n    //\n    // Here we ensure that the metadata is initialized to a valid state.\n    unsafe {\n        let gc_box = gc.ptr.get().unwrap().as_ref();\n        assert_eq!(gc_box.strong.load(Ordering::SeqCst), 1);\n        assert_eq!(gc_box.weak.load(Ordering::SeqCst), 0);\n    }\n\n    assert_eq!(&*gc, [\"hello\", \"world\"]);\n}\n\n#[test]\n#[should_panic = \"told you\"]\nfn from_slice_panic() {\n    struct MayPanicOnClone {\n        value: String,\n        panic: bool,\n    }\n\n    impl Clone for MayPanicOnClone {\n        fn clone(&self) -> Self {\n            assert!(!self.panic, \"told you\");\n\n            Self {\n                value: self.value.clone(),\n                panic: self.panic,\n            }\n        }\n    }\n\n    unsafe impl<V: Visitor> TraceWith<V> for MayPanicOnClone {\n        fn accept(&self, _: &mut V) -> Result<(), ()> {\n            Ok(())\n        }\n    }\n\n    let slice: &[MayPanicOnClone] = &[\n        MayPanicOnClone {\n            value: String::from(\"a\"),\n            panic: false,\n        },\n        MayPanicOnClone {\n            value: String::from(\"b\"),\n            panic: false,\n        },\n        MayPanicOnClone {\n            value: String::from(\"c\"),\n            panic: true,\n        },\n    ];\n\n    let _: Gc<[MayPanicOnClone]> = Gc::from(slice);\n}\n\n#[test]\nfn from_vec() {\n    let gc: Gc<[String]> = Gc::from(vec![String::from(\"hello\"), String::from(\"world\")]);\n\n    // The `From<Vec<T>>` implementation executes a different code path to\n    // construct the `Gc`.\n    //\n    
// Here we ensure that the metadata is initialized to a valid state.\n    unsafe {\n        let gc_box = gc.ptr.get().unwrap().as_ref();\n        assert_eq!(gc_box.strong.load(Ordering::SeqCst), 1);\n        assert_eq!(gc_box.weak.load(Ordering::SeqCst), 0);\n    }\n\n    assert_eq!(&*gc, [\"hello\", \"world\"]);\n}\n\n#[test]\nfn make_mut() {\n    let mut a = Gc::new(42);\n    let mut b = a.clone();\n    let mut c = b.clone();\n\n    assert_eq!(*Gc::make_mut(&mut a), 42);\n    assert_eq!(*Gc::make_mut(&mut b), 42);\n    assert_eq!(*Gc::make_mut(&mut c), 42);\n\n    *Gc::make_mut(&mut a) += 1;\n    *Gc::make_mut(&mut b) += 2;\n    *Gc::make_mut(&mut c) += 3;\n\n    assert_eq!(*a, 43);\n    assert_eq!(*b, 44);\n    assert_eq!(*c, 45);\n\n    // they should all be unique\n    assert_eq!(Gc::ref_count(&a).get(), 1);\n    assert_eq!(Gc::ref_count(&b).get(), 1);\n    assert_eq!(Gc::ref_count(&c).get(), 1);\n}\n\n#[test]\nfn make_mut_2() {\n    let mut a = Gc::new(42);\n    let b = a.clone();\n    let c = b.clone();\n\n    assert_eq!(*a, 42);\n    assert_eq!(*b, 42);\n    assert_eq!(*c, 42);\n\n    *Gc::make_mut(&mut a) += 1;\n\n    assert_eq!(*a, 43);\n    assert_eq!(*b, 42);\n    assert_eq!(*c, 42);\n\n    // a should be unique\n    // b and c should share their object\n    assert_eq!(Gc::ref_count(&a).get(), 1);\n    assert_eq!(Gc::ref_count(&b).get(), 2);\n    assert_eq!(Gc::ref_count(&c).get(), 2);\n}\n\n#[test]\nfn make_mut_of_object_in_dumpster() {\n    #[derive(Clone)]\n    struct Foo {\n        // just some gc pointer so foo lands in the dumpster\n        something: Gc<i32>,\n    }\n\n    unsafe impl<V: Visitor> TraceWith<V> for Foo {\n        fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n            self.something.accept(visitor)\n        }\n    }\n\n    let mut foo = Gc::new(Foo {\n        something: Gc::new(5),\n    });\n\n    drop(foo.clone());\n\n    // now foo is in the dumpster\n    // and its ref count is one\n    
assert_eq!(Gc::ref_count(&foo).get(), 1);\n\n    // we get a mut reference\n    let foo_mut = Gc::make_mut(&mut foo);\n\n    // now we collect garbage while we're also holding onto a mutable reference to foo\n    // if foo is still in the dumpster then the collection will dereference it and cause UB\n    collect();\n\n    // we need to do something with `foo_mut` here so the mutable borrow is actually held\n    // during collection\n    assert_eq!(*foo_mut.something, 5);\n}\n\n#[test]\n#[should_panic = \"panic on visit\"]\n#[cfg_attr(miri, ignore = \"intentionally leaks memory\")]\nfn panic_visit() {\n    #[expect(unused)]\n    struct PanicVisit(Gc<Self>);\n\n    /// We technically can make it part of the contract for `Trace` to reject panicking impls,\n    /// but it is good form to accept these even though they are malformed.\n    unsafe impl<V: Visitor> TraceWith<V> for PanicVisit {\n        fn accept(&self, _: &mut V) -> Result<(), ()> {\n            panic!(\"panic on visit\");\n        }\n    }\n\n    let gc = Gc::new_cyclic(PanicVisit);\n    let _ = gc.clone();\n    drop(gc);\n    collect();\n}\n\n#[test]\n/// Test that creating a `Gc` during a `Drop` implementation will still not leak the `Gc`.\nfn sync_leak_by_creation_in_drop() {\n    static BAR_DROP_COUNT: AtomicUsize = AtomicUsize::new(0);\n    struct Foo(OnceLock<Gc<Self>>);\n    struct Bar(OnceLock<Gc<Self>>);\n\n    unsafe impl<V: Visitor> TraceWith<V> for Foo {\n        fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n            self.0.accept(visitor)\n        }\n    }\n\n    unsafe impl<V: Visitor> TraceWith<V> for Bar {\n        fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n            self.0.accept(visitor)\n        }\n    }\n\n    impl Drop for Foo {\n        fn drop(&mut self) {\n            let gcbar = Gc::new(Bar(OnceLock::new()));\n            let _ = gcbar.0.set(gcbar.clone());\n            drop(gcbar);\n            crate::sync::collect::deliver_dumpster(); // needed to 
prevent allocation from being\n                                                      // lost in other thread\n        }\n    }\n\n    impl Drop for Bar {\n        fn drop(&mut self) {\n            BAR_DROP_COUNT.fetch_add(1, Ordering::Relaxed);\n        }\n    }\n\n    let foo = Gc::new(Foo(OnceLock::new()));\n    let _ = foo.0.set(foo.clone());\n    drop(foo);\n\n    collect(); // causes Bar to be created and then leaked\n    collect(); // cleans up Bar (eventually)\n\n    assert!(super::collect::DUMPSTER.with(|d| d.contents.borrow().is_empty()));\n\n    assert_eq!(BAR_DROP_COUNT.load(Ordering::Relaxed), 1);\n}\n\n#[test]\nfn custom_trait_object() {\n    trait MyTrait: Trace + Send + Sync {}\n    impl<T: Trace + Send + Sync> MyTrait for T {}\n\n    let gc = Gc::new(5i32);\n    let gc: Gc<dyn MyTrait> = coerce_gc!(gc);\n    _ = gc;\n}\n\n#[test]\nfn new_cyclic_simple() {\n    struct Cycle(Gc<Self>);\n    unsafe impl<V: Visitor> TraceWith<V> for Cycle {\n        fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n            self.0.accept(visitor)\n        }\n    }\n    let gc = Gc::new_cyclic(Cycle);\n    assert_eq!(Gc::ref_count(&gc).get(), 2);\n    drop(gc);\n}\n\n#[test]\n#[should_panic = \"told you\"]\nfn panic_new_cyclic() {\n    let _ = Gc::<()>::new_cyclic(|_| panic!(\"told you\"));\n}\n\n#[test]\nfn gc_from_iter() {\n    let _gc = (0..100).collect::<Gc<[_]>>();\n}\n\n#[test]\nfn self_referential_from_iter() {\n    struct Ab {\n        a: Gc<Self>,\n        b: Gc<Self>,\n    }\n\n    unsafe impl<V: Visitor> TraceWith<V> for Ab {\n        fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n            self.a.accept(visitor)?;\n            self.b.accept(visitor)?;\n\n            Ok(())\n        }\n    }\n\n    let mut gcs = Vec::<Gc<Ab>>::new();\n    gcs.push(Gc::new_cyclic(|a: Gc<Ab>| Ab { a: a.clone(), b: a }));\n    for _ in 0..10 {\n        let b = gcs.last().unwrap().clone();\n        gcs.push(Gc::new_cyclic(|a: Gc<Ab>| Ab { a, b }));\n    }\n    
let _big_gc = gcs.into_iter().collect::<Gc<[_]>>();\n}\n"
  },
  {
    "path": "dumpster/src/unsync/collect.rs",
    "content": "/*\n    dumpster, a cycle-tracking garbage collector for Rust.    Copyright (C) 2023 Clayton Ramsey.\n\n    This Source Code Form is subject to the terms of the Mozilla Public\n    License, v. 2.0. If a copy of the MPL was not distributed with this\n    file, You can obtain one at http://mozilla.org/MPL/2.0/.\n*/\n\n//! Implementations of the single-threaded garbage-collection logic.\n\nuse std::{\n    alloc::{dealloc, Layout},\n    cell::{Cell, RefCell},\n    collections::hash_map::Entry,\n    mem::take,\n    num::NonZeroUsize,\n    ptr::{drop_in_place, NonNull},\n};\n\nuse foldhash::{HashMap, HashMapExt, HashSet, HashSetExt};\n\nuse crate::{\n    ptr::Erased,\n    unsync::{default_collect_condition, CollectInfo, Gc},\n    Trace, Visitor,\n};\n\nuse super::{CollectCondition, GcBox};\n\nthread_local! {\n    /// Whether the current thread is running a cleanup process.\n    static COLLECTING: Cell<bool> = const { Cell::new(false) };\n    /// The global collection of allocation information for this thread.\n    pub(super) static DUMPSTER: Dumpster = Dumpster {\n        to_collect: RefCell::new(HashMap::new()),\n        n_ref_drops: Cell::new(0),\n        n_refs_living: Cell::new(0),\n        collect_condition: Cell::new(default_collect_condition),\n    };\n}\n\n/// A dumpster is a collection of all the garbage that may or may not need to be cleaned up.\n/// It also contains information relevant to when a cleanup should be triggered.\npub(super) struct Dumpster {\n    /// A map from allocation IDs for allocations which may need to be collected to pointers to\n    /// their allocations.\n    to_collect: RefCell<HashMap<AllocationId, Cleanup>>,\n    /// The number of times a reference has been dropped since the last collection was triggered.\n    pub n_ref_drops: Cell<usize>,\n    /// The number of references that currently exist in the entire heap and stack.\n    pub n_refs_living: Cell<usize>,\n    /// The function for determining whether a collection 
should be run.\n    pub collect_condition: Cell<CollectCondition>,\n}\n\n#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]\n/// A unique identifier for an allocated garbage-collected block.\n///\n/// It contains a pointer to the reference count of the allocation.\nstruct AllocationId(pub NonNull<Cell<NonZeroUsize>>);\n\nimpl<T> From<NonNull<GcBox<T>>> for AllocationId\nwhere\n    T: Trace + ?Sized,\n{\n    /// Get an allocation ID from a pointer to an allocation.\n    fn from(value: NonNull<GcBox<T>>) -> Self {\n        AllocationId(value.cast())\n    }\n}\n\n#[derive(Debug)]\n/// The necessary information required to collect some garbage-collected data.\n/// This data is stored in a map from allocation IDs to the necessary cleanup operation.\nstruct Cleanup {\n    /// The function which is called to build the reference graph and find all allocations\n    /// reachable from this allocation.\n    dfs_fn: unsafe fn(Erased, &mut Dfs),\n    /// The function which is called to mark descendants of this allocation as reachable.\n    mark_fn: unsafe fn(Erased, &mut Mark),\n    /// A function used for dropping the allocation.\n    drop_fn: unsafe fn(Erased, &mut DropAlloc<'_>),\n    /// An erased pointer to the allocation.\n    ptr: Erased,\n}\n\n/// Creates a function that applies a visitor to some erased pointer.\n///\n/// # Safety\n///\n/// `T` must be the same type that `ptr` was created with via [`Erased::new`].\nmacro_rules! 
apply_visitor {\n    () => {\n        |ptr, visitor| unsafe {\n            _ = ptr.specify::<GcBox<T>>().as_ref().value.accept(visitor);\n        }\n    };\n}\n\nimpl Cleanup {\n    /// Construct a new cleanup for an allocation.\n    fn new<T: Trace + ?Sized>(box_ptr: NonNull<GcBox<T>>) -> Cleanup {\n        Cleanup {\n            dfs_fn: apply_visitor!(),\n            mark_fn: apply_visitor!(),\n            drop_fn: drop_assist::<T>,\n            ptr: Erased::new(box_ptr),\n        }\n    }\n}\n\nimpl Dumpster {\n    /// Collect all unreachable allocations that this dumpster is responsible for.\n    pub fn collect_all(&self) {\n        if COLLECTING.get() {\n            return; // Do not double-collect.\n        }\n        self.n_ref_drops.set(0);\n\n        unsafe {\n            let mut dfs = Dfs {\n                visited: HashSet::with_capacity(self.to_collect.borrow().len()),\n                ref_graph: HashMap::with_capacity(self.to_collect.borrow().len()),\n            };\n\n            for (k, v) in &*self.to_collect.borrow() {\n                if dfs.visited.insert(*k) {\n                    (v.dfs_fn)(v.ptr, &mut dfs);\n                }\n            }\n\n            let mut mark = Mark {\n                visited: HashSet::with_capacity(dfs.visited.len()),\n            };\n            for (id, reachability) in dfs\n                .ref_graph\n                .iter()\n                .filter(|(_, reachability)| reachability.n_unaccounted != 0)\n            {\n                mark.visited.insert(*id);\n                (reachability.mark_fn)(reachability.ptr, &mut mark);\n            }\n\n            // any allocations which we didn't find must also be roots\n            for (id, cleanup) in self\n                .to_collect\n                .borrow()\n                .iter()\n                .filter(|(id, _)| !dfs.ref_graph.contains_key(id))\n            {\n                mark.visited.insert(*id);\n                (cleanup.mark_fn)(cleanup.ptr, &mut 
mark);\n            }\n\n            dfs.visited.clear();\n            let mut decrementer = DropAlloc {\n                visited: dfs.visited,\n                reachable: &mark.visited,\n            };\n\n            COLLECTING.set(true);\n            // Do not hold mutable reference, as it is possible for a `Gc` to be marked dirty during\n            // collection by dropping.\n            let mut collectees = take(&mut *self.to_collect.borrow_mut());\n            for cleanup in collectees\n                .drain()\n                .filter_map(|(id, cleanup)| (!mark.visited.contains(&id)).then_some(cleanup))\n            {\n                (cleanup.drop_fn)(cleanup.ptr, &mut decrementer);\n            }\n            COLLECTING.set(false);\n            assert!(collectees.is_empty());\n            let mut new_to_collect = self.to_collect.borrow_mut();\n            if new_to_collect.is_empty() {\n                // Reuse allocation from `collectees`\n                *new_to_collect = collectees;\n            }\n        }\n    }\n\n    /// Mark an allocation as \"dirty,\" implying that it may need to be swept through later to find\n    /// out if it has any references pointing to it.\n    pub fn mark_dirty<T: Trace + ?Sized>(&self, box_ptr: NonNull<GcBox<T>>) {\n        self.to_collect\n            .borrow_mut()\n            .entry(AllocationId::from(box_ptr))\n            .or_insert_with(|| Cleanup::new(box_ptr));\n    }\n\n    /// Mark an allocation as \"cleaned,\" implying that the allocation is about to be destroyed and\n    /// therefore should not be cleaned up later.\n    pub fn mark_cleaned<T: Trace + ?Sized>(&self, box_ptr: NonNull<GcBox<T>>) {\n        self.to_collect\n            .borrow_mut()\n            .remove(&AllocationId::from(box_ptr));\n    }\n\n    /// Notify the dumpster that a garbage-collected pointer has been dropped.\n    ///\n    /// This may trigger a cleanup of the heap, but is guaranteed to be amortized to _O(1)_.\n    pub fn 
notify_dropped_gc(&self) {\n        self.n_ref_drops.set(self.n_ref_drops.get() + 1);\n        let old_refs_living = self.n_refs_living.get();\n        assert_ne!(\n            old_refs_living, 0,\n            \"underflow on unsync::Gc number of living Gcs\"\n        );\n        self.n_refs_living.set(old_refs_living - 1);\n\n        // check if it's been a long time since the last time we collected all\n        // the garbage.\n        // if so, go and collect it all again (amortized O(1))\n        if (self.collect_condition.get())(&CollectInfo { _private: () }) {\n            self.collect_all();\n        }\n    }\n\n    /// Notify the dumpster that a new [`Gc`] has been created.\n    pub fn notify_created_gc(&self) {\n        self.n_refs_living.set(self.n_refs_living.get() + 1);\n    }\n}\n\nimpl Drop for Dumpster {\n    fn drop(&mut self) {\n        // cleanup any leftover allocations\n        self.collect_all();\n    }\n}\n\n/// The data required to construct the graph of reachable allocations.\npub(super) struct Dfs {\n    /// The set of allocations which have already been visited.\n    visited: HashSet<AllocationId>,\n    /// A map from allocation identifiers to information about their reachability.\n    ref_graph: HashMap<AllocationId, Reachability>,\n}\n\n#[derive(Debug)]\n/// Information about the reachability of a structure.\nstruct Reachability {\n    /// The number of unaccounted-for references to this allocation.\n    /// If this number is 0, the reference is not a root.\n    n_unaccounted: usize,\n    /// An erased pointer to the allocation under concern.\n    ptr: Erased,\n    /// A function used to mark descendants of this allocation as accessible.\n    mark_fn: unsafe fn(Erased, &mut Mark),\n}\n\nimpl Visitor for Dfs {\n    fn visit_sync<T>(&mut self, _: &crate::sync::Gc<T>)\n    where\n        T: Trace + Send + Sync + ?Sized,\n    {\n        // because `Gc` is `!Sync`, we know we won't find a `Gc` this way and can return\n        // immediately.\n 
   }\n\n    fn visit_unsync<T>(&mut self, gc: &Gc<T>)\n    where\n        T: Trace + ?Sized,\n    {\n        if Gc::is_dead(gc) {\n            return;\n        }\n        let ptr = gc.ptr.get().unwrap();\n        let next_id = AllocationId::from(ptr);\n        match self.ref_graph.entry(next_id) {\n            Entry::Occupied(ref mut o) => {\n                o.get_mut().n_unaccounted -= 1;\n            }\n            Entry::Vacant(v) => {\n                v.insert(Reachability {\n                    n_unaccounted: unsafe { next_id.0.as_ref().get().get() - 1 },\n                    ptr: Erased::new(ptr),\n                    mark_fn: apply_visitor!(),\n                });\n            }\n        }\n        if self.visited.insert(next_id) {\n            let _ = unsafe { ptr.as_ref() }.value.accept(self);\n        }\n    }\n}\n\n/// A mark traversal, which marks allocations as reachable.\npub(super) struct Mark {\n    /// The set of allocations which have been marked as reachable.\n    visited: HashSet<AllocationId>,\n}\n\nimpl Visitor for Mark {\n    fn visit_sync<T>(&mut self, _: &crate::sync::Gc<T>)\n    where\n        T: Trace + Send + Sync + ?Sized,\n    {\n        // because `Gc` is `!Sync`, we know we won't find a `Gc` this way and can return\n        // immediately.\n    }\n\n    fn visit_unsync<T>(&mut self, gc: &Gc<T>)\n    where\n        T: Trace + ?Sized,\n    {\n        if Gc::is_dead(gc) {\n            return;\n        }\n        let ptr = gc.ptr.get().unwrap();\n        if self.visited.insert(AllocationId::from(ptr)) {\n            let _ = unsafe { ptr.as_ref().value.accept(self) };\n        }\n    }\n}\n\n/// A visitor for dropping allocations.\npub(super) struct DropAlloc<'a> {\n    /// The set of unreachable allocations we've already visited.\n    visited: HashSet<AllocationId>,\n    /// The set of reachable allocations, which must not be dropped.\n    reachable: &'a HashSet<AllocationId>,\n}\n\nimpl Visitor for DropAlloc<'_> {\n    fn visit_sync<T>(&mut self, _: 
&crate::sync::Gc<T>)\n    where\n        T: Trace + Send + Sync + ?Sized,\n    {\n        // do nothing\n    }\n\n    fn visit_unsync<T>(&mut self, gc: &Gc<T>)\n    where\n        T: Trace + ?Sized,\n    {\n        if Gc::is_dead(gc) {\n            return;\n        }\n        let ptr = gc.ptr.get().unwrap();\n        let id = AllocationId::from(ptr);\n        gc.kill();\n        if self.reachable.contains(&id) {\n            unsafe {\n                let cell_ref = &ptr.as_ref().ref_count;\n                cell_ref.set(NonZeroUsize::new(cell_ref.get().get() - 1).expect(\n                    \"reachable allocation cannot be rendered unreachable by deleting lost alloc\",\n                ));\n            }\n            return;\n        }\n\n        if self.visited.insert(id) {\n            unsafe {\n                ptr.as_ref().value.accept(self).unwrap();\n                let layout = Layout::for_value(ptr.as_ref());\n                drop_in_place(ptr.as_ptr());\n                dealloc(ptr.as_ptr().cast(), layout);\n            }\n        }\n    }\n}\n\n/// Decrement the outbound reference counts for any reachable allocations which this allocation can\n/// find.\n/// Also, drop the allocation when done.\nunsafe fn drop_assist<T: Trace + ?Sized>(ptr: Erased, visitor: &mut DropAlloc<'_>) {\n    let mut spec = ptr.specify::<GcBox<T>>();\n    if visitor.visited.insert(AllocationId::from(spec)) {\n        spec.as_ref().value.accept(visitor).unwrap();\n\n        let mut_spec = spec.as_mut();\n        let layout = Layout::for_value(mut_spec);\n        drop_in_place(mut_spec);\n        dealloc(std::ptr::from_mut::<GcBox<T>>(mut_spec).cast(), layout);\n    }\n}\n"
  },
  {
    "path": "dumpster/src/unsync/mod.rs",
    "content": "/*\n    dumpster, a cycle-tracking garbage collector for Rust.    Copyright (C) 2023 Clayton Ramsey.\n\n    This Source Code Form is subject to the terms of the Mozilla Public\n    License, v. 2.0. If a copy of the MPL was not distributed with this\n    file, You can obtain one at http://mozilla.org/MPL/2.0/.\n*/\n\n//! Thread-local garbage collection.\n//!\n//! Most users of this library will want to direct their attention to [`Gc`].\n//! If you want to tune the garbage collector's cleanup frequency, take a look at\n//! [`set_collect_condition`].\n//!\n//! # Examples\n//!\n//! ```\n//! use dumpster::{unsync::Gc, Trace};\n//! use std::cell::RefCell;\n//!\n//! #[derive(Trace)]\n//! struct Foo {\n//!     refs: RefCell<Vec<Gc<Self>>>,\n//! }\n//!\n//! let foo = Gc::new(Foo {\n//!     refs: RefCell::new(Vec::new()),\n//! });\n//!\n//! // If you had used `Rc`, this would be a memory leak.\n//! // `Gc` can collect it, though!\n//! foo.refs.borrow_mut().push(foo.clone());\n//! ```\n\nuse crate::{\n    contains_gcs, panic_deref_of_collected_object, ptr::Nullable, Trace, TraceWith, Visitor,\n};\nuse std::{\n    alloc::{dealloc, handle_alloc_error, Layout},\n    any::TypeId,\n    borrow::{Borrow, Cow},\n    cell::Cell,\n    fmt::Display,\n    mem::{self, ManuallyDrop, MaybeUninit},\n    num::NonZeroUsize,\n    ops::Deref,\n    ptr::{self, addr_of, addr_of_mut, drop_in_place, NonNull},\n    slice,\n};\n\nuse self::collect::{Dfs, DropAlloc, Dumpster, Mark, DUMPSTER};\n\nmod collect;\n#[cfg(test)]\nmod tests;\n\n/// Allows tracing with all unsync visitors.\n#[expect(private_bounds)]\npub(crate) trait TraceUnsync:\n    TraceWith<Dfs> + TraceWith<Mark> + for<'a> TraceWith<DropAlloc<'a>> + TraceWith<Rehydrate>\n{\n}\n\nimpl<T> TraceUnsync for T where\n    T: ?Sized\n        + TraceWith<Dfs>\n        + TraceWith<Mark>\n        + for<'a> TraceWith<DropAlloc<'a>>\n        + TraceWith<Rehydrate>\n{\n}\n\n#[derive(Debug)]\n/// A garbage-collected pointer.\n///\n/// This 
garbage-collected pointer may be used for data which is not safe to share across threads\n/// (such as a [`std::cell::RefCell`]).\n/// It can also be used for variably sized data.\n///\n/// # Examples\n///\n/// ```\n/// use dumpster::unsync::Gc;\n///\n/// let x: Gc<u8> = Gc::new(3);\n///\n/// println!(\"{}\", *x); // prints '3'\n///                     // x is then freed automatically!\n/// ```\n///\n/// # Interaction with `Drop`\n///\n/// While collecting cycles, it's possible for a `Gc` to exist that points to some deallocated\n/// object.\n/// To prevent undefined behavior, these `Gc`s are marked as dead during collection and rendered\n/// inaccessible.\n/// Dereferencing or cloning a `Gc` during the `Drop` implementation of a `Trace` type could\n/// result in the program panicking to keep the program from accessing memory after freeing it.\n/// If you're accessing a `Gc` during a `Drop` implementation, make sure to use the fallible\n/// operations [`Gc::try_deref`] and [`Gc::try_clone`].\npub struct Gc<T: Trace + ?Sized + 'static> {\n    /// A pointer to the heap allocation containing the data under concern.\n    /// The pointee box should never be mutated.\n    ///\n    /// If `ptr` is `None`, then this is a dead `Gc`, meaning that the allocation it points to has\n    /// been dropped.\n    /// This can only happen observably if this `Gc` is accessed during the [`Drop`] implementation\n    /// of a [`Trace`] type.\n    ptr: Cell<Nullable<GcBox<T>>>,\n}\n\n/// Collect all existing unreachable allocations.\n///\n/// This operation is most useful for making sure that the `Drop` implementation for some data has\n/// been called before moving on (such as for a file handle or mutex guard), because the garbage\n/// collector is not eager under normal conditions.\n/// This only collects the allocations local to the caller's thread.\n///\n/// # Examples\n///\n/// ```\n/// # fn main() -> Result<(), Box<dyn std::error::Error + 'static>> {\n/// use 
dumpster::unsync::{collect, Gc};\n/// use std::sync::Mutex;\n///\n/// static MY_MUTEX: Mutex<()> = Mutex::new(());\n///\n/// let guard_gc = Gc::new(MY_MUTEX.lock()?);\n/// drop(guard_gc);\n/// // We're not certain that the handle that was contained in `guard_gc` has been dropped, so we\n/// // should force a collection to make sure.\n/// collect();\n///\n/// // We know this won't cause a deadlock because we made sure to run a collection.\n/// let _x = MY_MUTEX.lock()?;\n/// # Ok(())\n/// # }\n/// ```\npub fn collect() {\n    _ = DUMPSTER.try_with(Dumpster::collect_all);\n}\n\n/// Information passed to a [`CollectCondition`] used to determine whether the garbage collector\n/// should start collecting.\npub struct CollectInfo {\n    /// Dummy value so this is a private structure.\n    _private: (),\n}\n\n/// A function which determines whether the garbage collector should start collecting.\n/// This function primarily exists so that it can be used with [`set_collect_condition`].\n///\n/// # Examples\n///\n/// ```rust\n/// use dumpster::unsync::{set_collect_condition, CollectInfo};\n///\n/// fn always_collect(_: &CollectInfo) -> bool {\n///     true\n/// }\n///\n/// set_collect_condition(always_collect);\n/// ```\npub type CollectCondition = fn(&CollectInfo) -> bool;\n\n#[must_use]\n/// The default collection condition used by the garbage collector.\n///\n/// There are no guarantees about what this function returns, other than that it will return `true`\n/// with sufficient frequency to ensure that all `Gc` operations are amortized _O(1)_ in runtime.\n///\n/// This function isn't really meant to be called by users, but rather it's supposed to be handed\n/// off to [`set_collect_condition`] to return to the default operating mode of the library.\n///\n/// This collection condition applies locally, i.e. 
only to this thread.\n/// If you want it to apply globally, you'll have to update it every time you spawn a thread.\n///\n/// # Examples\n///\n/// ```rust\n/// use dumpster::unsync::{default_collect_condition, set_collect_condition};\n///\n/// set_collect_condition(default_collect_condition);\n/// ```\npub fn default_collect_condition(info: &CollectInfo) -> bool {\n    info.n_gcs_dropped_since_last_collect() > info.n_gcs_existing()\n}\n\n/// Set the function which determines whether the garbage collector should be run.\n///\n/// `f` will be periodically called by the garbage collector to determine whether it should perform\n/// a full cleanup of the heap.\n/// When `f` returns true, a cleanup will begin.\n///\n/// # Examples\n///\n/// ```\n/// use dumpster::unsync::{set_collect_condition, CollectInfo};\n///\n/// /// This function will make sure a GC cleanup never happens unless directly activated.\n/// fn never_collect(_: &CollectInfo) -> bool {\n///     false\n/// }\n///\n/// set_collect_condition(never_collect);\n/// ```\npub fn set_collect_condition(f: CollectCondition) {\n    _ = DUMPSTER.try_with(|d| d.collect_condition.set(f));\n}\n\n#[repr(C)]\n// This is only public to make the `unsync_coerce_gc` macro work.\n#[doc(hidden)]\n/// The underlying heap allocation for a [`Gc`].\npub struct GcBox<T: Trace + ?Sized> {\n    /// The number of extant references to this garbage-collected data.\n    ref_count: Cell<NonZeroUsize>,\n    /// The stored value inside this garbage-collected box.\n    value: T,\n}\n\nimpl<T: Trace + ?Sized> Gc<T> {\n    /// Construct a new garbage-collected allocation, with `value` as its value.\n    ///\n    /// # Examples\n    ///\n    /// ```\n    /// use dumpster::unsync::Gc;\n    ///\n    /// let gc = Gc::new(0);\n    /// ```\n    pub fn new(value: T) -> Gc<T>\n    where\n        T: Sized,\n    {\n        _ = DUMPSTER.try_with(Dumpster::notify_created_gc);\n        Gc {\n            ptr: 
Cell::new(Nullable::new(NonNull::from(Box::leak(Box::new(GcBox {\n                ref_count: Cell::new(NonZeroUsize::MIN),\n                value,\n            }))))),\n        }\n    }\n\n    /// Construct a self-referencing `Gc`.\n    ///\n    /// `new_cyclic` first allocates memory for `T`, then constructs a dead `Gc` pointing to the\n    /// allocation. The dead `Gc` is then passed to `data_fn` to construct a value of `T`, which\n    /// is stored in the allocation. Finally, `new_cyclic` will update the dead self-referential\n    /// `Gc`s and rehydrate them to produce the final value.\n    ///\n    /// # Panics\n    ///\n    /// If `data_fn` panics, the panic is propagated to the caller.\n    /// The allocation is cleaned up normally.\n    ///\n    /// Additionally, if, when attempting to rehydrate the `Gc` members of `F`, the visitor fails to\n    /// reach a `Gc`, this function will panic and reserve the allocation to be cleaned up\n    /// later.\n    ///\n    /// # Notes on safety\n    ///\n    /// Incorrect implementations of `data_fn` may have unusual or strange results.\n    /// Although `dumpster` guarantees that it will be safe, and will do its best to ensure correct\n    /// results, it is generally unwise to allow dead `Gc`s to exist for long.\n    /// If you implement `data_fn` wrong, this may cause panics later on inside of the collection\n    /// process.\n    ///\n    /// # Examples\n    ///\n    /// ```\n    /// use dumpster::{unsync::Gc, Trace};\n    ///\n    /// #[derive(Trace)]\n    /// struct Cycle {\n    ///     this: Gc<Self>,\n    /// }\n    ///\n    /// let gc = Gc::new_cyclic(|this| Cycle { this });\n    /// assert!(Gc::ptr_eq(&gc, &gc.this));\n    /// ```\n    pub fn new_cyclic<F: FnOnce(Gc<T>) -> T>(data_fn: F) -> Self\n    where\n        T: Sized,\n    {\n        /// A struct containing an uninitialized value of `T`.\n        /// May only be used inside `new_cyclic`.\n        #[repr(transparent)]\n        struct 
Uninitialized<T>(MaybeUninit<T>);\n\n        unsafe impl<V: Visitor, T> TraceWith<V> for Uninitialized<T> {\n            fn accept(&self, _: &mut V) -> Result<(), ()> {\n                Ok(())\n            }\n        }\n\n        /// Data structure for cleaning up the allocation in case we panic along the way.\n        struct CleanUp<T: Trace + 'static> {\n            /// Is `true` if the [`GcBox::value`] is initialized.\n            initialized: bool,\n            /// Pointer to the `GcBox` with a maybe uninitialized value.\n            ptr: NonNull<GcBox<T>>,\n        }\n\n        impl<T: Trace + 'static> Drop for CleanUp<T> {\n            fn drop(&mut self) {\n                if self.initialized {\n                    // push this `Gc` into the destruction queue\n                    _ = DUMPSTER.try_with(|d| d.mark_dirty(self.ptr));\n                } else {\n                    // deallocate\n                    unsafe {\n                        dealloc(\n                            self.ptr.as_ptr().cast::<u8>(),\n                            Layout::for_value(self.ptr.as_ref()),\n                        );\n                    }\n                }\n            }\n        }\n\n        // make an uninitialized allocation\n        _ = DUMPSTER.try_with(Dumpster::notify_created_gc);\n        let mut gcbox = NonNull::from(Box::leak(Box::new(GcBox {\n            ref_count: Cell::new(NonZeroUsize::MIN),\n            value: Uninitialized(MaybeUninit::<T>::uninit()),\n        })));\n        let mut cleanup = CleanUp {\n            ptr: gcbox,\n            initialized: false,\n        };\n\n        // nilgc is a dead Gc\n        let nilgc = Gc {\n            ptr: Cell::new(Nullable::new(gcbox.cast::<GcBox<T>>()).as_null()),\n        };\n        assert!(Gc::is_dead(&nilgc));\n        unsafe {\n            // SAFETY: `gcbox` is a valid pointer to an uninitialized datum that we have allocated.\n            gcbox.as_mut().value = 
Uninitialized(MaybeUninit::new(data_fn(nilgc)));\n        }\n        cleanup.initialized = true;\n\n        let gcbox = gcbox.cast::<GcBox<T>>();\n        let res = unsafe {\n            // SAFETY: the above unsafe block correctly constructed the Uninitialized value, so it\n            // is safe to cast `gcbox` and then construct a reference.\n            gcbox.as_ref().value.accept(&mut Rehydrate {\n                ptr: Nullable::new(gcbox.cast()),\n                type_id: TypeId::of::<T>(),\n            })\n        };\n\n        assert!(\n            res.is_ok(),\n            \"visitor must be able to access all Gc fields of structure when rehydrating dead Gcs\"\n        );\n        let gc = Gc {\n            ptr: Cell::new(Nullable::new(gcbox)),\n        };\n\n        let _ = ManuallyDrop::new(cleanup);\n        gc\n    }\n\n    /// Attempt to dereference this `Gc`.\n    ///\n    /// This function will return `None` if `self` is a \"dead\" `Gc`, which points to an\n    /// already-deallocated object.\n    /// This can only occur if a `Gc` is accessed during the `Drop` implementation of a\n    /// [`Trace`] object.\n    ///\n    /// For a version which panics instead of returning `None`, consider using [`Deref`].\n    ///\n    /// # Examples\n    ///\n    /// For a still-living `Gc`, this always returns `Some`.\n    ///\n    /// ```\n    /// use dumpster::unsync::Gc;\n    ///\n    /// let gc1 = Gc::new(0);\n    /// assert!(Gc::try_deref(&gc1).is_some());\n    /// ```\n    ///\n    /// The only way to get a `Gc` which fails on `try_deref` is by accessing a `Gc` during its\n    /// `Drop` implementation.\n    ///\n    /// ```\n    /// use dumpster::{unsync::Gc, Trace};\n    ///\n    /// #[derive(Trace)]\n    /// struct Cycle(Gc<Self>);\n    ///\n    /// impl Drop for Cycle {\n    ///     fn drop(&mut self) {\n    ///         let maybe_ref = Gc::try_deref(&self.0);\n    ///         assert!(maybe_ref.is_none());\n    ///     }\n    /// }\n    ///\n    /// let gc1 = 
Gc::new_cyclic(|this| Cycle(this));\n    /// # drop(gc1);\n    /// # dumpster::unsync::collect();\n    /// ```\n    pub fn try_deref(gc: &Gc<T>) -> Option<&T> {\n        (!gc.ptr.get().is_null()).then(|| &**gc)\n    }\n\n    /// Attempt to clone this `Gc`.\n    ///\n    /// This function will return `None` if `self` is a \"dead\" `Gc`, which points to an\n    /// already-deallocated object.\n    /// This can only occur if a `Gc` is accessed during the `Drop` implementation of a\n    /// [`Trace`] object.\n    ///\n    /// For a version which panics instead of returning `None`, consider using [`Clone`].\n    ///\n    /// # Examples\n    ///\n    /// For a still-living `Gc`, this always returns `Some`.\n    ///\n    /// ```\n    /// use dumpster::unsync::Gc;\n    ///\n    /// let gc1 = Gc::new(0);\n    /// let gc2 = Gc::try_clone(&gc1).unwrap();\n    /// ```\n    ///\n    /// The only way to get a `Gc` which fails on `try_clone` is by accessing a `Gc` during its\n    /// `Drop` implementation.\n    ///\n    /// ```\n    /// use dumpster::{unsync::Gc, Trace};\n    ///\n    /// #[derive(Trace)]\n    /// struct Cycle(Gc<Self>);\n    ///\n    /// impl Drop for Cycle {\n    ///     fn drop(&mut self) {\n    ///         let cloned = Gc::try_clone(&self.0);\n    ///         assert!(cloned.is_none());\n    ///     }\n    /// }\n    ///\n    /// let gc1 = Gc::new_cyclic(|this| Cycle(this));\n    /// # drop(gc1);\n    /// # dumpster::unsync::collect();\n    /// ```\n    pub fn try_clone(gc: &Gc<T>) -> Option<Gc<T>> {\n        (!gc.ptr.get().is_null()).then(|| gc.clone())\n    }\n\n    /// Provides a raw pointer to the data.\n    ///\n    /// Panics if `self` is a \"dead\" `Gc`,\n    /// which points to an already-deallocated object.\n    /// This can only occur if a `Gc` is accessed during the `Drop` implementation of a\n    /// [`Trace`] object.\n    ///\n    /// # Examples\n    ///\n    /// ```\n    /// use dumpster::unsync::Gc;\n    /// let x = 
Gc::new(\"hello\".to_owned());\n    /// let y = Gc::clone(&x);\n    /// let x_ptr = Gc::as_ptr(&x);\n    /// assert_eq!(x_ptr, Gc::as_ptr(&x));\n    /// assert_eq!(unsafe { &*x_ptr }, \"hello\");\n    /// ```\n    pub fn as_ptr(gc: &Gc<T>) -> *const T {\n        let ptr = NonNull::as_ptr(gc.ptr.get().unwrap());\n        unsafe { addr_of_mut!((*ptr).value) }\n    }\n\n    /// Determine whether two `Gc`s are equivalent by reference.\n    /// Returns `true` if both `this` and `other` point to the same value, in the same style as\n    /// [`std::ptr::eq`].\n    ///\n    /// # Examples\n    ///\n    /// ```\n    /// use dumpster::unsync::Gc;\n    ///\n    /// let gc1 = Gc::new(0);\n    /// let gc2 = Gc::clone(&gc1); // points to same spot as `gc1`\n    /// let gc3 = Gc::new(0); // same value, but points to a different object than `gc1`\n    ///\n    /// assert!(Gc::ptr_eq(&gc1, &gc2));\n    /// assert!(!Gc::ptr_eq(&gc1, &gc3));\n    /// ```\n    pub fn ptr_eq(this: &Gc<T>, other: &Gc<T>) -> bool {\n        this.ptr.get().as_option() == other.ptr.get().as_option()\n    }\n\n    /// Get the number of references to the value pointed to by this `Gc`.\n    ///\n    /// This does not include internal references generated by the garbage collector.\n    ///\n    /// # Panics\n    ///\n    /// This function may panic if the `Gc` whose reference count we are loading is \"dead\" (i.e.\n    /// generated through a `Drop` implementation). For further reference, take a look at\n    /// [`Gc::is_dead`].\n    ///\n    /// # Examples\n    ///\n    /// ```\n    /// use dumpster::unsync::Gc;\n    ///\n    /// let gc = Gc::new(());\n    /// assert_eq!(Gc::ref_count(&gc).get(), 1);\n    /// let gc2 = gc.clone();\n    /// assert_eq!(Gc::ref_count(&gc).get(), 2);\n    /// drop(gc);\n    /// drop(gc2);\n    /// ```\n    pub fn ref_count(gc: &Self) -> NonZeroUsize {\n        let box_ptr = gc.ptr.get().expect(\n            \"Attempt to dereference Gc to already-collected object. 
\\\n    This means a Gc escaped from a Drop implementation, likely implying a bug in your code.\",\n        );\n        let box_ref = unsafe { box_ptr.as_ref() };\n        box_ref.ref_count.get()\n    }\n\n    /// Determine whether this is a dead `Gc`.\n    ///\n    /// A `Gc` is dead if it does not point to a valid value.\n    /// Such a `Gc` can only be made in one of two ways: first, if a `Gc` is accessed during the\n    /// `Drop` implementation of a structure, and second, if a `Gc` leaks out of [`Gc::new_cyclic`].\n    ///\n    /// # Examples\n    ///\n    /// ```\n    /// use dumpster::{unsync::Gc, Trace};\n    ///\n    /// #[derive(Trace)]\n    /// struct Cycle(Gc<Self>);\n    ///\n    /// impl Drop for Cycle {\n    ///     fn drop(&mut self) {\n    ///         assert!(Gc::is_dead(&self.0));\n    ///     }\n    /// }\n    ///\n    /// let gc1 = Gc::new_cyclic(|this| Cycle(this));\n    /// # drop(gc1);\n    /// # dumpster::unsync::collect();\n    /// ```\n    pub fn is_dead(gc: &Self) -> bool {\n        gc.ptr.get().is_null()\n    }\n\n    /// Consumes the `Gc<T>`, returning the inner `GcBox<T>` pointer.\n    #[inline]\n    #[must_use]\n    fn into_ptr(this: Self) -> *const GcBox<T> {\n        let this = ManuallyDrop::new(this);\n        this.ptr.get().as_ptr()\n    }\n\n    /// Constructs a `Gc<T>` from the innner `GcBox<T>` pointer.\n    #[inline]\n    #[must_use]\n    unsafe fn from_ptr(ptr: *const GcBox<T>) -> Self {\n        Self {\n            ptr: Cell::new(Nullable::from_ptr(ptr.cast_mut())),\n        }\n    }\n\n    /// Exists solely for the [`coerce_gc`] macro.\n    #[inline]\n    #[must_use]\n    #[doc(hidden)]\n    pub fn __private_into_ptr(this: Self) -> *const GcBox<T> {\n        Self::into_ptr(this)\n    }\n\n    /// Exists solely for the [`coerce_gc`] macro.\n    #[inline]\n    #[must_use]\n    #[doc(hidden)]\n    pub unsafe fn __private_from_ptr(ptr: *const GcBox<T>) -> Self {\n        Self::from_ptr(ptr)\n    }\n\n    /// Kill this `Gc`, 
replacing it with a dead `Gc`.\n    fn kill(&self) {\n        self.ptr.set(self.ptr.get().as_null());\n    }\n}\n\n/// A struct for converting dead `Gc`s into live ones.\n///\n/// This is used in [`Gc::new_cyclic`].\npub(super) struct Rehydrate {\n    /// The pointer to the currently hydrating [`GcBox`].\n    ptr: Nullable<GcBox<()>>,\n    /// The [`TypeId`] of `T` in `Gc<T>` to be hydrated.\n    type_id: TypeId,\n}\n\nimpl Visitor for Rehydrate {\n    fn visit_sync<T>(&mut self, _: &crate::sync::Gc<T>)\n    where\n        T: Trace + Send + Sync + ?Sized,\n    {\n    }\n\n    fn visit_unsync<T>(&mut self, gc: &Gc<T>)\n    where\n        T: Trace + ?Sized,\n    {\n        if Gc::is_dead(gc) && TypeId::of::<T>() == self.type_id {\n            unsafe {\n                // SAFETY: it is safe to transmute these pointers because we have checked\n                // that they are of the same type.\n                // Additionally, the `GcBox` has been fully initialized, so it is safe to\n                // create a reference here.\n                let cell_ptr = (&raw const gc.ptr).cast::<Cell<Nullable<GcBox<()>>>>();\n                (*cell_ptr).set(self.ptr);\n\n                let box_ref = &*self.ptr.as_ptr();\n                box_ref\n                    .ref_count\n                    .set(box_ref.ref_count.get().saturating_add(1));\n                _ = DUMPSTER.try_with(Dumpster::notify_created_gc);\n            }\n        }\n    }\n}\n\nimpl<T: Trace + Clone> Gc<T> {\n    /// Makes a mutable reference to the given `Gc`.\n    ///\n    /// If there are other `Gc` pointers to the same allocation, then `make_mut` will\n    /// [`clone`] the inner value to a new allocation to ensure unique ownership. 
This is also\n    /// referred to as clone-on-write.\n    ///\n    /// [`clone`]: Clone::clone\n    ///\n    /// # Panics\n    ///\n    /// This function may panic if the `Gc` whose reference count we are loading is \"dead\" (i.e.\n    /// generated through a `Drop` implementation). For further reference, take a look at\n    /// [`Gc::is_dead`].\n    ///\n    /// # Examples\n    ///\n    /// ```\n    /// use dumpster::unsync::Gc;\n    ///\n    /// let mut data = Gc::new(5);\n    ///\n    /// *Gc::make_mut(&mut data) += 1; // Won't clone anything\n    /// let mut other_data = Gc::clone(&data); // Won't clone inner data\n    /// *Gc::make_mut(&mut data) += 1; // Clones inner data\n    /// *Gc::make_mut(&mut data) += 1; // Won't clone anything\n    /// *Gc::make_mut(&mut other_data) *= 2; // Won't clone anything\n    ///\n    /// // Now `data` and `other_data` point to different allocations.\n    /// assert_eq!(*data, 8);\n    /// assert_eq!(*other_data, 12);\n    /// ```\n    #[inline]\n    pub fn make_mut(this: &mut Self) -> &mut T {\n        if Gc::is_dead(this) {\n            panic_deref_of_collected_object();\n        }\n\n        // SAFETY: we checked above that the object is alive (not null)\n        let ptr = unsafe { this.ptr.get().unwrap_unchecked() };\n        let box_ref = unsafe { ptr.as_ref() };\n\n        if box_ref.ref_count.get() == NonZeroUsize::MIN {\n            // The dumpster must not contain this allocation while we hold\n            // a mutable reference to its value because on collection\n            // it would dereference the value to trace it.\n            _ = DUMPSTER.try_with(|d| d.mark_cleaned(ptr));\n        } else {\n            // We don't have unique access to the value so we need to clone it.\n            *this = Gc::new(box_ref.value.clone());\n        }\n\n        // SAFETY: we have exclusive access to this `GcBox` because we ensured\n        // that the ref count is 1 and that there are no loose pointers in the\n        // 
`to_collect` buffer of this thread's dumpster.\n        unsafe { &mut (*this.ptr.get_mut().as_ptr()).value }\n    }\n}\n\nimpl<T: Trace + ?Sized> Gc<T> {\n    /// Allocates an `GcBox<T>` with sufficient space for\n    /// a value of the provided layout.\n    ///\n    /// The function `mem_to_gc_box` is called with the data pointer\n    /// and must return back a pointer for the `GcBox<T>`.\n    unsafe fn allocate_for_layout(\n        value_layout: Layout,\n        mem_to_gc_box: impl FnOnce(*mut u8) -> *mut GcBox<T>,\n    ) -> *mut GcBox<T> {\n        let layout = Layout::new::<GcBox<()>>()\n            .extend(value_layout)\n            .unwrap()\n            .0\n            .pad_to_align();\n\n        Self::allocate_for_layout_of_box(layout, mem_to_gc_box)\n    }\n\n    /// Allocates an `GcBox<T>` with the given layout.\n    ///\n    /// The function `mem_to_gc_box` is called with the data pointer\n    /// and must return back a pointer for the `GcBox<T>`.\n    unsafe fn allocate_for_layout_of_box(\n        layout: Layout,\n        mem_to_gc_box: impl FnOnce(*mut u8) -> *mut GcBox<T>,\n    ) -> *mut GcBox<T> {\n        // SAFETY: layout has non-zero size because of the `ref_count` field\n        let ptr = unsafe { std::alloc::alloc(layout) };\n\n        if ptr.is_null() {\n            handle_alloc_error(layout);\n        }\n\n        let inner = mem_to_gc_box(ptr);\n\n        unsafe {\n            (&raw mut (*inner).ref_count).write(Cell::new(NonZeroUsize::MIN));\n        }\n\n        inner\n    }\n}\n\nimpl<T: Trace> Gc<[T]> {\n    /// Allocates an `GcBox<[T]>` with the given length.\n    #[inline]\n    fn allocate_for_slice(len: usize) -> *mut GcBox<[T]> {\n        unsafe {\n            Self::allocate_for_layout(Layout::array::<T>(len).unwrap(), |mem| {\n                ptr::slice_from_raw_parts_mut(mem.cast::<T>(), len) as *mut GcBox<[T]>\n            })\n        }\n    }\n}\n\n/// Allows coercing `T` of [`Gc<T>`](Gc).\n///\n/// This means that you can convert 
a `Gc` containing a strictly-sized type (such as `[T; N]`) into\n/// a `Gc` containing its unsized version (such as `[T]`), all without using nightly-only features.\n///\n/// This is one of two easy ways to create a `Gc<[T]>`; the other method is to use [`FromIterator`].\n///\n/// # Examples\n///\n/// ```\n/// use dumpster::unsync::{coerce_gc, Gc};\n///\n/// let gc1: Gc<[u8; 3]> = Gc::new([7, 8, 9]);\n/// let gc2: Gc<[u8]> = coerce_gc!(gc1);\n/// assert_eq!(&gc2[..], &[7, 8, 9]);\n/// ```\n///\n/// Note that although this macro allows for type conversion, it _cannot_ be used for converting\n/// between incompatible types.\n///\n/// ```compile_fail\n/// // This program is incorrect!\n/// use dumpster::unsync::{Gc, coerce_gc};\n///\n/// let gc1: Gc<u8> = Gc::new(1);\n/// let gc2: Gc<i8> = coerce_gc!(gc1);\n/// ```\n#[doc(hidden)]\n#[macro_export]\nmacro_rules! __unsync_coerce_gc {\n    ($gc:expr) => {{\n        // Temporarily convert the `Gc` into a raw pointer to allow for coercion to occur.\n        let ptr: *const _ = $crate::unsync::Gc::__private_into_ptr($gc);\n        unsafe { $crate::unsync::Gc::__private_from_ptr(ptr) }\n    }};\n}\n\n#[doc(inline)]\npub use crate::__unsync_coerce_gc as coerce_gc;\n\nimpl<T: Trace + ?Sized> Deref for Gc<T> {\n    type Target = T;\n\n    /// Dereference this pointer, creating a reference to the contained value `T`.\n    ///\n    /// # Panics\n    ///\n    /// This function may panic if it is called from within the implementation of `std::ops::Drop`\n    /// of its owning value, since returning such a reference could cause a use-after-free.\n    /// It is not guaranteed to panic.\n    ///\n    /// For a version which returns `None` instead of panicking, consider [`Gc::try_deref`].\n    ///\n    /// # Examples\n    ///\n    /// The following is a correct time to dereference a `Gc`.\n    ///\n    /// ```\n    /// use dumpster::unsync::Gc;\n    ///\n    /// let my_gc = Gc::new(0u8);\n    /// let my_ref: &u8 = &my_gc;\n    /// 
```\n    ///\n    /// Dereferencing a `Gc` while dropping is not correct.\n    ///\n    /// ```should_panic\n    /// // This is wrong!\n    /// use dumpster::{unsync::Gc, Trace};\n    ///\n    /// #[derive(Trace)]\n    /// struct Bad {\n    ///     s: String,\n    ///     this: Gc<Bad>,\n    /// }\n    ///\n    /// impl Drop for Bad {\n    ///     fn drop(&mut self) {\n    ///         // will panic when dereferencing `this`\n    ///         println!(\"{}\", self.this.s)\n    ///     }\n    /// }\n    ///\n    /// let foo = Gc::new_cyclic(|this| Bad {\n    ///     s: \"foo\".to_string(),\n    ///     this,\n    /// });\n    /// ```\n    fn deref(&self) -> &Self::Target {\n        unsafe {\n            &self.ptr.get().expect(\"dereferencing Gc to already-collected object. \\\n            This means a Gc escaped from a Drop implementation, likely implying a bug in your code.\").as_ref().value\n        }\n    }\n}\n\nimpl<T: Trace + ?Sized> Clone for Gc<T> {\n    /// Create a duplicate reference to the same data pointed to by `self`.\n    /// This does not duplicate the data.\n    /// If this `Gc` [is dead](`Gc::is_dead`), the cloned value will also be a dead `Gc`.\n    ///\n    /// For a fallible version, refer to [`Gc::try_clone`].\n    ///\n    /// # Examples\n    ///\n    /// ```\n    /// use dumpster::unsync::Gc;\n    /// use std::sync::atomic::{AtomicU8, Ordering};\n    ///\n    /// let gc1 = Gc::new(AtomicU8::new(0));\n    /// let gc2 = gc1.clone();\n    ///\n    /// gc1.store(1, Ordering::Relaxed);\n    /// assert_eq!(gc2.load(Ordering::Relaxed), 1);\n    /// ```\n    ///\n    /// You can also clone dead `Gc`s.\n    ///\n    /// ```\n    /// use dumpster::{unsync::Gc, Trace};\n    ///\n    /// #[derive(Trace)]\n    /// struct Cycle(Gc<Self>);\n    ///\n    /// impl Drop for Cycle {\n    ///     fn drop(&mut self) {\n    ///         let gc = self.0.clone();\n    ///         assert!(Gc::is_dead(&gc));\n    ///     }\n    /// }\n    ///\n    /// let gc1 = 
Gc::new_cyclic(|this| Cycle(this));\n    /// # drop(gc1);\n    /// # dumpster::unsync::collect();\n    /// ```\n    fn clone(&self) -> Self {\n        let Some(ptr) = self.ptr.get().as_option() else {\n            return Self {\n                ptr: self.ptr.clone(),\n            };\n        };\n        unsafe {\n            let box_ref = ptr.as_ref();\n            box_ref\n                .ref_count\n                .set(box_ref.ref_count.get().saturating_add(1));\n        }\n        _ = DUMPSTER.try_with(|d| {\n            d.notify_created_gc();\n            // d.mark_cleaned(self.ptr);\n        });\n        Self {\n            ptr: self.ptr.clone(),\n        }\n    }\n}\n\nimpl<T: Trace + ?Sized> Drop for Gc<T> {\n    /// Destroy this garbage-collected pointer.\n    ///\n    /// If this is the last reference which can reach the pointed-to data, the allocation that it\n    /// points to will be destroyed.\n    fn drop(&mut self) {\n        let Some(mut ptr) = self.ptr.get().as_option() else {\n            return;\n        };\n\n        let dumpster_is_destroyed = DUMPSTER\n            .try_with(|d| {\n                let box_ref = unsafe { ptr.as_ref() };\n                match box_ref.ref_count.get() {\n                    NonZeroUsize::MIN => {\n                        d.mark_cleaned(ptr);\n                        unsafe {\n                            // this was the last reference, drop unconditionally\n                            drop_in_place(addr_of_mut!(ptr.as_mut().value));\n                            // note: `box_ref` is no longer usable\n                            dealloc(ptr.as_ptr().cast::<u8>(), Layout::for_value(ptr.as_ref()));\n                        }\n                    }\n                    n => {\n                        // decrement the ref count - but another reference to this data still\n                        // lives\n                        box_ref\n                            .ref_count\n                            
.set(NonZeroUsize::new(n.get() - 1).unwrap());\n\n                        if contains_gcs(&box_ref.value).unwrap_or(true) {\n                            // remaining references could be a cycle - therefore, mark it as dirty\n                            // so we can check later\n                            d.mark_dirty(ptr);\n                        }\n                    }\n                }\n                // Notify that a GC has been dropped, potentially triggering a cleanup\n                d.notify_dropped_gc();\n            })\n            .is_err();\n\n        if dumpster_is_destroyed {\n            // The `DUMPSTER` thread local has already been destroyed. This will only happen\n            // when if `Gc` is itself stored in a thread local or was created in a thread local\n            // destructor. We still do reference counting but won't be able to collect cycles.\n\n            let box_ref = unsafe { ptr.as_ref() };\n\n            match box_ref.ref_count.get() {\n                NonZeroUsize::MIN => {\n                    unsafe {\n                        // this was the last reference, drop unconditionally\n                        drop_in_place(addr_of_mut!(ptr.as_mut().value));\n                        // note: `box_ref` is no longer usable\n                        dealloc(ptr.as_ptr().cast::<u8>(), Layout::for_value(ptr.as_ref()));\n                    }\n                }\n                n => {\n                    // decrement the ref count - but another reference to this data still\n                    // lives\n                    box_ref\n                        .ref_count\n                        .set(NonZeroUsize::new(n.get() - 1).unwrap());\n                }\n            }\n        }\n    }\n}\n\nimpl<T> PartialEq<Gc<T>> for Gc<T>\nwhere\n    T: Trace + ?Sized + PartialEq,\n{\n    /// Test for equality on two `Gc`s.\n    ///\n    /// Two `Gc`s are equal if their inner values are equal, even if they are stored in different\n    /// 
allocations.\n    /// Because `PartialEq` does not imply reflexivity, and there is no current path for trait\n    /// specialization, this function does not do a \"fast-path\" check for reference equality.\n    /// Therefore, if two `Gc`s point to the same allocation, the implementation of `eq` will still\n    /// require a direct call to `eq` on the values.\n    ///\n    /// # Panics\n    ///\n    /// This function may panic if it is called from within the implementation of `std::ops::Drop`\n    /// of its owning value, since returning such a reference could cause a use-after-free.\n    /// It is not guaranteed to panic.\n    /// Additionally, if this `Gc` is moved out of an allocation during a `Drop` implementation, it\n    /// could later cause a panic.\n    /// For further details, refer to the main documentation for `Gc`.\n    ///\n    /// # Examples\n    ///\n    /// ```\n    /// use dumpster::unsync::Gc;\n    ///\n    /// let gc = Gc::new(6);\n    /// assert!(gc == Gc::new(6));\n    /// ```\n    fn eq(&self, other: &Gc<T>) -> bool {\n        self.as_ref() == other.as_ref()\n    }\n}\n\nimpl<T> Eq for Gc<T> where T: Trace + ?Sized + PartialEq {}\n\nimpl CollectInfo {\n    #[must_use]\n    /// Get the number of times that a [`Gc`] has been dropped since the last time a collection\n    /// operation was performed.\n    ///\n    /// # Examples\n    ///\n    /// ```\n    /// use dumpster::unsync::{set_collect_condition, CollectInfo};\n    ///\n    /// // Collection condition for whether many Gc's have been dropped.\n    /// fn have_many_gcs_dropped(info: &CollectInfo) -> bool {\n    ///     info.n_gcs_dropped_since_last_collect() > 100\n    /// }\n    ///\n    /// set_collect_condition(have_many_gcs_dropped);\n    /// ```\n    pub fn n_gcs_dropped_since_last_collect(&self) -> usize {\n        DUMPSTER.try_with(|d| d.n_ref_drops.get()).unwrap_or(0)\n    }\n\n    #[must_use]\n    /// Get the total number of [`Gc`]s which currently exist.\n    ///\n    /// # 
Examples\n    ///\n    /// ```\n    /// use dumpster::unsync::{set_collect_condition, CollectInfo};\n    ///\n    /// // Collection condition for whether many Gc's currently exist.\n    /// fn do_many_gcs_exist(info: &CollectInfo) -> bool {\n    ///     info.n_gcs_existing() > 100\n    /// }\n    ///\n    /// set_collect_condition(do_many_gcs_exist);\n    /// ```\n    pub fn n_gcs_existing(&self) -> usize {\n        DUMPSTER.try_with(|d| d.n_refs_living.get()).unwrap_or(0)\n    }\n}\n\nunsafe impl<V: Visitor, T: Trace + ?Sized> TraceWith<V> for Gc<T> {\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        visitor.visit_unsync(self);\n        Ok(())\n    }\n}\n\nimpl<T: Trace + ?Sized> AsRef<T> for Gc<T> {\n    fn as_ref(&self) -> &T {\n        self\n    }\n}\n\nimpl<T: Trace + ?Sized> Borrow<T> for Gc<T> {\n    fn borrow(&self) -> &T {\n        self\n    }\n}\n\nimpl<T: Trace + Default> Default for Gc<T> {\n    fn default() -> Self {\n        Gc::new(T::default())\n    }\n}\n\nimpl<T: Trace + ?Sized> std::fmt::Pointer for Gc<T> {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        std::fmt::Pointer::fmt(&addr_of!(**self), f)\n    }\n}\n\n#[cfg(feature = \"coerce-unsized\")]\nimpl<T, U> std::ops::CoerceUnsized<Gc<U>> for Gc<T>\nwhere\n    T: std::marker::Unsize<U> + Trace + ?Sized,\n    U: Trace + ?Sized,\n{\n}\n\nimpl<T: Trace + Display + ?Sized> Display for Gc<T> {\n    /// Formats the value using its `Display` implementation.\n    ///\n    /// # Note\n    ///\n    /// If `T` contains cyclic references through `Gc` pointers and its `Display` implementation\n    /// attempts to traverse them, this may cause infinite recursion. 
Types with potential cycles\n    /// should implement `Display` to avoid following cyclic references.\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        Display::fmt(&**self, f)\n    }\n}\n\nimpl<T: Trace> From<T> for Gc<T> {\n    /// Converts a generic type `T` into an `Gc<T>`\n    ///\n    /// The conversion allocates on the heap and moves `t`\n    /// from the stack into it.\n    ///\n    /// # Example\n    /// ```rust\n    /// # use dumpster::unsync::Gc;\n    /// let x = 5;\n    /// let rc = Gc::new(5);\n    ///\n    /// assert_eq!(Gc::from(x), rc);\n    /// ```\n    fn from(value: T) -> Self {\n        Gc::new(value)\n    }\n}\n\nimpl<T: Trace, const N: usize> From<[T; N]> for Gc<[T]> {\n    /// Converts a [`[T; N]`](prim@array) into an `Gc<[T]>`.\n    ///\n    /// The conversion moves the array into a newly allocated `Gc`.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # use dumpster::unsync::Gc;\n    /// let original: [i32; 3] = [1, 2, 3];\n    /// let shared: Gc<[i32]> = Gc::from(original);\n    /// assert_eq!(&[1, 2, 3], &shared[..]);\n    /// ```\n    #[inline]\n    fn from(v: [T; N]) -> Gc<[T]> {\n        coerce_gc!(Gc::<[T; N]>::from(v))\n    }\n}\n\nimpl<T: Trace + Clone> From<&[T]> for Gc<[T]> {\n    /// Allocates a garbage-collected slice and fills it by cloning `slice`'s items.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # use dumpster::unsync::Gc;\n    /// let original: &[i32] = &[1, 2, 3];\n    /// let shared: Gc<[i32]> = Gc::from(original);\n    /// assert_eq!(&[1, 2, 3], &shared[..]);\n    /// ```\n    #[inline]\n    fn from(slice: &[T]) -> Gc<[T]> {\n        // Panic guard while cloning T elements.\n        // In the event of a panic, elements that have been written\n        // into the new GcBox will be dropped, then the memory freed.\n        struct Guard<T> {\n            /// pointer to `GcBox` to deallocate on panic\n            mem: *mut u8,\n            /// layout of the `GcBox` 
to deallocate on panic\n            layout: Layout,\n            /// pointer to the `GcBox`'s value\n            elems: *mut T,\n            /// the number of elements cloned so far\n            n_elems: usize,\n        }\n\n        impl<T> Drop for Guard<T> {\n            fn drop(&mut self) {\n                unsafe {\n                    let slice = slice::from_raw_parts_mut(self.elems, self.n_elems);\n                    ptr::drop_in_place(slice);\n\n                    dealloc(self.mem, self.layout);\n                }\n            }\n        }\n\n        unsafe {\n            let value_layout = Layout::array::<T>(slice.len()).unwrap();\n\n            let layout = Layout::new::<GcBox<()>>()\n                .extend(value_layout)\n                .unwrap()\n                .0\n                .pad_to_align();\n\n            let ptr = Self::allocate_for_layout_of_box(layout, |mem| {\n                ptr::slice_from_raw_parts_mut(mem.cast::<T>(), slice.len()) as *mut GcBox<[T]>\n            });\n\n            // Pointer to first element\n            let elems = (&raw mut (*ptr).value).cast::<T>();\n\n            let mut guard = Guard {\n                mem: ptr.cast::<u8>(),\n                layout,\n                elems,\n                n_elems: 0,\n            };\n\n            for (i, item) in slice.iter().enumerate() {\n                ptr::write(elems.add(i), item.clone());\n                guard.n_elems += 1;\n            }\n\n            // All clear. 
Forget the guard so it doesn't free the new GcBox.\n            mem::forget(guard);\n\n            _ = DUMPSTER.try_with(Dumpster::notify_created_gc);\n\n            Self {\n                ptr: Cell::new(Nullable::from_ptr(ptr)),\n            }\n        }\n    }\n}\n\nimpl<T: Trace + Clone> From<&mut [T]> for Gc<[T]> {\n    /// Allocates a garbage-collected slice and fills it by cloning `v`'s items.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # use dumpster::unsync::Gc;\n    /// let mut original = [1, 2, 3];\n    /// let original: &mut [i32] = &mut original;\n    /// let shared: Gc<[i32]> = Gc::from(original);\n    /// assert_eq!(&[1, 2, 3], &shared[..]);\n    /// ```\n    #[inline]\n    fn from(value: &mut [T]) -> Self {\n        Gc::from(&*value)\n    }\n}\n\nimpl From<&str> for Gc<str> {\n    /// Allocates a garbage-collected string slice and copies `v` into it.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # use dumpster::unsync::Gc;\n    /// let shared: Gc<str> = Gc::from(\"statue\");\n    /// assert_eq!(\"statue\", &shared[..]);\n    /// ```\n    #[inline]\n    fn from(v: &str) -> Self {\n        let bytes = Gc::<[u8]>::from(v.as_bytes());\n        unsafe { Gc::from_ptr(Gc::into_ptr(bytes) as *const GcBox<str>) }\n    }\n}\n\nimpl From<&mut str> for Gc<str> {\n    /// Allocates a garbage-collected string slice and copies `v` into it.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # use dumpster::unsync::Gc;\n    /// let mut original = String::from(\"statue\");\n    /// let original: &mut str = &mut original;\n    /// let shared: Gc<str> = Gc::from(original);\n    /// assert_eq!(\"statue\", &shared[..]);\n    /// ```\n    #[inline]\n    fn from(v: &mut str) -> Self {\n        Gc::from(&*v)\n    }\n}\n\nimpl From<Gc<str>> for Gc<[u8]> {\n    /// Converts a garbage-collected string slice into a byte slice.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # use dumpster::unsync::Gc;\n    /// let string: Gc<str> = 
Gc::from(\"eggplant\");\n    /// let bytes: Gc<[u8]> = Gc::from(string);\n    /// assert_eq!(\"eggplant\".as_bytes(), bytes.as_ref());\n    /// ```\n    #[inline]\n    fn from(value: Gc<str>) -> Self {\n        unsafe { Gc::from_ptr(Gc::into_ptr(value) as *const GcBox<[u8]>) }\n    }\n}\n\nimpl From<String> for Gc<str> {\n    /// Allocates a garbage-collected string slice and copies `v` into it.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # use dumpster::unsync::Gc;\n    /// let original: String = \"statue\".to_owned();\n    /// let shared: Gc<str> = Gc::from(original);\n    /// assert_eq!(\"statue\", &shared[..]);\n    /// ```\n    #[inline]\n    fn from(value: String) -> Self {\n        Self::from(&value[..])\n    }\n}\n\nimpl<T: Trace> From<Box<T>> for Gc<T> {\n    /// Move a boxed object to a new, garbage collected, allocation.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// # use dumpster::unsync::Gc;\n    /// let original: Box<i32> = Box::new(1);\n    /// let shared: Gc<i32> = Gc::from(original);\n    /// assert_eq!(1, *shared);\n    /// ```\n    #[inline]\n    fn from(src: Box<T>) -> Self {\n        unsafe {\n            let layout = Layout::for_value(&*src);\n            let gc_ptr = Gc::allocate_for_layout(layout, <*mut u8>::cast::<GcBox<T>>);\n\n            // Copy value as bytes\n            ptr::copy_nonoverlapping(\n                (&raw const *src).cast::<u8>(),\n                (&raw mut (*gc_ptr).value).cast::<u8>(),\n                layout.size(),\n            );\n\n            // Free the allocation without dropping its contents\n            let bptr = Box::into_raw(src);\n            let src = Box::from_raw(bptr.cast::<mem::ManuallyDrop<T>>());\n            drop(src);\n\n            _ = DUMPSTER.try_with(Dumpster::notify_created_gc);\n            Self::from_ptr(gc_ptr)\n        }\n    }\n}\n\nimpl<T: Trace> From<Vec<T>> for Gc<[T]> {\n    /// Allocates a garbage-collected slice and moves `vec`'s items into it.\n    ///\n 
   /// # Example\n    ///\n    /// ```\n    /// # use dumpster::unsync::Gc;\n    /// let unique: Vec<i32> = vec![1, 2, 3];\n    /// let shared: Gc<[i32]> = Gc::from(unique);\n    /// assert_eq!(&[1, 2, 3], &shared[..]);\n    /// ```\n    #[inline]\n    fn from(vec: Vec<T>) -> Self {\n        let mut vec = ManuallyDrop::new(vec);\n        let vec_cap = vec.capacity();\n        let vec_len = vec.len();\n        let vec_ptr = vec.as_mut_ptr();\n\n        let gc_ptr = Self::allocate_for_slice(vec_len);\n\n        unsafe {\n            let dst_ptr = (&raw mut (*gc_ptr).value).cast::<T>();\n            ptr::copy_nonoverlapping(vec_ptr, dst_ptr, vec_len);\n\n            let _ = Vec::from_raw_parts(vec_ptr, 0, vec_cap);\n\n            _ = DUMPSTER.try_with(Dumpster::notify_created_gc);\n            Self::from_ptr(gc_ptr)\n        }\n    }\n}\n\nimpl<'a, B: Trace> From<Cow<'a, B>> for Gc<B>\nwhere\n    B: ToOwned + ?Sized,\n    Gc<B>: From<&'a B> + From<B::Owned>,\n{\n    /// Creates a garbage-collected pointer from a clone-on-write pointer by\n    /// copying its content.\n    ///\n    /// # Example\n    ///\n    /// ```rust\n    /// # use dumpster::unsync::Gc;\n    /// # use std::borrow::Cow;\n    /// let cow: Cow<'_, str> = Cow::Borrowed(\"eggplant\");\n    /// let shared: Gc<str> = Gc::from(cow);\n    /// assert_eq!(\"eggplant\", &shared[..]);\n    /// ```\n    #[inline]\n    fn from(cow: Cow<'a, B>) -> Gc<B> {\n        match cow {\n            Cow::Borrowed(s) => Gc::from(s),\n            Cow::Owned(s) => Gc::from(s),\n        }\n    }\n}\n\nimpl<T> FromIterator<T> for Gc<[T]>\nwhere\n    T: Trace,\n{\n    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {\n        // Collect into a `Vec` for O(n) performance.\n        // TODO: this could be slightly optimized by using the `Gc<[]>` layout for perf, but this is\n        // a later problem.\n        Self::from(iter.into_iter().collect::<Vec<_>>())\n    }\n}\n"
  },
  {
    "path": "dumpster/src/unsync/tests.rs",
    "content": "/*\n    dumpster, a cycle-tracking garbage collector for Rust.    Copyright (C) 2023 Clayton Ramsey.\n\n    This Source Code Form is subject to the terms of the Mozilla Public\n    License, v. 2.0. If a copy of the MPL was not distributed with this\n    file, You can obtain one at http://mozilla.org/MPL/2.0/.\n*/\n\n//! Simple tests using manual implementations of [`Trace`].\n\nuse foldhash::{HashMap, HashMapExt};\n\nuse crate::{unsync::coerce_gc, Visitor};\n\nuse super::*;\nuse std::{\n    cell::{OnceCell, RefCell},\n    collections::hash_map::Entry,\n    mem::{take, transmute, MaybeUninit},\n    sync::{\n        atomic::{AtomicBool, AtomicU8, AtomicUsize, Ordering},\n        Mutex,\n    },\n};\n\nstruct DropCount(&'static AtomicUsize);\n\nimpl Drop for DropCount {\n    fn drop(&mut self) {\n        self.0.fetch_add(1, Ordering::Relaxed);\n    }\n}\n\nunsafe impl<V: Visitor> TraceWith<V> for DropCount {\n    fn accept(&self, _: &mut V) -> Result<(), ()> {\n        Ok(())\n    }\n}\n\n#[test]\n/// Test a simple data structure\nfn simple() {\n    static DROPPED: AtomicBool = AtomicBool::new(false);\n    struct Foo;\n\n    impl Drop for Foo {\n        fn drop(&mut self) {\n            DROPPED.store(true, Ordering::Relaxed);\n        }\n    }\n\n    unsafe impl<V: Visitor> TraceWith<V> for Foo {\n        fn accept(&self, _: &mut V) -> Result<(), ()> {\n            Ok(())\n        }\n    }\n\n    let gc1 = Gc::new(Foo);\n    let gc2 = Gc::clone(&gc1);\n\n    assert!(!DROPPED.load(Ordering::Relaxed));\n\n    drop(gc1);\n\n    assert!(!DROPPED.load(Ordering::Relaxed));\n\n    drop(gc2);\n\n    assert!(DROPPED.load(Ordering::Relaxed));\n}\n\n#[derive(Debug)]\nstruct MultiRef {\n    refs: RefCell<Vec<Gc<MultiRef>>>,\n    drop_count: &'static AtomicUsize,\n}\n\nunsafe impl<V: Visitor> TraceWith<V> for MultiRef {\n    fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n        self.refs.accept(visitor)\n    }\n}\n\nimpl Drop for MultiRef {\n    fn 
drop(&mut self) {\n        self.drop_count.fetch_add(1, Ordering::Relaxed);\n    }\n}\n\n#[test]\nfn self_referential() {\n    static DROPPED: AtomicU8 = AtomicU8::new(0);\n    struct Foo(RefCell<Option<Gc<Foo>>>);\n\n    unsafe impl<V: Visitor> TraceWith<V> for Foo {\n        #[inline]\n        fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n            self.0.accept(visitor)\n        }\n    }\n\n    impl Drop for Foo {\n        fn drop(&mut self) {\n            DROPPED.fetch_add(1, Ordering::Relaxed);\n        }\n    }\n\n    let gc = Gc::new(Foo(RefCell::new(None)));\n    gc.0.replace(Some(Gc::clone(&gc)));\n\n    assert_eq!(DROPPED.load(Ordering::Relaxed), 0);\n    drop(gc);\n    collect();\n    assert_eq!(DROPPED.load(Ordering::Relaxed), 1);\n}\n\n#[test]\nfn cyclic() {\n    static DROPPED: AtomicU8 = AtomicU8::new(0);\n    struct Foo(RefCell<Option<Gc<Foo>>>);\n\n    unsafe impl<V: Visitor> TraceWith<V> for Foo {\n        #[inline]\n        fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n            self.0.accept(visitor)\n        }\n    }\n\n    impl Drop for Foo {\n        fn drop(&mut self) {\n            DROPPED.fetch_add(1, Ordering::Relaxed);\n        }\n    }\n\n    let foo1 = Gc::new(Foo(RefCell::new(None)));\n    let foo2 = Gc::new(Foo(RefCell::new(Some(Gc::clone(&foo1)))));\n    foo1.0.replace(Some(Gc::clone(&foo2)));\n\n    assert_eq!(DROPPED.load(Ordering::Relaxed), 0);\n    drop(foo1);\n    assert_eq!(DROPPED.load(Ordering::Relaxed), 0);\n    drop(foo2);\n    collect();\n    assert_eq!(DROPPED.load(Ordering::Relaxed), 2);\n}\n\n/// Construct a complete graph of garbage-collected\nfn complete_graph(detectors: &'static [AtomicUsize]) -> Vec<Gc<MultiRef>> {\n    let mut gcs = Vec::new();\n    for d in detectors {\n        let gc = Gc::new(MultiRef {\n            refs: RefCell::new(Vec::new()),\n            drop_count: d,\n        });\n        for x in &gcs {\n            gc.refs.borrow_mut().push(Gc::clone(x));\n            
x.refs.borrow_mut().push(Gc::clone(&gc));\n        }\n        gcs.push(gc);\n    }\n\n    gcs\n}\n\n#[test]\nfn complete4() {\n    static DETECTORS: [AtomicUsize; 4] = [\n        AtomicUsize::new(0),\n        AtomicUsize::new(0),\n        AtomicUsize::new(0),\n        AtomicUsize::new(0),\n    ];\n\n    let mut gcs = complete_graph(&DETECTORS);\n\n    for _ in 0..3 {\n        gcs.pop();\n    }\n\n    for detector in &DETECTORS {\n        assert_eq!(detector.load(Ordering::Relaxed), 0);\n    }\n\n    drop(gcs);\n    collect();\n\n    for detector in &DETECTORS {\n        assert_eq!(detector.load(Ordering::Relaxed), 1);\n    }\n}\n\n#[test]\nfn parallel_loop() {\n    static COUNT_1: AtomicUsize = AtomicUsize::new(0);\n    static COUNT_2: AtomicUsize = AtomicUsize::new(0);\n    static COUNT_3: AtomicUsize = AtomicUsize::new(0);\n    static COUNT_4: AtomicUsize = AtomicUsize::new(0);\n\n    let gc1 = Gc::new(MultiRef {\n        drop_count: &COUNT_1,\n        refs: RefCell::new(Vec::new()),\n    });\n    let gc2 = Gc::new(MultiRef {\n        drop_count: &COUNT_2,\n        refs: RefCell::new(vec![Gc::clone(&gc1)]),\n    });\n    let gc3 = Gc::new(MultiRef {\n        drop_count: &COUNT_3,\n        refs: RefCell::new(vec![Gc::clone(&gc1)]),\n    });\n    let gc4 = Gc::new(MultiRef {\n        drop_count: &COUNT_4,\n        refs: RefCell::new(vec![Gc::clone(&gc2), Gc::clone(&gc3)]),\n    });\n    gc1.refs.borrow_mut().push(Gc::clone(&gc4));\n\n    assert_eq!(COUNT_1.load(Ordering::Relaxed), 0);\n    assert_eq!(COUNT_2.load(Ordering::Relaxed), 0);\n    assert_eq!(COUNT_3.load(Ordering::Relaxed), 0);\n    assert_eq!(COUNT_4.load(Ordering::Relaxed), 0);\n    drop(gc1);\n    assert_eq!(COUNT_1.load(Ordering::Relaxed), 0);\n    assert_eq!(COUNT_2.load(Ordering::Relaxed), 0);\n    assert_eq!(COUNT_3.load(Ordering::Relaxed), 0);\n    assert_eq!(COUNT_4.load(Ordering::Relaxed), 0);\n    drop(gc2);\n    assert_eq!(COUNT_1.load(Ordering::Relaxed), 0);\n    
assert_eq!(COUNT_2.load(Ordering::Relaxed), 0);\n    assert_eq!(COUNT_3.load(Ordering::Relaxed), 0);\n    assert_eq!(COUNT_4.load(Ordering::Relaxed), 0);\n    drop(gc3);\n    assert_eq!(COUNT_1.load(Ordering::Relaxed), 0);\n    assert_eq!(COUNT_2.load(Ordering::Relaxed), 0);\n    assert_eq!(COUNT_3.load(Ordering::Relaxed), 0);\n    assert_eq!(COUNT_4.load(Ordering::Relaxed), 0);\n    drop(gc4);\n    collect();\n    assert_eq!(COUNT_1.load(Ordering::Relaxed), 1);\n    assert_eq!(COUNT_2.load(Ordering::Relaxed), 1);\n    assert_eq!(COUNT_3.load(Ordering::Relaxed), 1);\n    assert_eq!(COUNT_4.load(Ordering::Relaxed), 1);\n}\n\n#[test]\n/// Check that we can drop a Gc which points to some allocation with a borrowed `RefCell` in it.\nfn double_borrow() {\n    static DROP_COUNT: AtomicUsize = AtomicUsize::new(0);\n\n    let gc = Gc::new(MultiRef {\n        refs: RefCell::new(Vec::new()),\n        drop_count: &DROP_COUNT,\n    });\n    gc.refs.borrow_mut().push(gc.clone());\n    let mut my_borrow = gc.refs.borrow_mut();\n    my_borrow.pop();\n    drop(my_borrow);\n\n    assert_eq!(DROP_COUNT.load(Ordering::Relaxed), 0);\n    collect();\n    drop(gc);\n    collect();\n    assert_eq!(DROP_COUNT.load(Ordering::Relaxed), 1);\n}\n\n#[test]\n#[cfg(feature = \"coerce-unsized\")]\nfn coerce_array() {\n    let gc1: Gc<[u8; 3]> = Gc::new([0, 0, 0]);\n    let gc2: Gc<[u8]> = gc1;\n    assert_eq!(gc2.len(), 3);\n    assert_eq!(\n        std::mem::size_of::<Gc<[u8]>>(),\n        2 * std::mem::size_of::<usize>()\n    );\n}\n\n#[test]\nfn coerce_array_using_macro() {\n    let gc1: Gc<[u8; 3]> = Gc::new([0, 0, 0]);\n    let gc2: Gc<[u8]> = coerce_gc!(gc1);\n    assert_eq!(gc2.len(), 3);\n    assert_eq!(\n        std::mem::size_of::<Gc<[u8]>>(),\n        2 * std::mem::size_of::<usize>()\n    );\n}\n\n#[test]\n#[should_panic = \"dereferencing Gc to already-collected object. 
This means a Gc escaped from a Drop implementation, likely implying a bug in your code.\"]\nfn escape_dead_pointer() {\n    thread_local! {static  ESCAPED: Mutex<Option<Gc<Escape>>> = const { Mutex::new(None) };}\n\n    struct Escape {\n        x: u8,\n        ptr: Mutex<Option<Gc<Escape>>>,\n    }\n\n    impl Drop for Escape {\n        fn drop(&mut self) {\n            ESCAPED.with(|e| {\n                let mut escaped_guard = e.lock().unwrap();\n                if escaped_guard.is_none() {\n                    *escaped_guard = (*self.ptr.lock().unwrap()).take();\n                }\n            });\n        }\n    }\n\n    unsafe impl<V: Visitor> TraceWith<V> for Escape {\n        fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n            self.ptr.accept(visitor)\n        }\n    }\n\n    let esc = Gc::new(Escape {\n        x: 0,\n        ptr: Mutex::new(None),\n    });\n\n    *(*esc).ptr.lock().unwrap() = Some(esc.clone());\n    drop(esc);\n    collect();\n    let _x = ESCAPED.with(|e| e.lock().unwrap().as_ref().unwrap().x);\n}\n\n#[test]\nfn from_box() {\n    let gc: Gc<String> = Gc::from(Box::new(String::from(\"hello\")));\n\n    // The `From<Box<T>>` implementation executes a different code path to\n    // construct the `Gc`.\n    //\n    // Here we ensure that the metadata is initialized to a valid state.\n    assert_eq!(Gc::ref_count(&gc).get(), 1);\n\n    assert_eq!(&*gc, \"hello\");\n}\n\n#[test]\nfn from_slice() {\n    let gc: Gc<[String]> = Gc::from(&[String::from(\"hello\"), String::from(\"world\")][..]);\n\n    // The `From<&[T]>` implementation executes a different code path to\n    // construct the `Gc`.\n    //\n    // Here we ensure that the metadata is initialized to a valid state.\n    assert_eq!(Gc::ref_count(&gc).get(), 1);\n\n    assert_eq!(&*gc, [\"hello\", \"world\"]);\n}\n\n#[test]\n#[should_panic = \"told you\"]\nfn from_slice_panic() {\n    struct MayPanicOnClone {\n        value: String,\n        panic: bool,\n    }\n\n    impl 
Clone for MayPanicOnClone {\n        fn clone(&self) -> Self {\n            assert!(!self.panic, \"told you\");\n\n            Self {\n                value: self.value.clone(),\n                panic: self.panic,\n            }\n        }\n    }\n\n    unsafe impl<V: Visitor> TraceWith<V> for MayPanicOnClone {\n        fn accept(&self, _: &mut V) -> Result<(), ()> {\n            Ok(())\n        }\n    }\n\n    let slice: &[MayPanicOnClone] = &[\n        MayPanicOnClone {\n            value: String::from(\"a\"),\n            panic: false,\n        },\n        MayPanicOnClone {\n            value: String::from(\"b\"),\n            panic: false,\n        },\n        MayPanicOnClone {\n            value: String::from(\"c\"),\n            panic: true,\n        },\n    ];\n\n    let _: Gc<[MayPanicOnClone]> = Gc::from(slice);\n}\n\n#[test]\nfn from_vec() {\n    let gc: Gc<[String]> = Gc::from(vec![String::from(\"hello\"), String::from(\"world\")]);\n\n    // The `From<Vec<T>>` implementation executes a different code path to\n    // construct the `Gc`.\n    //\n    // Here we ensure that the metadata is initialized to a valid state.\n    assert_eq!(Gc::ref_count(&gc).get(), 1);\n\n    assert_eq!(&*gc, [\"hello\", \"world\"]);\n}\n\n#[test]\nfn make_mut() {\n    let mut a = Gc::new(42);\n    let mut b = a.clone();\n    let mut c = b.clone();\n\n    assert_eq!(*Gc::make_mut(&mut a), 42);\n    assert_eq!(*Gc::make_mut(&mut b), 42);\n    assert_eq!(*Gc::make_mut(&mut c), 42);\n\n    *Gc::make_mut(&mut a) += 1;\n    *Gc::make_mut(&mut b) += 2;\n    *Gc::make_mut(&mut c) += 3;\n\n    assert_eq!(*a, 43);\n    assert_eq!(*b, 44);\n    assert_eq!(*c, 45);\n\n    // they should all be unique\n    assert_eq!(Gc::ref_count(&a).get(), 1);\n    assert_eq!(Gc::ref_count(&b).get(), 1);\n    assert_eq!(Gc::ref_count(&c).get(), 1);\n}\n\n#[test]\nfn make_mut_2() {\n    let mut a = Gc::new(42);\n    let b = a.clone();\n    let c = b.clone();\n\n    assert_eq!(*a, 42);\n    assert_eq!(*b, 
42);\n    assert_eq!(*c, 42);\n\n    *Gc::make_mut(&mut a) += 1;\n\n    assert_eq!(*a, 43);\n    assert_eq!(*b, 42);\n    assert_eq!(*c, 42);\n\n    // a should be unique\n    // b and c should share their object\n    assert_eq!(Gc::ref_count(&a).get(), 1);\n    assert_eq!(Gc::ref_count(&b).get(), 2);\n    assert_eq!(Gc::ref_count(&c).get(), 2);\n}\n\n#[test]\nfn make_mut_of_object_in_dumpster() {\n    #[derive(Clone)]\n    struct Foo {\n        // just some gc pointer so foo lands in the dumpster\n        something: Gc<i32>,\n    }\n\n    unsafe impl<V: Visitor> TraceWith<V> for Foo {\n        fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n            self.something.accept(visitor)\n        }\n    }\n\n    let mut foo = Gc::new(Foo {\n        something: Gc::new(5),\n    });\n\n    drop(foo.clone());\n\n    // now foo is in the dumpster\n    // and its ref count is one\n    assert_eq!(Gc::ref_count(&foo).get(), 1);\n\n    // we get a mut reference\n    let foo_mut = Gc::make_mut(&mut foo);\n\n    // now we collect garbage while we're also holding onto a mutable reference to foo\n    // if foo is still in the dumpster then the collection will dereference it and cause UB\n    collect();\n\n    // we need to do something with `foo_mut` here so the mutable borrow is actually held\n    // during collection\n    assert_eq!(*foo_mut.something, 5);\n}\n\n#[test]\n#[should_panic = \"panic on visit\"]\n#[cfg_attr(miri, ignore = \"intentionally leaks memory\")]\nfn panic_visit() {\n    #[expect(unused)]\n    struct PanicVisit(Gc<Self>);\n\n    /// We technically can make it part of the contract for `Trace` to reject panicking impls,\n    /// but it is good form to accept these even though they are malformed.\n    unsafe impl<V: Visitor> TraceWith<V> for PanicVisit {\n        fn accept(&self, _: &mut V) -> Result<(), ()> {\n            panic!(\"panic on visit\");\n        }\n    }\n\n    let gc = Gc::new_cyclic(PanicVisit);\n    let _ = gc.clone();\n    drop(gc);\n    
collect();\n}\n\n#[test]\nfn new_cyclic_nothing() {\n    static COUNT: AtomicUsize = AtomicUsize::new(0);\n\n    let gc = Gc::new_cyclic(|_| DropCount(&COUNT));\n    drop(gc);\n    // collect not necessary since this a drop by reference count\n    assert_eq!(COUNT.load(Ordering::Relaxed), 1);\n}\n\n#[test]\nfn new_cyclic_one() {\n    static DROP_COUNT: AtomicUsize = AtomicUsize::new(0);\n    #[expect(unused)]\n    struct Cycle(Gc<Self>, DropCount);\n\n    unsafe impl<V: Visitor> TraceWith<V> for Cycle {\n        fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n            self.0.accept(visitor)\n        }\n    }\n\n    let cyc = Gc::new_cyclic(|gc| Cycle(gc, DropCount(&DROP_COUNT)));\n    assert_eq!(Gc::ref_count(&cyc).get(), 2);\n    drop(cyc);\n    collect();\n\n    assert_eq!(DROP_COUNT.load(Ordering::Relaxed), 1);\n}\n\n#[test]\n#[should_panic = \"ehehe\"]\nfn new_cyclic_panic() {\n    let _: Gc<()> = Gc::new_cyclic(|_| panic!(\"ehehe\"));\n}\n\n#[test]\nfn dead_inside_alive() {\n    struct Cycle(Option<Gc<Self>>);\n    thread_local! 
{\n        static ESCAPE: Cell<Option<Gc<Cycle>>> = const { Cell::new(None) };\n    }\n\n    unsafe impl<V: Visitor> TraceWith<V> for Cycle {\n        fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n            self.0.accept(visitor)\n        }\n    }\n\n    impl Drop for Cycle {\n        fn drop(&mut self) {\n            ESCAPE.set(take(&mut self.0));\n        }\n    }\n\n    let c1 = Gc::new_cyclic(|gc| Cycle(Some(gc)));\n    drop(c1);\n    collect();\n\n    // `ESCAPE` is now a dead pointer\n\n    let alloc = Gc::new(ESCAPE.take().unwrap());\n    let alloc2 = alloc.clone();\n    drop(alloc);\n    drop(alloc2);\n    collect(); // if correct, this collection should not panic or encounter UB when collecting\n               // `alloc`\n}\n\n#[test]\n/// Test that creating a `Gc` during a `Drop` implementation will still not leak the `Gc`.\nfn leak_by_creation_in_drop() {\n    static DID_BAR_DROP: AtomicBool = AtomicBool::new(false);\n    struct Foo(OnceCell<Gc<Self>>);\n    struct Bar(OnceCell<Gc<Self>>);\n\n    unsafe impl<V: Visitor> TraceWith<V> for Foo {\n        fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n            self.0.accept(visitor)\n        }\n    }\n\n    unsafe impl<V: Visitor> TraceWith<V> for Bar {\n        fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n            self.0.accept(visitor)\n        }\n    }\n\n    impl Drop for Foo {\n        fn drop(&mut self) {\n            let gcbar = Gc::new(Bar(OnceCell::new()));\n            let _ = gcbar.0.set(gcbar.clone());\n            drop(gcbar);\n        }\n    }\n\n    impl Drop for Bar {\n        fn drop(&mut self) {\n            DID_BAR_DROP.store(true, Ordering::Relaxed);\n        }\n    }\n\n    let foo = Gc::new(Foo(OnceCell::new()));\n    let _ = foo.0.set(foo.clone());\n    drop(foo);\n    collect(); // causes Bar to be created and then leaked\n    collect(); // cleans up Bar\n    assert!(DID_BAR_DROP.load(Ordering::Relaxed));\n}\n\n#[test]\n#[cfg_attr(miri, ignore = 
\"miri is too slow\")]\n#[expect(clippy::too_many_lines)]\nfn unsync_fuzz() {\n    const N: usize = 100_000;\n    static DROP_DETECTORS: [AtomicUsize; N] = {\n        let mut detectors: [MaybeUninit<AtomicUsize>; N] =\n            unsafe { transmute(MaybeUninit::<[AtomicUsize; N]>::uninit()) };\n\n        let mut i = 0;\n        while i < N {\n            detectors[i] = MaybeUninit::new(AtomicUsize::new(0));\n            i += 1;\n        }\n\n        unsafe { transmute(detectors) }\n    };\n\n    #[derive(Debug)]\n    struct Alloc {\n        refs: Mutex<Vec<Gc<Alloc>>>,\n        id: usize,\n    }\n\n    impl Drop for Alloc {\n        fn drop(&mut self) {\n            let n_drop = DROP_DETECTORS[self.id].fetch_add(1, Ordering::Relaxed);\n            assert_eq!(n_drop, 0, \"must not double drop an allocation\");\n        }\n    }\n\n    unsafe impl<V: Visitor> TraceWith<V> for Alloc {\n        fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n            self.refs.accept(visitor)\n        }\n    }\n\n    fn dfs(alloc: &Gc<Alloc>, graph: &mut HashMap<usize, Vec<usize>>) {\n        if let Entry::Vacant(v) = graph.entry(alloc.id) {\n            v.insert(Vec::new());\n            alloc.refs.lock().unwrap().iter().for_each(|a| {\n                graph.get_mut(&alloc.id).unwrap().push(a.id);\n                dfs(a, graph);\n            });\n        }\n    }\n\n    fastrand::seed(12345);\n    let mut gcs = (0..50)\n        .map(|i| {\n            Gc::new(Alloc {\n                refs: Mutex::new(Vec::new()),\n                id: i,\n            })\n        })\n        .collect::<Vec<_>>();\n\n    let mut next_detector = 50;\n    for _ in 0..N {\n        if gcs.is_empty() {\n            gcs.push(Gc::new(Alloc {\n                refs: Mutex::new(Vec::new()),\n                id: next_detector,\n            }));\n            next_detector += 1;\n        }\n        match fastrand::u8(0..4) {\n            0 => {\n                // println!(\"add gc {next_detector}\");\n    
            gcs.push(Gc::new(Alloc {\n                    refs: Mutex::new(Vec::new()),\n                    id: next_detector,\n                }));\n                next_detector += 1;\n            }\n            1 => {\n                if gcs.len() > 1 {\n                    let from = fastrand::usize(0..gcs.len());\n                    let to = fastrand::usize(0..gcs.len());\n                    // println!(\"add ref {} -> {}\", gcs[from].id, gcs[to].id);\n                    let new_gc = gcs[to].clone();\n                    let mut guard = gcs[from].refs.lock().unwrap();\n                    guard.push(new_gc);\n                }\n            }\n            2 => {\n                let idx = fastrand::usize(0..gcs.len());\n                // println!(\"remove gc {}\", gcs[idx].id);\n                gcs.swap_remove(idx);\n            }\n            3 => {\n                let from = fastrand::usize(0..gcs.len());\n                let mut guard = gcs[from].refs.lock().unwrap();\n                if !guard.is_empty() {\n                    let to = fastrand::usize(0..guard.len());\n                    // println!(\"drop ref {} -> {}\", gcs[from].id, guard[to].id);\n                    guard.swap_remove(to);\n                }\n            }\n            _ => unreachable!(),\n        }\n    }\n\n    let mut graph = HashMap::new();\n    graph.insert(9999, Vec::new());\n    for alloc in &gcs {\n        graph.get_mut(&9999).unwrap().push(alloc.id);\n        dfs(alloc, &mut graph);\n    }\n    // println!(\"{graph:#?}\");\n\n    drop(gcs);\n    collect();\n\n    let mut n_missing = 0;\n    for count in &DROP_DETECTORS[..next_detector] {\n        let num = count.load(Ordering::Relaxed);\n        if num != 1 {\n            // println!(\"expected 1 for id {id} but got {num}\");\n            n_missing += 1;\n        }\n    }\n    assert_eq!(n_missing, 0);\n}\n\n#[test]\nfn custom_trait_object() {\n    trait MyTrait: Trace + Send + Sync {}\n    impl<T: Trace + Send + Sync> 
MyTrait for T {}\n\n    let gc = Gc::new(5i32);\n    let gc: Gc<dyn MyTrait> = coerce_gc!(gc);\n    _ = gc;\n}\n\n#[test]\nfn gc_from_iter() {\n    let _gc = (0..100).collect::<Gc<[_]>>();\n}\n\n#[test]\nfn self_referential_from_iter() {\n    struct Ab {\n        a: Gc<Self>,\n        b: Gc<Self>,\n    }\n\n    unsafe impl<V: Visitor> TraceWith<V> for Ab {\n        fn accept(&self, visitor: &mut V) -> Result<(), ()> {\n            self.a.accept(visitor)?;\n            self.b.accept(visitor)?;\n\n            Ok(())\n        }\n    }\n\n    let mut gcs = Vec::<Gc<Ab>>::new();\n    gcs.push(Gc::new_cyclic(|a: Gc<Ab>| Ab { a: a.clone(), b: a }));\n    for _ in 0..10 {\n        let b = gcs.last().unwrap().clone();\n        gcs.push(Gc::new_cyclic(|a: Gc<Ab>| Ab { a, b }));\n    }\n    let _big_gc = gcs.into_iter().collect::<Gc<[_]>>();\n}\n"
  },
  {
    "path": "dumpster_bench/.gitignore",
    "content": "/target\n/Cargo.lock\n"
  },
  {
    "path": "dumpster_bench/Cargo.toml",
    "content": "[package]\nname = \"dumpster_bench\"\nversion = \"0.1.0\"\nedition = \"2021\"\nlicense = \"MPL-2.0\"\nauthors = [\"Clayton Ramsey\"]\ndescription = \"Benchmark for dumpster garbage collection crate\"\nrepository = \"https://github.com/claytonwramsey/dumpster\"\nreadme = \"../README.md\"\nkeywords = [\"dumpster\", \"garbage_collector\", \"benchmark\"]\ncategories = [\"data-structures\", \"memory-management\"]\npublish = false\n\n[dependencies]\ndumpster = { path = \"../dumpster\", features = [\"derive\"] }\ngc = \"0.5.0\"\nbacon_rajan_cc = \"0.4.0\"\nfastrand = \"2.0.0\"\nshredder = \"0.2.0\"\nshredder_derive = \"0.2.0\"\nparking_lot = \"0.12.3\"\nrust-cc = \"0.6.2\"\ntracing-rc = { version = \"0.2.0\", default-features = false, features = [\n    \"sync\",\n] }\n"
  },
  {
    "path": "dumpster_bench/scripts/make_plots.py",
    "content": "# dumpster, a cycle-tracking garbage collector for Rust.\n# Copyright (C) 2023 Clayton Ramsey.\n\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport matplotlib.pyplot as plt\nimport sys\n\ncsv_file = open(sys.argv[1])\n\nmulti_times = {}\nsingle_times = {}\n\nfor line in csv_file.read().split('\\n'):\n    if len(line) == 0:\n        continue\n    name, test_type, n_threads, n_ops, time = line.split(',')\n    times = single_times if test_type == 'single_threaded' else multi_times\n    if name not in times.keys():\n        times[name] = ([], [])\n    times[name][0].append(int(n_threads))\n    times[name][1].append(float(time) / 1000.0)\n\nfor (name, v) in multi_times.items():\n    (xs, ys) = v\n    plt.scatter(xs, ys, label=name)\nplt.xlabel('Number of threads')\nplt.ylabel('Time taken for 1M ops (ms)')\nplt.title('Parallel garbage collector scaling')\nplt.legend()\nplt.show()\n\nmulti_times.pop('shredder', None)\nfor (i, (name, v)) in enumerate(multi_times.items()):\n    (xs, ys) = v\n    plt.scatter(xs, ys, label=name, color=f\"tab:{['blue', 'orange', 'green', 'purple'][i]}\")\nplt.xlabel('Number of threads')\nplt.ylabel('Time taken for 1M ops (ms)')\nplt.title('Parallel garbage collector scaling (sans shredder)')\nplt.legend()\nplt.show()\n\ndef violin(times: dict, name: str):\n    data = []\n    labels = []\n    for (label, (_, ys)) in times.items():\n        data.append(ys)\n        labels.append(label)\n\n    fig = plt.figure()\n    plt.violinplot(data, range(len(data)), vert=False)\n    plt.yticks(range(len(data)), labels=labels)\n    plt.ylabel('Garbage collector')\n    plt.xlabel('Runtime for 1M ops (ms)')\n    plt.tight_layout(rect=(10, 1.08, 1.08, 1.08))\n    plt.title(name)\n    plt.show()\n\nviolin(single_times, 'Single-threaded GC comparison')\nsingle_times.pop('shredder', 
None)\nviolin(single_times, 'Single-threaded GC comparison (sans shredder)')\n"
  },
  {
    "path": "dumpster_bench/src/lib.rs",
    "content": "/*\n    dumpster, a cycle-tracking garbage collector for Rust.\n    Copyright (C) 2023 Clayton Ramsey.\n\n    This Source Code Form is subject to the terms of the Mozilla Public\n    License, v. 2.0. If a copy of the MPL was not distributed with this\n    file, You can obtain one at http://mozilla.org/MPL/2.0/.\n*/\n\n#![expect(non_local_definitions)]\n\nuse std::{\n    rc::Rc,\n    sync::{Arc, Mutex},\n};\n\n/// A garbage-collected structure which points to an arbitrary number of other garbage-collected\n/// structures.\n///\n/// Cloning a `Multiref` yields a duplicated pointer, not a deep copy.\npub trait Multiref: Clone {\n    /// Create a new multiref which points to some data.\n    fn new(points_to: Vec<Self>) -> Self;\n    /// Apply some function to the backing set of references owned by this structure.\n    fn apply(&self, f: impl FnOnce(&mut Vec<Self>));\n    /// Collect all the floating GCs out there.\n    fn collect();\n}\n\n/// A trait for thread-safe synchronized multirefs.\npub trait SyncMultiref: Send + Sync + Multiref {}\n\nimpl<T> SyncMultiref for T where T: Send + Sync + Multiref {}\n\n/// A simple multi-reference which uses `Rc`, which is technically not a garbage collector, as a\n/// baseline.\npub struct RcMultiref {\n    refs: Mutex<Vec<Rc<Self>>>,\n}\n\n/// A simple multi-reference which uses `Arc`, which is technically not a garbage collector, as a\n/// baseline.\npub struct ArcMultiref {\n    refs: Mutex<Vec<Arc<Self>>>,\n}\n\n#[derive(dumpster::Trace, Debug)]\npub struct DumpsterSyncMultiref {\n    refs: Mutex<Vec<dumpster::sync::Gc<Self>>>,\n}\n\n#[derive(dumpster::Trace)]\npub struct DumpsterUnsyncMultiref {\n    refs: Mutex<Vec<dumpster::unsync::Gc<Self>>>,\n}\n\npub struct GcMultiref {\n    refs: gc::GcCell<Vec<gc::Gc<GcMultiref>>>,\n}\n\npub struct BaconRajanMultiref {\n    refs: Mutex<Vec<bacon_rajan_cc::Cc<Self>>>,\n}\n\n#[derive(shredder_derive::Scan)]\npub struct ShredderMultiref {\n    refs: 
Mutex<Vec<shredder::Gc<Self>>>,\n}\n\n#[derive(shredder_derive::Scan)]\npub struct ShredderSyncMultiref {\n    refs: Mutex<Vec<shredder::Gc<Self>>>,\n}\n\nimpl bacon_rajan_cc::Trace for BaconRajanMultiref {\n    fn trace(&self, tracer: &mut bacon_rajan_cc::Tracer) {\n        self.refs.lock().unwrap().trace(tracer);\n    }\n}\n\nimpl gc::Finalize for GcMultiref {}\n\nunsafe impl gc::Trace for GcMultiref {\n    #[inline]\n    unsafe fn trace(&self) {\n        self.refs.trace();\n    }\n\n    #[inline]\n    unsafe fn root(&self) {\n        self.refs.root();\n    }\n\n    #[inline]\n    unsafe fn unroot(&self) {\n        self.refs.unroot();\n    }\n\n    #[inline]\n    fn finalize_glue(&self) {\n        self.refs.finalize_glue()\n    }\n}\n\n#[derive(rust_cc::Finalize)]\npub struct RustCcMultiRef {\n    refs: Mutex<Vec<rust_cc::Cc<RustCcMultiRef>>>,\n}\n\nunsafe impl rust_cc::Trace for RustCcMultiRef {\n    fn trace(&self, ctx: &mut rust_cc::Context<'_>) {\n        self.refs.lock().unwrap().trace(ctx)\n    }\n}\n\npub struct TracingRcUnsyncMultiRef {\n    refs: Vec<tracing_rc::rc::Gc<TracingRcUnsyncMultiRef>>,\n}\n\nimpl tracing_rc::rc::Trace for TracingRcUnsyncMultiRef {\n    fn visit_children(&self, visitor: &mut tracing_rc::rc::GcVisitor) {\n        self.refs.visit_children(visitor)\n    }\n}\n\npub struct TracingRcSyncMultiRef {\n    refs: Mutex<Vec<tracing_rc::sync::Agc<TracingRcSyncMultiRef>>>,\n}\n\nimpl tracing_rc::sync::Trace for TracingRcSyncMultiRef {\n    fn visit_children(&self, visitor: &mut tracing_rc::sync::GcVisitor) {\n        self.refs.lock().unwrap().visit_children(visitor)\n    }\n}\n\nimpl Multiref for dumpster::sync::Gc<DumpsterSyncMultiref> {\n    fn new(points_to: Vec<Self>) -> Self {\n        dumpster::sync::Gc::new(DumpsterSyncMultiref {\n            refs: Mutex::new(points_to),\n        })\n    }\n\n    fn apply(&self, f: impl FnOnce(&mut Vec<Self>)) {\n        f(self.refs.lock().unwrap().as_mut());\n    }\n\n    fn collect() {\n        
dumpster::sync::collect()\n    }\n}\n\nimpl Multiref for dumpster::unsync::Gc<DumpsterUnsyncMultiref> {\n    fn new(points_to: Vec<Self>) -> Self {\n        dumpster::unsync::Gc::new(DumpsterUnsyncMultiref {\n            refs: Mutex::new(points_to),\n        })\n    }\n\n    fn apply(&self, f: impl FnOnce(&mut Vec<Self>)) {\n        f(self.refs.lock().unwrap().as_mut());\n    }\n\n    fn collect() {\n        dumpster::unsync::collect()\n    }\n}\n\nimpl Multiref for gc::Gc<GcMultiref> {\n    fn new(points_to: Vec<Self>) -> Self {\n        gc::Gc::new(GcMultiref {\n            refs: gc::GcCell::new(points_to),\n        })\n    }\n\n    fn apply(&self, f: impl FnOnce(&mut Vec<Self>)) {\n        f(self.refs.borrow_mut().as_mut())\n    }\n\n    fn collect() {\n        gc::force_collect();\n    }\n}\n\nimpl Multiref for bacon_rajan_cc::Cc<BaconRajanMultiref> {\n    fn new(points_to: Vec<Self>) -> Self {\n        bacon_rajan_cc::Cc::new(BaconRajanMultiref {\n            refs: Mutex::new(points_to),\n        })\n    }\n\n    fn apply(&self, f: impl FnOnce(&mut Vec<Self>)) {\n        f(self.refs.lock().unwrap().as_mut());\n    }\n\n    fn collect() {\n        bacon_rajan_cc::collect_cycles();\n        assert_eq!(bacon_rajan_cc::number_of_roots_buffered(), 0);\n    }\n}\n\nimpl Multiref for shredder::Gc<ShredderMultiref> {\n    fn new(points_to: Vec<Self>) -> Self {\n        shredder::Gc::new(ShredderMultiref {\n            refs: Mutex::new(points_to),\n        })\n    }\n\n    fn apply(&self, f: impl FnOnce(&mut Vec<Self>)) {\n        f(self.get().refs.lock().unwrap().as_mut());\n    }\n\n    fn collect() {\n        shredder::synchronize_destructors();\n    }\n}\n\nimpl Multiref for shredder::Gc<ShredderSyncMultiref> {\n    fn new(points_to: Vec<Self>) -> Self {\n        shredder::Gc::new(ShredderSyncMultiref {\n            refs: Mutex::new(points_to),\n        })\n    }\n\n    fn apply(&self, f: impl FnOnce(&mut Vec<Self>)) {\n        
f(self.get().refs.lock().unwrap().as_mut());\n    }\n\n    fn collect() {\n        shredder::synchronize_destructors();\n    }\n}\n\nimpl Multiref for rust_cc::Cc<RustCcMultiRef> {\n    fn new(points_to: Vec<Self>) -> Self {\n        rust_cc::Cc::new(RustCcMultiRef {\n            refs: Mutex::new(points_to),\n        })\n    }\n\n    fn apply(&self, f: impl FnOnce(&mut Vec<Self>)) {\n        f(self.refs.lock().unwrap().as_mut());\n    }\n\n    fn collect() {\n        rust_cc::collect_cycles();\n    }\n}\n\nimpl Multiref for tracing_rc::rc::Gc<TracingRcUnsyncMultiRef> {\n    fn new(points_to: Vec<Self>) -> Self {\n        tracing_rc::rc::Gc::new(TracingRcUnsyncMultiRef { refs: points_to })\n    }\n\n    fn apply(&self, f: impl FnOnce(&mut Vec<Self>)) {\n        f(self.borrow_mut().refs.as_mut());\n    }\n\n    fn collect() {\n        tracing_rc::rc::collect_full();\n    }\n}\n\nimpl Multiref for tracing_rc::sync::Agc<TracingRcSyncMultiRef> {\n    fn new(points_to: Vec<Self>) -> Self {\n        tracing_rc::sync::Agc::new(TracingRcSyncMultiRef {\n            refs: Mutex::new(points_to),\n        })\n    }\n\n    fn apply(&self, f: impl FnOnce(&mut Vec<Self>)) {\n        f(self.read().refs.lock().unwrap().as_mut());\n    }\n\n    fn collect() {\n        tracing_rc::sync::collect_full();\n    }\n}\n\nimpl Multiref for Rc<RcMultiref> {\n    fn new(points_to: Vec<Self>) -> Self {\n        Rc::new(RcMultiref {\n            refs: Mutex::new(points_to),\n        })\n    }\n\n    fn apply(&self, f: impl FnOnce(&mut Vec<Self>)) {\n        f(self.refs.lock().unwrap().as_mut());\n    }\n\n    fn collect() {}\n}\n\nimpl Multiref for Arc<ArcMultiref> {\n    fn new(points_to: Vec<Self>) -> Self {\n        Arc::new(ArcMultiref {\n            refs: Mutex::new(points_to),\n        })\n    }\n\n    fn apply(&self, f: impl FnOnce(&mut Vec<Self>)) {\n        f(self.refs.lock().unwrap().as_mut());\n    }\n\n    fn collect() {}\n}\n"
  },
  {
    "path": "dumpster_bench/src/main.rs",
    "content": "/*\n    dumpster, a cycle-tracking garbage collector for Rust.\n    Copyright (C) 2023 Clayton Ramsey.\n\n    This Source Code Form is subject to the terms of the Mozilla Public\n    License, v. 2.0. If a copy of the MPL was not distributed with this\n    file, You can obtain one at http://mozilla.org/MPL/2.0/.\n*/\n\n//! Benchmarks for the `dumpster` garbage collection library.\n\nuse std::{\n    fmt::Display,\n    rc::Rc,\n    sync::Arc,\n    thread::{self, available_parallelism, scope},\n    time::{Duration, Instant},\n};\n\nuse dumpster_bench::{\n    ArcMultiref, BaconRajanMultiref, DumpsterSyncMultiref, DumpsterUnsyncMultiref, GcMultiref,\n    Multiref, RcMultiref, RustCcMultiRef, ShredderMultiref, ShredderSyncMultiref, SyncMultiref,\n    TracingRcSyncMultiRef, TracingRcUnsyncMultiRef,\n};\n\nuse parking_lot::Mutex;\n\nstruct BenchmarkData {\n    name: &'static str,\n    test: &'static str,\n    n_threads: usize,\n    n_ops: usize,\n    duration: Duration,\n}\n\nimpl Display for BenchmarkData {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        write!(\n            f,\n            \"{},{},{},{},{}\",\n            self.name,\n            self.test,\n            self.n_threads,\n            self.n_ops,\n            self.duration.as_micros()\n        )\n    }\n}\n\nfn unsync_never_collect(_: &dumpster::unsync::CollectInfo) -> bool {\n    false\n}\n\nfn sync_never_collect(_: &dumpster::sync::CollectInfo) -> bool {\n    false\n}\n\nfn main() {\n    const N_ITERS: usize = 1_000_000;\n    for _ in 0..100 {\n        dumpster::unsync::set_collect_condition(dumpster::unsync::default_collect_condition);\n        println!(\n            \"{}\",\n            single_threaded::<dumpster::unsync::Gc<DumpsterUnsyncMultiref>>(\n                \"dumpster (unsync)\",\n                N_ITERS,\n            )\n        );\n        dumpster::unsync::set_collect_condition(unsync_never_collect);\n        println!(\n            \"{}\",\n   
         single_threaded::<dumpster::unsync::Gc<DumpsterUnsyncMultiref>>(\n                \"dumpster (unsync/manual)\",\n                N_ITERS,\n            )\n        );\n        dumpster::sync::set_collect_condition(dumpster::sync::default_collect_condition);\n        println!(\n            \"{}\",\n            single_threaded::<dumpster::sync::Gc<DumpsterSyncMultiref>>(\"dumpster (sync)\", N_ITERS)\n        );\n        dumpster::sync::set_collect_condition(sync_never_collect);\n        println!(\n            \"{}\",\n            single_threaded::<dumpster::sync::Gc<DumpsterSyncMultiref>>(\n                \"dumpster (sync/manual)\",\n                N_ITERS\n            )\n        );\n        println!(\"{}\", single_threaded::<gc::Gc<GcMultiref>>(\"gc\", N_ITERS));\n        println!(\n            \"{}\",\n            single_threaded::<bacon_rajan_cc::Cc<BaconRajanMultiref>>(\"bacon-rajan-cc\", N_ITERS)\n        );\n\n        rust_cc::config::config(|config| {\n            config.set_auto_collect(true);\n        })\n        .unwrap();\n        println!(\n            \"{}\",\n            single_threaded::<rust_cc::Cc<RustCcMultiRef>>(\"rust-cc\", N_ITERS)\n        );\n        rust_cc::config::config(|config| {\n            config.set_auto_collect(false);\n        })\n        .unwrap();\n        println!(\n            \"{}\",\n            single_threaded::<rust_cc::Cc<RustCcMultiRef>>(\"rust-cc (manual)\", N_ITERS)\n        );\n        println!(\n            \"{}\",\n            single_threaded::<tracing_rc::rc::Gc<TracingRcUnsyncMultiRef>>(\n                \"tracing-rc (unsync)\",\n                N_ITERS\n            )\n        );\n        println!(\n            \"{}\",\n            single_threaded::<tracing_rc::sync::Agc<TracingRcSyncMultiRef>>(\n                \"tracing-rc (sync)\",\n                N_ITERS\n            )\n        );\n        for n_threads in 1..=available_parallelism().unwrap().get() {\n            // println!(\"--- {n_threads} 
threads\");\n            dumpster::sync::set_collect_condition(dumpster::sync::default_collect_condition);\n            println!(\n                \"{}\",\n                multi_threaded::<dumpster::sync::Gc<DumpsterSyncMultiref>>(\n                    \"dumpster (sync)\",\n                    N_ITERS,\n                    n_threads,\n                )\n            );\n\n            dumpster::sync::set_collect_condition(sync_never_collect);\n            println!(\n                \"{}\",\n                multi_threaded::<dumpster::sync::Gc<DumpsterSyncMultiref>>(\n                    \"dumpster (sync/manual)\",\n                    N_ITERS,\n                    n_threads,\n                )\n            );\n            println!(\n                \"{}\",\n                multi_threaded::<tracing_rc::sync::Agc<TracingRcSyncMultiRef>>(\n                    \"tracing-rc (sync)\",\n                    N_ITERS,\n                    n_threads\n                )\n            );\n        }\n    }\n\n    for _ in 0..20 {\n        // run fewer tests of shredder because it takes forever\n\n        println!(\n            \"{}\",\n            single_threaded::<shredder::Gc<ShredderMultiref>>(\"shredder\", N_ITERS)\n        );\n\n        for n_threads in 1..=available_parallelism().unwrap().get() {\n            println!(\n                \"{}\",\n                multi_threaded::<shredder::Gc<ShredderSyncMultiref>>(\n                    \"shredder\", N_ITERS, n_threads\n                )\n            );\n        }\n    }\n\n    for _ in 0..100 {\n        println!(\"{}\", single_threaded::<Rc<RcMultiref>>(\"Rc\", N_ITERS));\n        println!(\"{}\", single_threaded::<Arc<ArcMultiref>>(\"Arc\", N_ITERS));\n        for n_threads in 1..=available_parallelism().unwrap().get() {\n            println!(\n                \"{}\",\n                multi_threaded::<Arc<ArcMultiref>>(\"Arc\", N_ITERS, n_threads)\n            );\n        }\n    }\n}\n\n/// Run a benchmark of a multi-threaded 
garbage collector.\nfn single_threaded<M: Multiref>(name: &'static str, n_iters: usize) -> BenchmarkData {\n    fastrand::seed(12345);\n    let mut gcs = (0..50).map(|_| M::new(Vec::new())).collect::<Vec<_>>();\n\n    // println!(\"{name}: running...\");\n    let tic = Instant::now();\n    for _n in 0..n_iters {\n        // println!(\"iter {_n}\");\n        if gcs.is_empty() {\n            gcs.push(M::new(Vec::new()));\n        } else {\n            match fastrand::u8(0..4) {\n                0 => {\n                    // println!(\"create allocation\");\n                    // create new allocation\n                    gcs.push(M::new(Vec::new()));\n                }\n                1 => {\n                    // println!(\"add reference\");\n                    // add a reference\n                    if gcs.len() > 1 {\n                        let from = fastrand::usize(0..gcs.len());\n                        let to = fastrand::usize(0..gcs.len());\n                        let new_gc = gcs[to].clone();\n                        gcs[from].apply(|v| v.push(new_gc));\n                    }\n                }\n                2 => {\n                    // println!(\"remove gc\");\n                    // destroy a reference owned by the vector\n                    gcs.swap_remove(fastrand::usize(0..gcs.len()));\n                }\n                3 => {\n                    // println!(\"remove reference\");\n                    // destroy a reference owned by some gc\n                    let from = fastrand::usize(0..gcs.len());\n                    gcs[from].apply(|v| {\n                        if !v.is_empty() {\n                            let to = fastrand::usize(0..v.len());\n                            v.swap_remove(to);\n                        }\n                    })\n                }\n                _ => unreachable!(),\n            }\n        }\n    }\n    drop(gcs);\n    M::collect();\n    let toc = Instant::now();\n    // println!(\"finished {name} 
in {:?}\", (toc - tic));\n    BenchmarkData {\n        name,\n        test: \"single_threaded\",\n        n_threads: 1,\n        n_ops: n_iters,\n        duration: toc.duration_since(tic),\n    }\n}\n\nfn multi_threaded<M: SyncMultiref>(\n    name: &'static str,\n    n_iters: usize,\n    n_threads: usize,\n) -> BenchmarkData {\n    let vecs: Vec<Mutex<Vec<M>>> = (0..(n_threads * 10))\n        .map(|_| Mutex::new((0..50).map(|_| M::new(Vec::new())).collect()))\n        .collect();\n\n    let tic = Mutex::new(Instant::now());\n    let toc = Mutex::new(Instant::now());\n    scope(|s| {\n        for i in 0..n_threads {\n            let vecs = &vecs;\n            let tic = &tic;\n            let toc = &toc;\n            thread::Builder::new()\n                .name(format!(\"multi_threaded{i}\"))\n                .spawn_scoped(s, move || {\n                    *tic.lock() = Instant::now();\n                    fastrand::seed(12345 + i as u64);\n\n                    for _n in 0..(n_iters / n_threads) {\n                        let v1_id = fastrand::usize(0..vecs.len());\n                        match fastrand::u8(0..4) {\n                            // create\n                            0 => vecs[v1_id].lock().push(M::new(Vec::new())),\n                            // add ref\n                            1 => {\n                                let v2_id = fastrand::usize(0..vecs.len());\n                                if v1_id == v2_id {\n                                    let g1 = vecs[v1_id].lock();\n                                    if g1.len() < 2 {\n                                        continue;\n                                    }\n                                    let i1 = fastrand::usize(0..g1.len());\n                                    let i2 = fastrand::usize(0..g1.len());\n                                    let new_gc = g1[i2].clone();\n                                    g1[i1].apply(|v| v.push(new_gc));\n                                } else 
{\n                                    // prevent deadlock by locking lower one first\n                                    let (g1, g2) = if v1_id < v2_id {\n                                        (vecs[v1_id].lock(), vecs[v2_id].lock())\n                                    } else {\n                                        let g2 = vecs[v2_id].lock();\n                                        (vecs[v1_id].lock(), g2)\n                                    };\n                                    if g1.is_empty() || g2.is_empty() {\n                                        continue;\n                                    }\n                                    let i1 = fastrand::usize(0..g1.len());\n                                    let i2 = fastrand::usize(0..g2.len());\n                                    let new_gc = g2[i2].clone();\n                                    g1[i1].apply(|v| v.push(new_gc));\n                                }\n                            }\n                            // destroy gc\n                            2 => {\n                                let mut guard = vecs[v1_id].lock();\n                                if guard.is_empty() {\n                                    continue;\n                                }\n                                let idx = fastrand::usize(0..guard.len());\n                                guard.swap_remove(idx);\n                            }\n                            // destroy ref\n                            3 => {\n                                let guard = vecs[v1_id].lock();\n                                if guard.is_empty() {\n                                    continue;\n                                }\n                                guard[fastrand::usize(0..guard.len())].apply(|v| {\n                                    if !v.is_empty() {\n                                        v.swap_remove(fastrand::usize(0..v.len()));\n                                    }\n                         
       });\n                            }\n                            _ => unreachable!(),\n                        };\n                    }\n                    *toc.lock() = Instant::now();\n                })\n                .unwrap();\n        }\n    });\n    M::collect(); // This op is single threaded and shouldn't count\n    let duration = toc.lock().duration_since(*tic.lock());\n\n    // println!(\"finished {name} in {duration:?}\");\n    BenchmarkData {\n        name,\n        test: \"multi_threaded\",\n        n_threads,\n        n_ops: (n_iters / n_threads) * n_threads,\n        duration,\n    }\n}\n"
  },
  {
    "path": "dumpster_derive/.gitignore",
    "content": "/target\n/Cargo.lock\n"
  },
  {
    "path": "dumpster_derive/Cargo.toml",
    "content": "[package]\nname = \"dumpster_derive\"\nversion = \"2.0.0\"\nedition = \"2021\"\nlicense = \"MPL-2.0\"\nauthors = [\"Clayton Ramsey\"]\ndescription = \"Implementation of #[derive(Trace)] for dumpster\"\nrepository = \"https://github.com/claytonwramsey/dumpster\"\nreadme = \"../README.md\"\nkeywords = [\"dumpster\", \"garbage_collector\", \"derive\", \"gc\"]\ncategories = [\"memory-management\", \"data-structures\"]\n\n[lib]\nproc-macro = true\n\n[dependencies]\nproc-macro2 = \"1.0.60\"\nquote = \"1.0\"\nsyn = \"2.0\"\n# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html\n"
  },
  {
    "path": "dumpster_derive/src/lib.rs",
    "content": "/*\n    dumpster, a cycle-tracking garbage collector for Rust.\n    Copyright (C) 2023 Clayton Ramsey.\n\n    This Source Code Form is subject to the terms of the Mozilla Public\n    License, v. 2.0. If a copy of the MPL was not distributed with this\n    file, You can obtain one at http://mozilla.org/MPL/2.0/.\n*/\n\n#![warn(clippy::pedantic)]\n#![warn(clippy::cargo)]\n#![allow(clippy::multiple_crate_versions)]\n\nuse proc_macro2::{TokenStream, TokenTree};\nuse quote::{format_ident, quote, quote_spanned, ToTokens as _};\nuse syn::{\n    parse_macro_input, parse_quote, spanned::Spanned, Data, DeriveInput, Fields, GenericParam,\n    Generics, Ident, Index, Path,\n};\n\n#[proc_macro_derive(Trace, attributes(dumpster))]\n/// Derive `Trace` for a type.\npub fn derive_trace(input: proc_macro::TokenStream) -> proc_macro::TokenStream {\n    let input = parse_macro_input!(input as DeriveInput);\n    let mut dumpster: Path = parse_quote!(::dumpster);\n\n    // look for `crate` argument\n    for attr in &input.attrs {\n        if !attr.path().is_ident(\"dumpster\") {\n            continue;\n        }\n\n        let result = attr.parse_nested_meta(|meta| {\n            if meta.path.is_ident(\"crate\") {\n                dumpster = meta.value()?.parse()?;\n                Ok(())\n            } else {\n                Err(meta.error(\"unsupported attribute\"))\n            }\n        });\n\n        if let Err(err) = result {\n            return err.into_compile_error().into();\n        }\n    }\n\n    // name of the type being implemented\n    let name = &input.ident;\n\n    // generic parameters of the type being implemented\n    let generics = add_trait_bounds(&dumpster, input.generics);\n    let (impl_generics, ty_generics, where_clause) = generics.split_for_impl();\n\n    let impl_generics = {\n        let tokens = impl_generics.into_token_stream();\n        let param = quote! 
{ __V: #dumpster::Visitor };\n\n        let params = if tokens.is_empty() {\n            quote! { #param }\n        } else {\n            // remove the angle bracket delimiters\n            let mut tokens: Vec<TokenTree> = tokens.into_iter().skip(1).collect();\n            tokens.pop();\n\n            let tokens: TokenStream = tokens.into_iter().collect();\n\n            quote! { #param, #tokens }\n        };\n\n        quote! { < #params > }\n    };\n\n    let do_visitor = delegate_methods(&dumpster, name, &input.data);\n\n    let generated = quote! {\n        unsafe impl #impl_generics #dumpster::TraceWith<__V> for #name #ty_generics #where_clause {\n            #[inline]\n            fn accept(&self, visitor: &mut __V) -> ::core::result::Result<(), ()> {\n                #do_visitor\n            }\n        }\n    };\n\n    generated.into()\n}\n\n/// Collect the trait bounds for some generic expression.\nfn add_trait_bounds(dumpster: &Path, mut generics: Generics) -> Generics {\n    for param in &mut generics.params {\n        if let GenericParam::Type(ref mut type_param) = *param {\n            type_param\n                .bounds\n                .push(parse_quote!(#dumpster::TraceWith<__V>));\n        }\n    }\n    generics\n}\n\n#[allow(clippy::too_many_lines)]\n/// Generate method implementations for [`Trace`] for some data type.\nfn delegate_methods(dumpster: &Path, name: &Ident, data: &Data) -> TokenStream {\n    match data {\n        Data::Struct(data) => match data.fields {\n            Fields::Named(ref f) => {\n                let delegate_visit = f.named.iter().map(|f| {\n                    let name = &f.ident;\n                    quote_spanned! {f.span() =>\n                        #dumpster::TraceWith::accept(\n                            &self.#name,\n                            visitor\n                        )?;\n                    }\n                });\n\n                quote! 
{ #(#delegate_visit)* ::core::result::Result::Ok(()) }\n            }\n            Fields::Unnamed(ref f) => {\n                let delegate_visit = f.unnamed.iter().enumerate().map(|(i, f)| {\n                    let index = Index::from(i);\n                    quote_spanned! {f.span() =>\n                        #dumpster::TraceWith::accept(\n                            &self.#index,\n                            visitor\n                        )?;\n                    }\n                });\n\n                quote! { #(#delegate_visit)* ::core::result::Result::Ok(()) }\n            }\n            Fields::Unit => quote! { ::core::result::Result::Ok(()) },\n        },\n        Data::Enum(e) => {\n            let mut delegate_visit = TokenStream::new();\n            for var in &e.variants {\n                let var_name = &var.ident;\n\n                match &var.fields {\n                    Fields::Named(n) => {\n                        let mut binding = TokenStream::new();\n                        let mut execution_visit = TokenStream::new();\n                        for (i, name) in n.named.iter().enumerate() {\n                            let field_name = format_ident!(\"field{i}\");\n                            let field_ident = name.ident.as_ref().unwrap();\n                            if i == 0 {\n                                binding.extend(quote! {\n                                    #field_ident: #field_name\n                                });\n                            } else {\n                                binding.extend(quote! {\n                                    , #field_ident: #field_name\n                                });\n                            }\n\n                            execution_visit.extend(quote! 
{\n                                #dumpster::TraceWith::accept(\n                                    #field_name,\n                                    visitor\n                                )?;\n                            });\n                        }\n\n                        delegate_visit.extend(\n                            quote! {#name::#var_name{#binding} => {#execution_visit ::core::result::Result::Ok(())},},\n                        );\n                    }\n                    Fields::Unnamed(u) => {\n                        let mut binding = TokenStream::new();\n                        let mut execution_visit = TokenStream::new();\n                        for (i, _) in u.unnamed.iter().enumerate() {\n                            let field_name = format_ident!(\"field{i}\");\n                            if i == 0 {\n                                binding.extend(quote! {\n                                    #field_name\n                                });\n                            } else {\n                                binding.extend(quote! {\n                                    , #field_name\n                                });\n                            }\n\n                            execution_visit.extend(quote! {\n                                #dumpster::TraceWith::accept(\n                                    #field_name,\n                                    visitor\n                                )?;\n                            });\n                        }\n\n                        delegate_visit.extend(\n                            quote! {#name::#var_name(#binding) => {#execution_visit ::core::result::Result::Ok(())},},\n                        );\n                    }\n                    Fields::Unit => {\n                        delegate_visit\n                            .extend(quote! {#name::#var_name => ::core::result::Result::Ok(()),});\n                    }\n                }\n            }\n\n            quote! 
{match self {#delegate_visit}}\n        }\n        Data::Union(u) => {\n            quote_spanned! {\n                u.union_token.span => compile_error!(\"`Trace` must be manually implemented for unions\");\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "dumpster_test/.gitignore",
    "content": "/target\n/Cargo.lock\n"
  },
  {
    "path": "dumpster_test/Cargo.toml",
    "content": "[package]\nname = \"dumpster_test\"\nversion = \"0.1.0\"\nedition = \"2021\"\nlicense = \"MPL-2.0\"\nauthors = [\"Clayton Ramsey\"]\ndescription = \"Tests for dumpster garbage collection crate\"\nrepository = \"https://github.com/claytonwramsey/dumpster\"\nreadme = \"../README.md\"\nkeywords = [\"dumpster\", \"garbage_collector\", \"test\"]\ncategories = [\"data-structures\", \"memory-management\"]\npublish = false\n\n[dev-dependencies]\ndumpster = { path = \"../dumpster\" }\ndumpster_derive = { path = \"../dumpster_derive\" }\n"
  },
  {
    "path": "dumpster_test/src/lib.rs",
    "content": "/*\n    dumpster, a cycle-tracking garbage collector for Rust.\n    Copyright (C) 2023 Clayton Ramsey.\n\n    This Source Code Form is subject to the terms of the Mozilla Public\n    License, v. 2.0. If a copy of the MPL was not distributed with this\n    file, You can obtain one at http://mozilla.org/MPL/2.0/.\n*/\n\n#![warn(clippy::pedantic)]\n#![warn(clippy::cargo)]\n#![cfg(test)]\n\nuse std::{\n    cell::RefCell,\n    sync::atomic::{AtomicU8, AtomicUsize, Ordering},\n};\n\nuse dumpster::unsync::{collect, Gc};\nuse dumpster_derive::Trace;\n\n#[derive(Trace)]\nstruct Empty;\n\n#[derive(Trace)]\n#[allow(dead_code)]\nstruct UnitTuple();\n\n#[derive(Trace)]\nstruct MultiRef {\n    counter: &'static AtomicUsize,\n    pointers: RefCell<Vec<Gc<MultiRef>>>,\n}\n\n#[derive(Trace)]\n#[allow(unused)]\nenum Refs {\n    None,\n    One(Gc<Refs>),\n    Many { refs: Vec<Gc<Refs>> },\n}\n\n#[derive(Trace)]\n#[allow(unused)]\nenum A {\n    None,\n}\n\n#[derive(Trace)]\n#[allow(unused)]\nenum B {\n    One(Gc<B>),\n}\n\n#[derive(Trace)]\n#[allow(unused)]\nstruct Generic<T> {\n    value: T,\n}\n\nimpl Drop for MultiRef {\n    fn drop(&mut self) {\n        self.counter.fetch_add(1, Ordering::Relaxed);\n    }\n}\n\n#[test]\nfn unit() {\n    static DROP_COUNT: AtomicU8 = AtomicU8::new(0);\n    #[derive(Trace)]\n    struct DropCount;\n\n    impl Drop for DropCount {\n        fn drop(&mut self) {\n            DROP_COUNT.fetch_add(1, Ordering::Relaxed);\n        }\n    }\n\n    let gc1 = Gc::new(DropCount);\n    let gc2 = Gc::clone(&gc1);\n\n    drop(gc1);\n    assert_eq!(DROP_COUNT.load(Ordering::Relaxed), 0);\n    drop(gc2);\n    assert_eq!(DROP_COUNT.load(Ordering::Relaxed), 1);\n}\n\n#[test]\nfn self_referential() {\n    static COUNT: AtomicUsize = AtomicUsize::new(0);\n\n    let gc1 = Gc::new(MultiRef {\n        counter: &COUNT,\n        pointers: RefCell::new(Vec::new()),\n    });\n    gc1.pointers.borrow_mut().push(Gc::clone(&gc1));\n\n    
assert_eq!(COUNT.load(Ordering::Relaxed), 0);\n    drop(gc1);\n    collect();\n    assert_eq!(COUNT.load(Ordering::Relaxed), 1);\n}\n\n#[test]\nfn double_loop() {\n    static COUNT: AtomicUsize = AtomicUsize::new(0);\n\n    let gc1 = Gc::new(MultiRef {\n        counter: &COUNT,\n        pointers: RefCell::new(Vec::new()),\n    });\n    gc1.pointers\n        .borrow_mut()\n        .extend([Gc::clone(&gc1), Gc::clone(&gc1)]);\n\n    assert_eq!(COUNT.load(Ordering::Relaxed), 0);\n    drop(gc1);\n    collect();\n    assert_eq!(COUNT.load(Ordering::Relaxed), 1);\n}\n\n#[test]\nfn parallel_loop() {\n    static COUNT_1: AtomicUsize = AtomicUsize::new(0);\n    static COUNT_2: AtomicUsize = AtomicUsize::new(0);\n    static COUNT_3: AtomicUsize = AtomicUsize::new(0);\n    static COUNT_4: AtomicUsize = AtomicUsize::new(0);\n\n    let gc1 = Gc::new(MultiRef {\n        counter: &COUNT_1,\n        pointers: RefCell::new(Vec::new()),\n    });\n    let gc2 = Gc::new(MultiRef {\n        counter: &COUNT_2,\n        pointers: RefCell::new(vec![Gc::clone(&gc1)]),\n    });\n    let gc3 = Gc::new(MultiRef {\n        counter: &COUNT_3,\n        pointers: RefCell::new(vec![Gc::clone(&gc1)]),\n    });\n    let gc4 = Gc::new(MultiRef {\n        counter: &COUNT_4,\n        pointers: RefCell::new(vec![Gc::clone(&gc2), Gc::clone(&gc3)]),\n    });\n    gc1.pointers.borrow_mut().push(Gc::clone(&gc4));\n\n    drop(gc1);\n    drop(gc2);\n    drop(gc3);\n    assert_eq!(COUNT_1.load(Ordering::Relaxed), 0);\n    assert_eq!(COUNT_2.load(Ordering::Relaxed), 0);\n    assert_eq!(COUNT_3.load(Ordering::Relaxed), 0);\n    assert_eq!(COUNT_4.load(Ordering::Relaxed), 0);\n    drop(gc4);\n    collect();\n    assert_eq!(COUNT_1.load(Ordering::Relaxed), 1);\n    assert_eq!(COUNT_2.load(Ordering::Relaxed), 1);\n    assert_eq!(COUNT_3.load(Ordering::Relaxed), 1);\n    assert_eq!(COUNT_4.load(Ordering::Relaxed), 1);\n}\n\n#[test]\n#[allow(clippy::similar_names)]\nfn unsync_as_ptr() {\n    #[derive(Trace)]\n    
struct B(Gc<Empty>);\n\n    let empty = Gc::new(Empty);\n    let empty_a = Gc::clone(&empty);\n    let empty_ptr = Gc::as_ptr(&empty);\n    assert_eq!(empty_ptr, Gc::as_ptr(&empty_a));\n\n    let b = B(Gc::clone(&empty));\n    assert_eq!(empty_ptr, Gc::as_ptr(&b.0));\n    let bb = Gc::new(B(Gc::clone(&empty)));\n    assert_eq!(empty_ptr, Gc::as_ptr(&bb.0));\n\n    let empty2 = Gc::new(Empty);\n    let empty2_ptr = Gc::as_ptr(&empty2);\n    assert_ne!(empty_ptr, empty2_ptr);\n    let b2 = Gc::new(B(Gc::clone(&empty2)));\n    assert_eq!(empty2_ptr, Gc::as_ptr(&b2.0));\n    assert_ne!(empty_ptr, Gc::as_ptr(&b2.0));\n    assert_ne!(Gc::as_ptr(&b.0), Gc::as_ptr(&b2.0));\n    assert_ne!(Gc::as_ptr(&b.0), empty2_ptr);\n}\n"
  },
  {
    "path": "rustfmt.toml",
    "content": "newline_style = \"Unix\"\nwrap_comments = true\ncomment_width = 100\nformat_code_in_doc_comments = true\nimports_granularity = \"Crate\"\n"
  }
]