Repository: claytonwramsey/dumpster Branch: master Commit: 32bb3bbb67f9 Files: 34 Total size: 302.2 KB Directory structure: gitextract_qahat5j6/ ├── .github/ │ └── workflows/ │ └── rust.yml ├── .gitignore ├── CHANGELOG.md ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── LICENSE.md ├── README.md ├── dumpster/ │ ├── .gitignore │ ├── Cargo.toml │ └── src/ │ ├── impls.rs │ ├── lib.rs │ ├── ptr.rs │ ├── sync/ │ │ ├── cell.rs │ │ ├── collect.rs │ │ ├── loom_ext.rs │ │ ├── loom_tests.rs │ │ ├── mod.rs │ │ └── tests.rs │ └── unsync/ │ ├── collect.rs │ ├── mod.rs │ └── tests.rs ├── dumpster_bench/ │ ├── .gitignore │ ├── Cargo.toml │ ├── scripts/ │ │ └── make_plots.py │ └── src/ │ ├── lib.rs │ └── main.rs ├── dumpster_derive/ │ ├── .gitignore │ ├── Cargo.toml │ └── src/ │ └── lib.rs ├── dumpster_test/ │ ├── .gitignore │ ├── Cargo.toml │ └── src/ │ └── lib.rs └── rustfmt.toml ================================================ FILE CONTENTS ================================================ ================================================ FILE: .github/workflows/rust.yml ================================================ name: Rust on: push: branches: ["master"] pull_request: branches: ["master"] env: CARGO_TERM_COLOR: always jobs: test: runs-on: ${{ matrix.os }} strategy: matrix: os: - ubuntu-latest - windows-latest - macOS-latest toolchain: - nightly - stable cargo_flags: - "--all-features" - "--no-default-features" - "" exclude: - cargo_flags: "--all-features" toolchain: stable steps: - name: Checkout sources uses: actions/checkout@v2 - name: Install rust toolchain uses: dtolnay/rust-toolchain@stable with: toolchain: ${{ matrix.toolchain }} - name: Generate lockfile run: cargo generate-lockfile - name: Cache id: cache-restore uses: actions/cache@v4 with: path: | ~/.cargo/registry ~/.cargo/git target key: ${{ runner.os }}-cargo-test-${{ hashFiles('**/Cargo.lock') }}-${{ matrix.cargo_flags }} - name: Build with tests uses: actions-rs/cargo@v1 with: command: test args: --no-run --workspace ${{ matrix.cargo_flags }} --exclude dumpster_bench - name: Run tests uses: actions-rs/cargo@v1 with: command: test args: --workspace ${{ matrix.cargo_flags }} --exclude dumpster_bench - name: Save cache id: cache-save uses: actions/cache/save@v4 if: always() && steps.cache-restore.cache-hit != 'true' with: path: | ~/.cargo/registry ~/.cargo/git target key: ${{ runner.os }}-cargo-test-${{ hashFiles('**/Cargo.lock') }}-${{ matrix.cargo_flags }} miri: runs-on: ${{ matrix.os }} strategy: matrix: os: - ubuntu-latest - windows-latest - macOS-latest toolchain: - nightly cargo_flags: - "--all-features" steps: - name: Checkout sources uses: actions/checkout@v2 - name: Install rust toolchain uses: dtolnay/rust-toolchain@stable with: toolchain: ${{ matrix.toolchain }} components: miri - name: Generate lockfile run: cargo generate-lockfile - name: Cache id: cache-restore uses: actions/cache@v4 with: path: | ~/.cargo/registry ~/.cargo/git target key: ${{ runner.os }}-cargo-miri-${{ hashFiles('**/Cargo.lock') }}-${{ matrix.cargo_flags }} - name: Build miri test executables uses: actions-rs/cargo@v1 with: command: miri args: test --no-run --workspace ${{ matrix.cargo_flags }} --exclude dumpster_bench - name: Run miri tests uses: actions-rs/cargo@v1 with: command: miri args: test --workspace ${{ matrix.cargo_flags }} --exclude dumpster_bench - name: Save cache id: cache-save uses: actions/cache/save@v4 if: always() && steps.cache-restore.cache-hit != 'true' with: path: | ~/.cargo/registry ~/.cargo/git target key: ${{ runner.os 
}}-cargo-miri-${{ hashFiles('**/Cargo.lock') }}-${{ matrix.cargo_flags }} loom: runs-on: ubuntu-latest steps: - name: Checkout sources uses: actions/checkout@v2 - name: Install rust toolchain uses: dtolnay/rust-toolchain@stable with: toolchain: stable - name: Generate lockfile run: cargo generate-lockfile - name: Cache id: cache-restore uses: actions/cache@v4 with: path: | ~/.cargo/registry ~/.cargo/git target key: ${{ runner.os }}-cargo-loom-${{ hashFiles('**/Cargo.lock') }} - name: Build with tests uses: actions-rs/cargo@v1 env: RUSTFLAGS: "--cfg loom" with: command: test args: --lib -p dumpster loom --release --no-run - name: Run tests uses: actions-rs/cargo@v1 env: RUSTFLAGS: "--cfg loom" with: command: test args: --lib -p dumpster loom --release - name: Save cache id: cache-save uses: actions/cache/save@v4 if: always() && steps.cache-restore.cache-hit != 'true' with: path: | ~/.cargo/registry ~/.cargo/git target key: ${{ runner.os }}-cargo-loom-${{ hashFiles('**/Cargo.lock') }} ================================================ FILE: .gitignore ================================================ /target /Cargo.lock *.csv .vscode .zed ================================================ FILE: CHANGELOG.md ================================================ # `dumpster` Changelog ## 2.1.0 ### New features - Implemented `FromIterator` for `Gc<[T]>`. ## 2.0.0 ### Breaking changes - Refactored `Trace` to use `TraceWith`. ### New features - Added `sync::Gc::new_cyclic`. ## 1.2.0 ### New features - Added experimental support for testing under `loom`. - Added `unsync::Gc::new_cyclic`. - Implemented `Default` for `Gc`. - Added `Gc::make_mut`. - Added `From` implementations for `Gc`. - Supported differing `BuildHasher` types in `Trace` implementation for `HashSet`. - Added `sync::coerce_gc` and `unsync::coerce_gc`. - Added `Trace` implementation to more types in the Rust standard library. ### Bug fixes - Fixed broken references in documentation. - Added overflow testing for `Gc` reference counts. - `Gc`s created in a garbage-collected value's `Drop` implementation are no longer leaked. ## 1.1.1 ### Bug fixes - Using `dumpster` no longer fails under Miri as we have changed our underlying pointer model. ## 1.1.0 ### New features - Added support for [`either`](https://crates.io/crates/either). ### Bug fixes - Derive implementations no longer erroneously refer to `heapsize`. ### Other changes - Slight performance and code style improvements. - Improved internal documentation on safety. - Remove `strict-provenance` requirement as it is now stabilized. ## 1.0.0 ### Breaking changes - Rename `Collectable` to `Trace`. ## 0.2.1 ### New features - Implement `Collectable` for `std::any::TypeId`. ## 0.2.0 ### New features - Added `Gc::as_ptr`. - Added `Gc::ptr_eq`. - Implemented `PartialEq` and `Eq` for garbage collected pointers. ### Other - Changed license from GNU GPLv3 or later to MPL 2.0. - Allocations which do not contain `Gc`s will simply be reference counted. ## 0.1.2 ### New features - Implement `Collectable` for `OnceCell`, `HashMap`, and `BTreeMap`. - Add `try_clone` and `try_deref` to `unsync::Gc` and `sync::Gc`. - Make dereferencing `Gc` only panic on truly-dead `Gc`s. ### Bugfixes - Prevent dead `Gc`s from escaping their `Drop` implementation, potentially causing UAFs. - Use fully-qualified name for `Result` in derive macro, preventing some bugs. ### Other - Improve performance in `unsync` by using `parking_lot` for concurrency primitives. - Improve documentation of panicking behavior in `Gc`. 
- Fix spelling mistakes in documentation. ## 0.1.1 ### Bugfixes - Prevent possible UAFs caused by accessing `Gc`s during `Drop` impls by panicking. ### Other - Fix spelling mistakes in documentation. ## 0.1.0 Initial release. ================================================ FILE: Cargo.toml ================================================ [workspace] members = [ "dumpster", "dumpster_derive", "dumpster_test", "dumpster_bench", ] resolver = "2" [patch.crates-io] dumpster = { path = "dumpster" } [profile.release] lto = true ================================================ FILE: LICENSE-APACHE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
END OF TERMS AND CONDITIONS ================================================ FILE: LICENSE-MIT ================================================ Copyright (c) The Rust Project Contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: LICENSE.md ================================================ Mozilla Public License Version 2.0 ================================== ### 1. Definitions **1.1. “Contributor”** means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. **1.2. “Contributor Version”** means the combination of the Contributions of others (if any) used by a Contributor and that particular Contributor's Contribution. **1.3. “Contribution”** means Covered Software of a particular Contributor. **1.4. “Covered Software”** means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. **1.5. “Incompatible With Secondary Licenses”** means * **(a)** that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or * **(b)** that the Covered Software was made available under the terms of version 1.1 or earlier of the License, but not also under the terms of a Secondary License. **1.6. “Executable Form”** means any form of the work other than Source Code Form. **1.7. “Larger Work”** means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software. **1.8. “License”** means this document. **1.9. “Licensable”** means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently, any and all of the rights conveyed by this License. **1.10. “Modifications”** means any of the following: * **(a)** any file in Source Code Form that results from an addition to, deletion from, or modification of the contents of Covered Software; or * **(b)** any new file in Source Code Form that contains any Covered Software. **1.11. “Patent Claims” of a Contributor** means any patent claim(s), including without limitation, method, process, and apparatus claims, in any patent Licensable by such Contributor that would be infringed, but for the grant of the License, by the making, using, selling, offering for sale, having made, import, or transfer of either its Contributions or its Contributor Version. **1.12. 
“Secondary License”** means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. **1.13. “Source Code Form”** means the form of the work preferred for making modifications. **1.14. “You” (or “Your”)** means an individual or a legal entity exercising rights under this License. For legal entities, “You” includes any entity that controls, is controlled by, or is under common control with You. For purposes of this definition, “control” means **(a)** the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or **(b)** ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. ### 2. License Grants and Conditions #### 2.1. Grants Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: * **(a)** under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its Contributions, either on an unmodified basis, with Modifications, or as part of a Larger Work; and * **(b)** under Patent Claims of such Contributor to make, use, sell, offer for sale, have made, import, and otherwise transfer either its Contributions or its Contributor Version. #### 2.2. Effective Date The licenses granted in Section 2.1 with respect to any Contribution become effective for each Contribution on the date the Contributor first distributes such Contribution. #### 2.3. Limitations on Grant Scope The licenses granted in this Section 2 are the only rights granted under this License. No additional rights or licenses will be implied from the distribution or licensing of Covered Software under this License. Notwithstanding Section 2.1(b) above, no patent license is granted by a Contributor: * **(a)** for any code that a Contributor has removed from Covered Software; or * **(b)** for infringements caused by: **(i)** Your and any other third party's modifications of Covered Software, or **(ii)** the combination of its Contributions with other software (except as part of its Contributor Version); or * **(c)** under Patent Claims infringed by Covered Software in the absence of its Contributions. This License does not grant any rights in the trademarks, service marks, or logos of any Contributor (except as may be necessary to comply with the notice requirements in Section 3.4). #### 2.4. Subsequent Licenses No Contributor makes additional grants as a result of Your choice to distribute the Covered Software under a subsequent version of this License (see Section 10.2) or under the terms of a Secondary License (if permitted under the terms of Section 3.3). #### 2.5. Representation Each Contributor represents that the Contributor believes its Contributions are its original creation(s) or it has sufficient rights to grant the rights to its Contributions conveyed by this License. #### 2.6. Fair Use This License is not intended to limit any rights You have under applicable copyright doctrines of fair use, fair dealing, or other equivalents. #### 2.7. Conditions Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in Section 2.1. ### 3. Responsibilities #### 3.1. 
Distribution of Source Form All distribution of Covered Software in Source Code Form, including any Modifications that You create or to which You contribute, must be under the terms of this License. You must inform recipients that the Source Code Form of the Covered Software is governed by the terms of this License, and how they can obtain a copy of this License. You may not attempt to alter or restrict the recipients' rights in the Source Code Form. #### 3.2. Distribution of Executable Form If You distribute Covered Software in Executable Form then: * **(a)** such Covered Software must also be made available in Source Code Form, as described in Section 3.1, and You must inform recipients of the Executable Form how they can obtain a copy of such Source Code Form by reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and * **(b)** You may distribute such Executable Form under the terms of this License, or sublicense it under different terms, provided that the license for the Executable Form does not attempt to limit or alter the recipients' rights in the Source Code Form under this License. #### 3.3. Distribution of a Larger Work You may create and distribute a Larger Work under terms of Your choice, provided that You also comply with the requirements of this License for the Covered Software. If the Larger Work is a combination of Covered Software with a work governed by one or more Secondary Licenses, and the Covered Software is not Incompatible With Secondary Licenses, this License permits You to additionally distribute such Covered Software under the terms of such Secondary License(s), so that the recipient of the Larger Work may, at their option, further distribute the Covered Software under the terms of either this License or such Secondary License(s). #### 3.4. Notices You may not remove or alter the substance of any license notices (including copyright notices, patent notices, disclaimers of warranty, or limitations of liability) contained within the Source Code Form of the Covered Software, except that You may alter any license notices to the extent required to remedy known factual inaccuracies. #### 3.5. Application of Additional Terms You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of any Contributor. You must make it absolutely clear that any such warranty, support, indemnity, or liability obligation is offered by You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any jurisdiction. ### 4. Inability to Comply Due to Statute or Regulation If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Software due to statute, judicial order, or regulation then You must: **(a)** comply with the terms of this License to the maximum extent possible; and **(b)** describe the limitations and the code they affect. Such description must be placed in a text file included with all distributions of the Covered Software under this License. 
Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it. ### 5. Termination **5.1.** The rights granted under this License will terminate automatically if You fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated **(a)** provisionally, unless and until such Contributor explicitly and finally terminates Your grants, and **(b)** on an ongoing basis, if such Contributor fails to notify You of the non-compliance by some reasonable means prior to 60 days after You have come back into compliance. Moreover, Your grants from a particular Contributor are reinstated on an ongoing basis if such Contributor notifies You of the non-compliance by some reasonable means, this is the first time You have received notice of non-compliance with this License from such Contributor, and You become compliant prior to 30 days after Your receipt of the notice. **5.2.** If You initiate litigation against any entity by asserting a patent infringement claim (excluding declaratory judgment actions, counter-claims, and cross-claims) alleging that a Contributor Version directly or indirectly infringes any patent, then the rights granted to You by any and all Contributors for the Covered Software under Section 2.1 of this License shall terminate. **5.3.** In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or Your distributors under this License prior to termination shall survive termination. ### 6. Disclaimer of Warranty > Covered Software is provided under this License on an “as is” > basis, without warranty of any kind, either expressed, implied, or > statutory, including, without limitation, warranties that the > Covered Software is free of defects, merchantable, fit for a > particular purpose or non-infringing. The entire risk as to the > quality and performance of the Covered Software is with You. > Should any Covered Software prove defective in any respect, You > (not any Contributor) assume the cost of any necessary servicing, > repair, or correction. This disclaimer of warranty constitutes an > essential part of this License. No use of any Covered Software is > authorized under this License except under this disclaimer. ### 7. Limitation of Liability > Under no circumstances and under no legal theory, whether tort > (including negligence), contract, or otherwise, shall any > Contributor, or anyone who distributes Covered Software as > permitted above, be liable to You for any direct, indirect, > special, incidental, or consequential damages of any character > including, without limitation, damages for lost profits, loss of > goodwill, work stoppage, computer failure or malfunction, or any > and all other commercial damages or losses, even if such party > shall have been informed of the possibility of such damages. This > limitation of liability shall not apply to liability for death or > personal injury resulting from such party's negligence to the > extent applicable law prohibits such limitation. Some > jurisdictions do not allow the exclusion or limitation of > incidental or consequential damages, so this exclusion and > limitation may not apply to You. ### 8. 
Litigation Any litigation relating to this License may be brought only in the courts of a jurisdiction where the defendant maintains its principal place of business and such litigation shall be governed by laws of that jurisdiction, without reference to its conflict-of-law provisions. Nothing in this Section shall prevent a party's ability to bring cross-claims or counter-claims. ### 9. Miscellaneous This License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not be used to construe this License against a Contributor. ### 10. Versions of the License #### 10.1. New Versions Mozilla Foundation is the license steward. Except as provided in Section 10.3, no one other than the license steward has the right to modify or publish new versions of this License. Each version will be given a distinguishing version number. #### 10.2. Effect of New Versions You may distribute the Covered Software under the terms of the version of the License under which You originally received the Covered Software, or under the terms of any subsequent version published by the license steward. #### 10.3. Modified Versions If you create software not governed by this License, and you want to create a new license for such software, you may create and use a modified version of this License if you rename the license and remove any references to the name of the license steward (except to note that such modified license differs from this License). #### 10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses If You choose to distribute Source Code Form that is Incompatible With Secondary Licenses under the terms of this version of the License, the notice described in Exhibit B of this License must be attached. ## Exhibit A - Source Code Form License Notice This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice. You may add additional accurate notices of copyright ownership. ## Exhibit B - “Incompatible With Secondary Licenses” Notice This Source Code Form is "Incompatible With Secondary Licenses", as defined by the Mozilla Public License, v. 2.0. ================================================ FILE: README.md ================================================ # `dumpster`: A cycle-tracking garbage collector for Rust [![Crates.io page](https://img.shields.io/crates/v/dumpster)](https://crates.io/crates/dumpster) [![docs.rs](https://img.shields.io/docsrs/dumpster)](https://docs.rs/dumpster) `dumpster` is a cycle-detecting garbage collector for Rust. It detects unreachable allocations and automatically frees them. ## Why should you use this crate? In short, `dumpster` offers a great mix of usability, performance, and flexibility. - `dumpster`'s API is a drop-in replacement for `std`'s reference-counted shared allocations (`Rc` and `Arc`). - It's very performant and has builtin implementations of both thread-local and concurrent garbage collection. 
- There are no restrictions on the reference structure within a garbage-collected allocation (references may point in any way you like).
- It's trivial to make a custom type `Trace` using the provided derive macros.
- You can even store `?Sized` data in a garbage-collected pointer!

## How it works

`dumpster` is unlike most tracing garbage collectors.
Other GCs keep track of a set of roots, which can then be used to perform a sweep and find out which allocations are reachable and which are not.
Instead, `dumpster` extends reference-counted garbage collection (such as `std::rc::Rc`) with a cycle-detection algorithm, enabling it to effectively clean up self-referential data structures.

For a deeper dive, check out this [blog post](https://claytonwramsey.github.io/2023/08/14/dumpster.html).

## What this library contains

`dumpster` actually contains two garbage collector implementations: one thread-local, non-`Send` garbage collector in the module `unsync`, and one thread-safe garbage collector in the module `sync`.
These garbage collectors can be safely mixed and matched.

This library also comes with a derive macro for creating custom `Trace` types.

## Examples

```rust
use dumpster::{Trace, unsync::Gc};
use std::cell::RefCell;

#[derive(Trace)]
struct Foo {
    ptr: RefCell<Option<Gc<Foo>>>,
}

// Create a new garbage-collected Foo.
let foo = Gc::new(Foo {
    ptr: RefCell::new(None),
});

// Insert a circular reference inside of the foo.
*foo.ptr.borrow_mut() = Some(foo.clone());

// Render the foo inaccessible.
// This may trigger a collection, but it's not guaranteed.
// If we had used `Rc` instead of `Gc`, this would have caused a memory leak.
drop(foo);

// Trigger a collection.
// This isn't necessary, but it guarantees that `foo` will be collected immediately (instead of
// later).
dumpster::unsync::collect();
```

## Installation

To install, simply add `dumpster` as a dependency to your project.

```toml
[dependencies]
dumpster = "2.1.0"
```

## Optional features

### `derive`

`derive` is enabled by default.
It enables the derive macro for `Trace`, which makes it easy for users to implement their own `Trace` types.

```rust
use dumpster::{unsync::Gc, Trace};
use std::cell::RefCell;

#[derive(Trace)] // no manual implementation required
struct Foo(RefCell<Option<Gc<Foo>>>);

let my_foo = Gc::new(Foo(RefCell::new(None)));
*my_foo.0.borrow_mut() = Some(my_foo.clone());

drop(my_foo); // my_foo will be automatically cleaned up
```

### `either`

`either` is disabled by default.
It adds support for the [`either`](https://crates.io/crates/either) crate, specifically by implementing `Trace` for [`either::Either`](https://docs.rs/either/1.13.0/either/enum.Either.html).

### `coerce-unsized`

`coerce-unsized` is disabled by default.
This enables the implementation of `CoerceUnsized` for each garbage collector, making it possible to use `Gc` with `!Sized` types conveniently.

```rust
use dumpster::unsync::Gc;

// this only works with "coerce-unsized" enabled while compiling on nightly Rust
let gc1: Gc<[u8]> = Gc::new([1, 2, 3]);
```

To use `coerce-unsized`, edit the `dumpster` dependency in your `Cargo.toml` to include the feature.

```toml
[dependencies]
dumpster = { version = "2.1.0", features = ["coerce-unsized"] }
```

## Loom support

`dumpster` has experimental support for permutation testing under [`loom`](https://github.com/tokio-rs/loom).
It is expected to be unstable and buggy.
To compile `dumpster` using `loom`, add `--cfg loom` to `RUSTFLAGS` when compiling, for example: ```sh RUSTFLAGS='--cfg loom' cargo test ``` ## License This code is licensed under the Mozilla Public License, version 2.0. For more information, refer to [LICENSE.md](LICENSE.md). This project includes portions of code derived from the Rust standard library, which is dual-licensed under the MIT and Apache 2.0 licenses. Copyright (c) The Rust Project Developers. ================================================ FILE: dumpster/.gitignore ================================================ /target /Cargo.lock ================================================ FILE: dumpster/Cargo.toml ================================================ [package] name = "dumpster" version = "2.1.0" edition = "2021" license = "MPL-2.0" authors = ["Clayton Ramsey"] description = "A concurrent cycle-tracking garbage collector." repository = "https://github.com/claytonwramsey/dumpster" readme = "../README.md" keywords = ["dumpster", "garbage_collector", "gc"] categories = ["memory-management", "data-structures"] [features] default = ["derive"] coerce-unsized = [] derive = ["dep:dumpster_derive"] either = ["dep:either"] [dependencies] parking_lot = "0.12.3" dumpster_derive = { version = "2.0.0", path = "../dumpster_derive", optional = true } either = { version = "1.13.0", optional = true } foldhash = { version = "0.2.0", default-features = false, features = ["std"] } [dev-dependencies] fastrand = "2.0.0" [target.'cfg(loom)'.dependencies] loom = { version = "0.7.2" } [package.metadata.playground] features = ["derive"] [package.metadata.docs.rs] features = ["derive"] targets = ["x86_64-unknown-linux-gnu"] rustdoc-args = ["--generate-link-to-definition"] [lints.rust] unexpected_cfgs = { level = "warn", check-cfg = ['cfg(loom)'] } ================================================ FILE: dumpster/src/impls.rs ================================================ /* dumpster, a cycle-tracking garbage collector for Rust. Copyright (C) 2023 Clayton Ramsey. This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Implementations of [`TraceWith`] for common data types. #![allow(deprecated)] use std::{ borrow::Cow, cell::{Cell, OnceCell, RefCell}, collections::{BTreeMap, BTreeSet, BinaryHeap, HashMap, HashSet, LinkedList, VecDeque}, convert::Infallible, hash::{BuildHasher, BuildHasherDefault}, marker::PhantomData, num::{ NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, NonZeroU128, NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, }, ops::Deref, sync::{ atomic::{ AtomicI16, AtomicI32, AtomicI64, AtomicI8, AtomicIsize, AtomicU16, AtomicU32, AtomicU64, AtomicU8, AtomicUsize, }, Mutex, MutexGuard, OnceLock, RwLock, RwLockReadGuard, TryLockError, }, }; use crate::{TraceWith, Visitor}; unsafe impl TraceWith for Infallible { fn accept(&self, _: &mut V) -> Result<(), ()> { match *self {} } } #[cfg(feature = "either")] unsafe impl, B: TraceWith> TraceWith for either::Either { fn accept(&self, visitor: &mut V) -> Result<(), ()> { match self { either::Either::Left(a) => a.accept(visitor), either::Either::Right(b) => b.accept(visitor), } } } /// Implement `TraceWith` trivially for some parametric `?Sized` type. macro_rules! 
param_trivial_impl_unsized { ($x: ty) => { unsafe impl TraceWith for $x { #[inline] fn accept(&self, _: &mut V) -> Result<(), ()> { Ok(()) } } }; } param_trivial_impl_unsized!(MutexGuard<'static, T>); param_trivial_impl_unsized!(RwLockReadGuard<'static, T>); param_trivial_impl_unsized!(&'static T); param_trivial_impl_unsized!(PhantomData); /// Implement `TraceWith` trivially for some parametric `Sized` type. macro_rules! param_trivial_impl_sized { ($x: ty) => { unsafe impl TraceWith for $x { #[inline] fn accept(&self, _: &mut V) -> Result<(), ()> { Ok(()) } } }; } param_trivial_impl_sized!(std::future::Pending); param_trivial_impl_sized!(std::mem::Discriminant); unsafe impl + ?Sized> TraceWith for Box { fn accept(&self, visitor: &mut V) -> Result<(), ()> { (**self).accept(visitor) } } unsafe impl TraceWith for BuildHasherDefault { fn accept(&self, _: &mut V) -> Result<(), ()> { Ok(()) } } unsafe impl TraceWith for Cow<'_, T> where T::Owned: TraceWith, { fn accept(&self, visitor: &mut V) -> Result<(), ()> { if let Cow::Owned(ref v) = self { v.accept(visitor)?; } Ok(()) } } unsafe impl + ?Sized> TraceWith for RefCell { #[inline] fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.try_borrow().map_err(|_| ())?.accept(visitor) } } unsafe impl + ?Sized> TraceWith for Mutex { #[inline] fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.try_lock() .map_err(|e| match e { TryLockError::Poisoned(_) => panic!(), TryLockError::WouldBlock => (), })? .deref() .accept(visitor) } } unsafe impl + ?Sized> TraceWith for RwLock { #[inline] fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.try_read() .map_err(|e| match e { TryLockError::Poisoned(_) => panic!(), TryLockError::WouldBlock => (), })? .deref() .accept(visitor) } } unsafe impl> TraceWith for Option { #[inline] fn accept(&self, visitor: &mut V) -> Result<(), ()> { match self { Some(x) => x.accept(visitor), None => Ok(()), } } } unsafe impl, E: TraceWith> TraceWith for Result { #[inline] fn accept(&self, visitor: &mut V) -> Result<(), ()> { match self { Ok(t) => t.accept(visitor), Err(e) => e.accept(visitor), } } } unsafe impl> TraceWith for Cell { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.get().accept(visitor) } } unsafe impl> TraceWith for OnceCell { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.get().map_or(Ok(()), |x| x.accept(visitor)) } } unsafe impl> TraceWith for OnceLock { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.get().map_or(Ok(()), |x| x.accept(visitor)) } } unsafe impl> TraceWith for std::cmp::Reverse { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.0.accept(visitor) } } unsafe impl + ?Sized> TraceWith for std::io::BufReader { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.get_ref().accept(visitor) } } unsafe impl + std::io::Write + ?Sized> TraceWith for std::io::BufWriter { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.get_ref().accept(visitor) } } unsafe impl, U: TraceWith> TraceWith for std::io::Chain { fn accept(&self, visitor: &mut V) -> Result<(), ()> { let (t, u) = self.get_ref(); t.accept(visitor)?; u.accept(visitor) } } unsafe impl> TraceWith for std::io::Cursor { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.get_ref().accept(visitor) } } unsafe impl + std::io::Write + ?Sized> TraceWith for std::io::LineWriter { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.get_ref().accept(visitor) } } unsafe impl> TraceWith for std::io::Take { fn accept(&self, visitor: &mut V) -> Result<(), ()> { 
self.get_ref().accept(visitor) } } unsafe impl> TraceWith for std::mem::ManuallyDrop { fn accept(&self, visitor: &mut V) -> Result<(), ()> { (**self).accept(visitor) } } unsafe impl> TraceWith for std::num::Saturating { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.0.accept(visitor) } } unsafe impl> TraceWith for std::num::Wrapping { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.0.accept(visitor) } } unsafe impl> TraceWith for std::ops::Range { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.start.accept(visitor)?; self.end.accept(visitor) } } unsafe impl> TraceWith for std::ops::RangeFrom { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.start.accept(visitor) } } unsafe impl> TraceWith for std::ops::RangeInclusive { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.start().accept(visitor)?; self.end().accept(visitor) } } unsafe impl> TraceWith for std::ops::RangeTo { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.end.accept(visitor) } } unsafe impl> TraceWith for std::ops::RangeToInclusive { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.end.accept(visitor) } } unsafe impl> TraceWith for std::ops::Bound { fn accept(&self, visitor: &mut V) -> Result<(), ()> { match self { std::ops::Bound::Included(x) | std::ops::Bound::Excluded(x) => x.accept(visitor), std::ops::Bound::Unbounded => Ok(()), } } } unsafe impl, C: TraceWith> TraceWith for std::ops::ControlFlow { fn accept(&self, visitor: &mut V) -> Result<(), ()> { match self { std::ops::ControlFlow::Continue(c) => c.accept(visitor), std::ops::ControlFlow::Break(b) => b.accept(visitor), } } } unsafe impl> TraceWith for std::panic::AssertUnwindSafe { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.0.accept(visitor) } } unsafe impl> TraceWith for std::task::Poll { fn accept(&self, visitor: &mut V) -> Result<(), ()> { match self { std::task::Poll::Ready(r) => r.accept(visitor), std::task::Poll::Pending => Ok(()), } } } /// Implement [`TraceWith`] for a collection data structure which has some method `iter()` that /// iterates over all elements of the data structure and `iter_mut()` which does the same over /// mutable references. macro_rules! 
Trace_collection_impl { ($x: ty) => { unsafe impl> TraceWith for $x { #[inline] fn accept(&self, visitor: &mut V) -> Result<(), ()> { for elem in self { elem.accept(visitor)?; } Ok(()) } } }; } Trace_collection_impl!(Vec); Trace_collection_impl!(VecDeque); Trace_collection_impl!(LinkedList); Trace_collection_impl!([T]); Trace_collection_impl!(BinaryHeap); Trace_collection_impl!(BTreeSet); unsafe impl> TraceWith for std::vec::IntoIter { #[inline] fn accept(&self, visitor: &mut V) -> Result<(), ()> { for elem in self.as_slice() { elem.accept(visitor)?; } Ok(()) } } unsafe impl, V: TraceWith, S: BuildHasher + TraceWith> TraceWith for HashMap { fn accept(&self, visitor: &mut Z) -> Result<(), ()> { for (k, v) in self { k.accept(visitor)?; v.accept(visitor)?; } self.hasher().accept(visitor) } } unsafe impl, S: BuildHasher + TraceWith> TraceWith for HashSet { fn accept(&self, visitor: &mut Z) -> Result<(), ()> { for elem in self { elem.accept(visitor)?; } self.hasher().accept(visitor) } } unsafe impl, V: TraceWith> TraceWith for BTreeMap { fn accept(&self, visitor: &mut Z) -> Result<(), ()> { for (k, v) in self { k.accept(visitor)?; v.accept(visitor)?; } Ok(()) } } unsafe impl, const N: usize> TraceWith for [T; N] { #[inline] fn accept(&self, visitor: &mut V) -> Result<(), ()> { for elem in self { elem.accept(visitor)?; } Ok(()) } } /// Implement [`TraceWith`] for a trivially-collected type which contains no `Gc`s in its /// fields. macro_rules! Trace_trivial_impl { ($x: ty) => { unsafe impl TraceWith for $x { #[inline] fn accept(&self, _: &mut V) -> Result<(), ()> { Ok(()) } } }; } Trace_trivial_impl!(()); Trace_trivial_impl!(u8); Trace_trivial_impl!(u16); Trace_trivial_impl!(u32); Trace_trivial_impl!(u64); Trace_trivial_impl!(u128); Trace_trivial_impl!(usize); Trace_trivial_impl!(i8); Trace_trivial_impl!(i16); Trace_trivial_impl!(i32); Trace_trivial_impl!(i64); Trace_trivial_impl!(i128); Trace_trivial_impl!(isize); Trace_trivial_impl!(bool); Trace_trivial_impl!(char); Trace_trivial_impl!(f32); Trace_trivial_impl!(f64); Trace_trivial_impl!(AtomicU8); Trace_trivial_impl!(AtomicU16); Trace_trivial_impl!(AtomicU32); Trace_trivial_impl!(AtomicU64); Trace_trivial_impl!(AtomicUsize); Trace_trivial_impl!(AtomicI8); Trace_trivial_impl!(AtomicI16); Trace_trivial_impl!(AtomicI32); Trace_trivial_impl!(AtomicI64); Trace_trivial_impl!(AtomicIsize); Trace_trivial_impl!(NonZeroU8); Trace_trivial_impl!(NonZeroU16); Trace_trivial_impl!(NonZeroU32); Trace_trivial_impl!(NonZeroU64); Trace_trivial_impl!(NonZeroU128); Trace_trivial_impl!(NonZeroUsize); Trace_trivial_impl!(NonZeroI8); Trace_trivial_impl!(NonZeroI16); Trace_trivial_impl!(NonZeroI32); Trace_trivial_impl!(NonZeroI64); Trace_trivial_impl!(NonZeroI128); Trace_trivial_impl!(NonZeroIsize); Trace_trivial_impl!(std::alloc::Layout); Trace_trivial_impl!(std::alloc::LayoutError); Trace_trivial_impl!(std::alloc::System); Trace_trivial_impl!(std::any::TypeId); Trace_trivial_impl!(std::ascii::EscapeDefault); Trace_trivial_impl!(std::backtrace::Backtrace); Trace_trivial_impl!(std::backtrace::BacktraceStatus); Trace_trivial_impl!(std::cmp::Ordering); Trace_trivial_impl!(std::char::CharTryFromError); Trace_trivial_impl!(std::char::EscapeDebug); Trace_trivial_impl!(std::char::EscapeDefault); Trace_trivial_impl!(std::char::EscapeUnicode); Trace_trivial_impl!(std::char::ToLowercase); Trace_trivial_impl!(std::char::ToUppercase); Trace_trivial_impl!(std::env::Args); Trace_trivial_impl!(std::env::ArgsOs); Trace_trivial_impl!(std::env::JoinPathsError); 
Trace_trivial_impl!(std::env::Vars); Trace_trivial_impl!(std::env::VarsOs); Trace_trivial_impl!(std::env::VarError); Trace_trivial_impl!(std::ffi::CStr); Trace_trivial_impl!(std::ffi::CString); Trace_trivial_impl!(std::ffi::FromBytesUntilNulError); Trace_trivial_impl!(std::ffi::FromVecWithNulError); Trace_trivial_impl!(std::ffi::IntoStringError); Trace_trivial_impl!(std::ffi::NulError); Trace_trivial_impl!(std::ffi::OsStr); Trace_trivial_impl!(std::ffi::OsString); Trace_trivial_impl!(std::ffi::FromBytesWithNulError); Trace_trivial_impl!(std::ffi::c_void); Trace_trivial_impl!(std::fmt::Error); Trace_trivial_impl!(std::fmt::Alignment); Trace_trivial_impl!(std::fs::DirBuilder); Trace_trivial_impl!(std::fs::DirEntry); Trace_trivial_impl!(std::fs::File); Trace_trivial_impl!(std::fs::FileTimes); Trace_trivial_impl!(std::fs::FileType); Trace_trivial_impl!(std::fs::Metadata); Trace_trivial_impl!(std::fs::OpenOptions); Trace_trivial_impl!(std::fs::Permissions); Trace_trivial_impl!(std::fs::ReadDir); Trace_trivial_impl!(std::fs::TryLockError); Trace_trivial_impl!(std::hash::DefaultHasher); Trace_trivial_impl!(std::hash::RandomState); Trace_trivial_impl!(std::hash::SipHasher); Trace_trivial_impl!(std::io::Empty); Trace_trivial_impl!(std::io::Error); Trace_trivial_impl!(std::io::PipeReader); Trace_trivial_impl!(std::io::PipeWriter); Trace_trivial_impl!(std::io::Repeat); Trace_trivial_impl!(std::io::Sink); Trace_trivial_impl!(std::io::Stdin); Trace_trivial_impl!(std::io::Stdout); Trace_trivial_impl!(std::io::WriterPanicked); Trace_trivial_impl!(std::io::ErrorKind); Trace_trivial_impl!(std::io::SeekFrom); Trace_trivial_impl!(std::marker::PhantomPinned); Trace_trivial_impl!(std::net::AddrParseError); Trace_trivial_impl!(std::net::Ipv4Addr); Trace_trivial_impl!(std::net::Ipv6Addr); Trace_trivial_impl!(std::net::SocketAddrV4); Trace_trivial_impl!(std::net::SocketAddrV6); Trace_trivial_impl!(std::net::TcpListener); Trace_trivial_impl!(std::net::TcpStream); Trace_trivial_impl!(std::net::UdpSocket); Trace_trivial_impl!(std::net::IpAddr); Trace_trivial_impl!(std::net::Shutdown); Trace_trivial_impl!(std::net::SocketAddr); Trace_trivial_impl!(std::num::ParseFloatError); Trace_trivial_impl!(std::num::ParseIntError); Trace_trivial_impl!(std::num::TryFromIntError); Trace_trivial_impl!(std::num::FpCategory); Trace_trivial_impl!(std::num::IntErrorKind); Trace_trivial_impl!(std::ops::RangeFull); Trace_trivial_impl!(std::path::Path); Trace_trivial_impl!(std::path::PathBuf); Trace_trivial_impl!(std::path::StripPrefixError); Trace_trivial_impl!(std::process::Child); Trace_trivial_impl!(std::process::ChildStderr); Trace_trivial_impl!(std::process::ChildStdin); Trace_trivial_impl!(std::process::ChildStdout); Trace_trivial_impl!(std::process::Command); Trace_trivial_impl!(std::process::ExitCode); Trace_trivial_impl!(std::process::Output); Trace_trivial_impl!(std::process::Stdio); Trace_trivial_impl!(std::slice::GetDisjointMutError); Trace_trivial_impl!(str); Trace_trivial_impl!(std::rc::Rc); Trace_trivial_impl!(std::sync::Arc); Trace_trivial_impl!(std::string::FromUtf8Error); Trace_trivial_impl!(std::string::FromUtf16Error); Trace_trivial_impl!(std::string::String); Trace_trivial_impl!(std::thread::AccessError); Trace_trivial_impl!(std::thread::Builder); Trace_trivial_impl!(std::thread::Thread); Trace_trivial_impl!(std::thread::ThreadId); Trace_trivial_impl!(std::time::Duration); Trace_trivial_impl!(std::time::Instant); Trace_trivial_impl!(std::time::SystemTime); Trace_trivial_impl!(std::time::SystemTimeError); 
Trace_trivial_impl!(std::time::TryFromFloatSecsError); /// Implement [`TraceWith`] for a tuple. macro_rules! Trace_tuple { () => {}; // This case is handled above by the trivial case ($($args:ident),*) => { unsafe impl),*> TraceWith for ($($args,)*) { fn accept(&self, visitor: &mut V) -> Result<(), ()> { #[expect(clippy::allow_attributes)] #[allow(non_snake_case)] let &($(ref $args,)*) = self; $(($args).accept(visitor)?;)* Ok(()) } } } } Trace_tuple!(); Trace_tuple!(A); Trace_tuple!(A, B); Trace_tuple!(A, B, C); Trace_tuple!(A, B, C, D); Trace_tuple!(A, B, C, D, E); Trace_tuple!(A, B, C, D, E, F); Trace_tuple!(A, B, C, D, E, F, G); Trace_tuple!(A, B, C, D, E, F, G, H); Trace_tuple!(A, B, C, D, E, F, G, H, I); Trace_tuple!(A, B, C, D, E, F, G, H, I, J); /// Implement `TraceWith` for one function type. macro_rules! Trace_fn { ($ty:ty $(,$args:ident)*) => { unsafe impl TraceWith for $ty { fn accept(&self, _: &mut V) -> Result<(), ()> { Ok(()) } } } } /// Implement `TraceWith` for all functions with a given set of args. macro_rules! Trace_fn_group { () => { Trace_fn!(extern "Rust" fn () -> Ret); Trace_fn!(extern "C" fn () -> Ret); Trace_fn!(unsafe extern "Rust" fn () -> Ret); Trace_fn!(unsafe extern "C" fn () -> Ret); }; ($($args:ident),*) => { Trace_fn!(extern "Rust" fn ($($args),*) -> Ret, $($args),*); Trace_fn!(extern "C" fn ($($args),*) -> Ret, $($args),*); Trace_fn!(extern "C" fn ($($args),*, ...) -> Ret, $($args),*); Trace_fn!(unsafe extern "Rust" fn ($($args),*) -> Ret, $($args),*); Trace_fn!(unsafe extern "C" fn ($($args),*) -> Ret, $($args),*); Trace_fn!(unsafe extern "C" fn ($($args),*, ...) -> Ret, $($args),*); } } Trace_fn_group!(); Trace_fn_group!(A); Trace_fn_group!(A, B); Trace_fn_group!(A, B, C); Trace_fn_group!(A, B, C, D); Trace_fn_group!(A, B, C, D, E); Trace_fn_group!(A, B, C, D, E, F); Trace_fn_group!(A, B, C, D, E, F, G); Trace_fn_group!(A, B, C, D, E, F, G, H); Trace_fn_group!(A, B, C, D, E, F, G, H, I); Trace_fn_group!(A, B, C, D, E, F, G, H, I, J); ================================================ FILE: dumpster/src/lib.rs ================================================ /* dumpster, a cycle-tracking garbage collector for Rust. Copyright (C) 2023 Clayton Ramsey. This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! A cycle-tracking concurrent garbage collector with an easy-to-use API. //! //! Most garbage collectors are _tracing_ garbage collectors, meaning that they keep track of a set //! of roots which are directly accessible from the stack, and then use those roots to find the set //! of all accessible allocations. //! However, because Rust does not allow us to hook into when a value is moved, it's quite difficult //! to detect when a garbage-collected value stops being a root. //! //! `dumpster` takes a different approach. //! It begins by using simple reference counting, then automatically detects cycles. //! Allocations are freed when their reference count reaches zero or when they are only accessible //! via their descendants. //! //! Garbage-collected pointers can be created and destroyed in _O(1)_ amortized time, but destroying //! a garbage-collected pointer may take _O(r)_, where _r_ is the number of existing //! garbage-collected references, on occasion. //! However, the cleanups that require _O(r)_ performance are performed once every _O(1/r)_ times //! 
a reference is dropped, yielding an amortized _O(1)_ runtime. //! //! # Why should you use this crate? //! //! In short, `dumpster` offers a great mix of usability, performance, and flexibility. //! //! - `dumpster`'s API is a drop-in replacement for `std`'s reference-counted shared allocations //! (`Rc` and `Arc`). //! - It's very performant and has builtin implementations of both thread-local and concurrent //! garbage collection. //! - There are no restrictions on the reference structure within a garbage-collected allocation //! (references may point in any way you like). //! - It's trivial to make a custom type Trace using the provided derive macros. //! - You can even store `?Sized` data in a garbage-collected pointer! //! //! # Module structure //! //! `dumpster` contains 3 core modules: the root (this module), as well as [`sync`] and [`unsync`]. //! `sync` contains an implementation of thread-safe garbage-collected pointers, while `unsync` //! contains an implementation of thread-local garbage-collected pointers which cannot be shared //! across threads. //! Thread-safety requires some synchronization overhead, so for a single-threaded application, //! it is recommended to use `unsync`. //! //! The project root contains common definitions across both `sync` and `unsync`. //! Types which implement [`Trace`] can immediately be used in `unsync`, but in order to use //! `sync`'s garbage collector, the types must also implement [`Sync`]. //! //! # Examples //! //! If your code is meant to run as a single thread, or if your data doesn't need to be shared //! across threads, you should use [`unsync::Gc`] to store your allocations. //! //! ``` //! use dumpster::unsync::Gc; //! use std::cell::Cell; //! //! let my_gc = Gc::new(Cell::new(0451)); //! //! let other_gc = my_gc.clone(); // shallow copy //! other_gc.set(512); //! //! assert_eq!(my_gc.get(), 512); //! ``` //! //! For data which is shared across threads, you can use [`sync::Gc`] with the exact same API. //! //! ``` //! use dumpster::sync::Gc; //! use std::sync::Mutex; //! //! let my_shared_gc = Gc::new(Mutex::new(25)); //! let other_shared_gc = my_shared_gc.clone(); //! //! std::thread::scope(|s| { //! s.spawn(move || { //! *other_shared_gc.lock().unwrap() = 35; //! }); //! }); //! //! println!("{}", *my_shared_gc.lock().unwrap()); //! ``` //! //! It's trivial to use custom data structures with the provided derive macro. //! //! ``` //! use dumpster::{unsync::Gc, Trace}; //! use std::cell::RefCell; //! //! #[derive(Trace)] //! struct Foo { //! refs: RefCell>>, //! } //! //! let foo = Gc::new(Foo { //! refs: RefCell::new(Vec::new()), //! }); //! //! foo.refs.borrow_mut().push(foo.clone()); //! //! drop(foo); //! //! // even though foo had a self reference, it still got collected! //! ``` //! //! # Installation //! //! To use `dumpster`, add the following lines to your `Cargo.toml`. //! //! ```toml //! [dependencies] //! dumpster = "2.1.0" //! ``` //! //! # Optional features //! //! ## `derive` //! //! `derive` is enabled by default. //! It enables the derive macro for `Trace`, which makes it easy for users to implement their //! own Trace types. //! //! ``` //! use dumpster::{unsync::Gc, Trace}; //! use std::cell::RefCell; //! //! #[derive(Trace)] // no manual implementation required //! struct Foo(RefCell>>); //! //! let my_foo = Gc::new(Foo(RefCell::new(None))); //! *my_foo.0.borrow_mut() = Some(my_foo.clone()); //! //! drop(my_foo); // my_foo will be automatically cleaned up //! ``` //! //! ## `either` //! //! 
`either` is disabled by default. It adds support for the [`either`](https://crates.io/crates/either) crate, //! specifically by implementing [`Trace`] for [`either::Either`](https://docs.rs/either/1.13.0/either/enum.Either.html). //! //! ## `coerce-unsized` //! //! `coerce-unsized` is disabled by default. //! This enables the implementation of [`std::ops::CoerceUnsized`] for each garbage collector, //! making it possible to use `Gc` with `!Sized` types conveniently. #![cfg_attr( feature = "coerce-unsized", doc = r#" ``` // this only works with "coerce-unsized" enabled while compiling on nightly Rust use dumpster::unsync::Gc; let gc1: Gc<[u8]> = Gc::new([1, 2, 3]); ``` "# )] //! To use `coerce-unsized`, edit your installation to `Cargo.toml` to include the feature. //! //! ```toml //! [dependencies] //! dumpster = { version = "2.1.0", features = ["coerce-unsized"]} //! ``` //! //! ## Loom support //! //! `dumpster` has experimental support for permutation testing under [`loom`](https://github.com/tokio-rs/loom). //! It is expected to be unstable and buggy. //! To compile `dumpster` using `loom`, add `--cfg loom` to `RUSTFLAGS` when compiling, for example: //! //! ```sh //! RUSTFLAGS='--cfg loom' cargo test //! ``` //! //! # License //! //! `dumpster` is licensed under the Mozilla Public License, version 2.0. //! For more details, refer to //! [LICENSE.md](https://github.com/claytonwramsey/dumpster/blob/master/LICENSE.md). //! //! This project includes portions of code derived from the Rust standard library, //! which is dual-licensed under the MIT and Apache 2.0 licenses. //! Copyright (c) The Rust Project Developers. #![warn(clippy::pedantic)] #![warn(clippy::cargo)] #![warn(missing_docs)] #![warn(clippy::missing_docs_in_private_items)] #![warn(clippy::allow_attributes, reason = "prefer expect over allow")] #![allow(clippy::multiple_crate_versions, clippy::result_unit_err)] #![cfg_attr(feature = "coerce-unsized", feature(coerce_unsized))] #![cfg_attr(feature = "coerce-unsized", feature(unsize))] mod impls; mod ptr; pub mod sync; pub mod unsync; /// Contains the sealed trait for [`Trace`]. mod trace { use crate::{sync::TraceSync, unsync::TraceUnsync, ContainsGcs, TraceWith}; /// The sealed trait for [`Trace`](crate::Trace), /// hiding away the implementation details and making it /// impossible to manually implement `Trace`. #[expect(clippy::missing_safety_doc)] #[expect(private_bounds)] pub unsafe trait TraceWithV: TraceWith + TraceSync + TraceUnsync {} unsafe impl TraceWithV for T where T: ?Sized + TraceWith + TraceSync + TraceUnsync {} } /// The trait that any garbage-collected data must implement. /// /// This trait should usually be implemented by using `#[derive(Trace)]`, using the provided /// macro. /// Only data structures using raw pointers or other magic should manually implement `Trace`. /// /// To manually implement `Trace` you need to implement [`TraceWith`]. /// Any type that implements `TraceWith` for all V: [Visitor] /// automatically implements `Trace`. /// /// # Examples /// /// Implementing `Trace` for a scalar type which contains no garbage-collected references /// is very easy. /// Accepting a visitor is simply a no-op. /// /// ``` /// use dumpster::{TraceWith, Visitor}; /// /// struct Foo(u8); /// /// unsafe impl TraceWith for Foo { /// fn accept(&self, visitor: &mut V) -> Result<(), ()> { /// Ok(()) /// } /// } /// ``` /// /// However, if a data structure contains a garbage collected pointer, it must delegate to its /// fields in `accept`. 
/// /// ``` /// use dumpster::{unsync::Gc, TraceWith, Visitor}; /// /// struct Bar(Gc); /// /// unsafe impl TraceWith for Bar { /// fn accept(&self, visitor: &mut V) -> Result<(), ()> { /// self.0.accept(visitor) /// } /// } /// ``` /// /// A data structure with two or more fields which could own a garbage-collected pointer should /// delegate to both fields in a consistent order: /// /// ``` /// use dumpster::{unsync::Gc, TraceWith, Visitor}; /// /// struct Baz { /// a: Gc, /// b: Gc, /// } /// /// unsafe impl TraceWith for Baz { /// fn accept(&self, visitor: &mut V) -> Result<(), ()> { /// self.a.accept(visitor)?; /// self.b.accept(visitor)?; /// Ok(()) /// } /// } /// ``` /// /// `Trace` is dyn-compatible, so you can use it as a subtrait /// to allocate your own trait object. /// /// ``` /// use dumpster::{ /// unsync::{coerce_gc, Gc}, /// Trace, /// }; /// /// trait MyTrait: Trace {} /// impl MyTrait for T {} /// /// let gc: Gc = Gc::new(5); /// let gc: Gc = coerce_gc!(gc); /// ``` pub trait Trace: trace::TraceWithV {} impl Trace for T where T: trace::TraceWithV + ?Sized {} /// The underlying tracing implementation powering the [`Trace`] trait. /// /// # Safety /// /// If the implementation of this trait is incorrect, this will result in undefined behavior, /// typically double-frees or use-after-frees. /// This includes [`TraceWith::accept`], even though it is a safe function, since its correctness /// is required for safety. /// /// The garbage collector in `dumpster` requires strong assumptions about the values inside of a /// `Gc`; by implementing `TraceWith`, you are responsible for these assumptions. /// Specifically, in order to be `TraceWith`, a value must have a _tree-like_ ownership structure. /// If some type `T` implements `TraceWith`, it means that no references to a value inside `T` will /// remain valid while `T` is moved. For instance, this means that `Rc` can never be `Trace`, as /// moving one `Rc` will not invalidate other `Rc`s pointing to the same allocation. /// We allow exceptions for fields of `T` that are not visited by the implementation of /// [`TraceWith::accept`], such as borrows (see the implementation of `TraceWith` for `&T`) and /// naturally for [`unsync::Gc`] and [`sync::Gc`]. /// /// Any structure whose implementation of `TraceWith` comes from `#[derive(Trace)]` satisfies the /// tree-like requirement. pub unsafe trait TraceWith { /// Accept a visitor to this garbage-collected value. /// /// Implementors of this function need only delegate to all fields owned by this value which /// may contain a garbage-collected reference (either a [`sync::Gc`] or a [`unsync::Gc`]). /// This delegation must be done in a consistent order. /// /// For structures which have more than one field, they should return immediately after the /// first `Err` is returned from one of its fields. /// To do so efficiently, we recommend using the try operator (`?`) on each field and then /// returning `Ok(())` after delegating to each field. /// /// # Errors /// /// Errors are returned from this function whenever a field of this object returns an error /// after delegating acceptance to it, or if this value's data is inaccessible (such as /// attempting to borrow from a [`RefCell`](std::cell::RefCell) which has already been /// mutably borrowed). fn accept(&self, visitor: &mut V) -> Result<(), ()>; } /// A visitor for a garbage collected value. 
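///
/// For instance, a minimal visitor which simply records whether it has seen any garbage-collected
/// pointer could look like this (an illustrative sketch using only the public API):
///
/// ```
/// use dumpster::{sync, unsync, Trace, Visitor};
///
/// /// Records whether any `Gc` was visited.
/// struct SawGc(bool);
///
/// impl Visitor for SawGc {
///     fn visit_sync<T>(&mut self, _: &sync::Gc<T>)
///     where
///         T: Trace + Send + Sync + ?Sized,
///     {
///         self.0 = true;
///     }
///
///     fn visit_unsync<T>(&mut self, _: &unsync::Gc<T>)
///     where
///         T: Trace + ?Sized,
///     {
///         self.0 = true;
///     }
/// }
/// ```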
/// /// This visitor allows us to hide details of the implementation of the garbage-collection procedure /// from implementors of [`Trace`]. /// /// When accepted by a `Trace`, this visitor will be delegated down until it reaches a /// garbage-collected pointer. /// Then, the garbage-collected pointer will call one of `visit_sync` or `visit_unsync`, depending /// on which type of pointer it is. /// /// In general, it's not expected for consumers of this library to write their own visitors. pub trait Visitor { /// Visit a synchronized garbage-collected pointer. /// /// This function is called for every [`sync::Gc`] owned by the value that accepted this /// visitor. fn visit_sync(&mut self, gc: &sync::Gc) where T: Trace + Send + Sync + ?Sized; /// Visit a thread-local garbage-collected pointer. /// /// This function is called for every [`unsync::Gc`] owned by the value that accepted this /// visitor. fn visit_unsync(&mut self, gc: &unsync::Gc) where T: Trace + ?Sized; } // Re-export #[derive(Trace)]. // // The reason re-exporting is not enabled by default is that disabling it would // be annoying for crates that provide handwritten impls or data formats. They // would need to disable default features and then explicitly re-enable std. #[cfg(feature = "derive")] extern crate dumpster_derive; #[cfg(feature = "derive")] /// The derive macro for implementing `Trace`. /// /// This enables users of `dumpster` to easily store custom types inside a `Gc`. /// To do so, simply annotate your type with `#[derive(Trace)]`. /// /// # Examples /// /// ``` /// use dumpster::Trace; /// /// #[derive(Trace)] /// struct Foo { /// bar: Option>, /// } /// ``` /// /// You can specify the crate path for the `dumpster` crate using the `dumpster` attribute: /// /// ``` /// use dumpster as dumpster_renamed; /// use dumpster_renamed::Trace; /// /// #[derive(Trace)] /// #[dumpster(crate = dumpster_renamed)] /// struct Foo { /// bar: Option>, /// } /// ``` pub use dumpster_derive::Trace; /// Determine whether some value contains a garbage-collected pointer. /// /// This function will return one of three values: /// - `Ok(true)`: The data structure contains a garbage-collected pointer. /// - `Ok(false)`: The data structure contains no garbage-collected pointers. /// - `Err(())`: The data structure was accessed while we checked it for garbage-collected pointers. fn contains_gcs(x: &T) -> Result { let mut visit = ContainsGcs(false); x.accept(&mut visit)?; Ok(visit.0) } /// A visitor structure used for determining whether some garbage-collected pointer contains a /// `Gc` in its pointed-to value. struct ContainsGcs(bool); impl Visitor for ContainsGcs { fn visit_sync(&mut self, _: &sync::Gc) where T: Trace + Send + Sync + ?Sized, { self.0 = true; } fn visit_unsync(&mut self, _: &unsync::Gc) where T: Trace + ?Sized, { self.0 = true; } } /// Panics with a message that explains that the gc object has already been collected. #[cold] #[inline(never)] fn panic_deref_of_collected_object() -> ! { panic!( "Attempt to dereference Gc to already-collected object. \ This means a Gc escaped from a Drop implementation, likely implying a bug in your code.", ); } ================================================ FILE: dumpster/src/ptr.rs ================================================ /* dumpster, a cycle-tracking garbage collector for Rust. Copyright (C) 2023 Clayton Ramsey. This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. 
If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Custom pointer types used by this garbage collector. use std::{ fmt, mem::{size_of, MaybeUninit}, ptr::{addr_of, addr_of_mut, copy_nonoverlapping, null, NonNull}, }; #[repr(C)] #[derive(Clone, Copy)] /// A pointer for an allocation, extracted out as raw data. /// This contains both the pointer and all the pointer's metadata, but hidden behind an unknown /// interpretation. /// We trust that all pointers (even to `?Sized` or `dyn` types) are 2 words or fewer in size. /// This is a hack! Like, a big hack! pub(crate) struct Erased([*const u8; 2]); unsafe impl Send for Erased {} unsafe impl Sync for Erased {} impl Erased { /// Construct a new erased pointer to some data from a reference /// /// # Panics /// /// This function will panic if the size of a reference is larger than the size of an /// `ErasedPtr`. /// To my knowledge, there are no pointer types with this property. pub fn new(reference: NonNull) -> Erased { let mut ptr = Erased([null(); 2]); let ptr_size = size_of::>(); // Extract out the pointer as raw memory assert!( ptr_size <= size_of::(), "pointers to T are too big for storage" ); unsafe { // SAFETY: We know that `cleanup` has at least as much space as `ptr_size`, and that // `box_ref` has size equal to `ptr_size`. copy_nonoverlapping( addr_of!(reference).cast::(), addr_of_mut!(ptr.0).cast::(), ptr_size, ); } ptr } /// Specify this pointer into a pointer of a particular type. /// /// # Safety /// /// This function must only be specified to the type that the pointer was constructed with /// via [`Erased::new`]. pub unsafe fn specify(self) -> NonNull { let mut box_ref: MaybeUninit> = MaybeUninit::zeroed(); // For some reason, switching the ordering of casts causes this to create wacky undefined // behavior. Why? I don't know. I have better things to do than pontificate on this on a // Sunday afternoon. copy_nonoverlapping( addr_of!(self.0).cast::(), addr_of_mut!(box_ref).cast::(), size_of::>(), ); box_ref.assume_init() } } impl fmt::Debug for Erased { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "ErasedPtr({:x?})", self.0) } } /// A nullable pointer to an `?Sized` type. /// /// We need this because it's actually impossible to create a null `*mut T` if `T` is `?Sized`. pub(crate) struct Nullable(*mut T); impl Nullable { /// Create a new nullable pointer from a non-null pointer. pub fn new(ptr: NonNull) -> Nullable { Nullable(ptr.as_ptr()) } /// Convert this pointer to a null pointer. pub fn as_null(self) -> Nullable { Nullable(self.0.with_addr(0)) } /// Determine whether this pointer is null. pub fn is_null(self) -> bool { self.as_option().is_none() } /// Convert this pointer to an `Option>`. pub fn as_option(self) -> Option> { NonNull::new(self.0) } /// Convert this pointer to a `*mut T`. pub fn as_ptr(self) -> *mut T { self.0 } /// Create a new nullable pointer from a pointer. pub fn from_ptr(ptr: *mut T) -> Self { Self(ptr) } /// Convert this pointer to a `NonNull`, panicking if this pointer is null with message /// `msg`. pub fn expect(self, msg: &str) -> NonNull { self.as_option().expect(msg) } /// Convert this pointer to a `NonNull`, panicking if this pointer is null. pub fn unwrap(self) -> NonNull { self.as_option().unwrap() } /// Convert this pointer to a `NonNull`. /// /// # Safety /// /// The pointer must not be null. 
pub unsafe fn unwrap_unchecked(self) -> NonNull { self.as_option().unwrap_unchecked() } } impl Clone for Nullable { fn clone(&self) -> Self { *self } } impl Copy for Nullable {} #[cfg(feature = "coerce-unsized")] impl std::ops::CoerceUnsized> for Nullable where T: std::marker::Unsize + ?Sized, U: ?Sized, { } impl fmt::Debug for Nullable { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "Nullable({:x?})", self.0) } } #[cfg(test)] mod tests { use core::any::Any; use std::alloc::{dealloc, Layout}; use super::*; #[test] fn erased_alloc() { let orig_ptr: &mut u8 = Box::leak(Box::new(7)); let erased_ptr = Erased::new(NonNull::from(orig_ptr)); unsafe { let remade_ptr = erased_ptr.specify::(); assert_eq!(*remade_ptr.as_ref(), 7); dealloc(remade_ptr.as_ptr(), Layout::for_value(remade_ptr.as_ref())); } } #[test] fn erased_alloc_slice() { let orig_ptr: &mut [u8] = Box::leak(Box::new([7, 8, 9])); let erased_ptr = Erased::new(NonNull::from(orig_ptr)); unsafe { let remade_ptr = erased_ptr.specify::<[u8]>(); assert_eq!(remade_ptr.as_ref(), [7, 8, 9]); dealloc( remade_ptr.as_ptr().cast(), Layout::for_value(remade_ptr.as_ref()), ); } } #[test] fn erased_alloc_dyn() { let orig_ptr: &mut dyn Any = Box::leak(Box::new(7u8)); let erased_ptr = Erased::new(NonNull::from(orig_ptr)); unsafe { let remade_ptr = erased_ptr.specify::(); assert_eq!(*remade_ptr.as_ref().downcast_ref::().unwrap(), 7); dealloc( remade_ptr.as_ptr().cast(), Layout::for_value(remade_ptr.as_ref()), ); } } } ================================================ FILE: dumpster/src/sync/cell.rs ================================================ /* dumpster, a cycle-tracking garbage collector for Rust. Copyright (C) 2023 Clayton Ramsey. This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! A shim for using either Loom or the standard library in garbage-collected environments. #[cfg(loom)] use loom::cell::UnsafeCell; #[cfg(not(loom))] use std::cell::UnsafeCell; #[derive(Debug)] /// An unsafe cell that is agnostic over using `std` or `loom` as its backing implementation. /// It is intended to only be used with [`Copy`] data. pub struct UCell(UnsafeCell); impl UCell { /// Construct a `UCell` containing the value. pub fn new(x: T) -> Self { Self(UnsafeCell::new(x)) } /// Get the value inside the `UCell`. /// /// # Safety /// /// This function can only be called when no other code is calling [`UCell::set`]. pub unsafe fn get(&self) -> T where T: Copy, { #[cfg(loom)] { *self.0.get().deref() } #[cfg(not(loom))] { *self.0.get() } } /// Overwrite the value inside this cell. /// /// # Safety /// /// This function can only be called when no other code is calling [`UCell::set`] or /// [`UCell::get`]. pub unsafe fn set(&self, x: T) { #[cfg(loom)] { *self.0.get_mut().deref() = x; } #[cfg(not(loom))] { *self.0.get() = x; } } } #[cfg(not(loom))] #[cfg(feature = "coerce-unsized")] impl std::ops::CoerceUnsized>> for UCell> where T: std::marker::Unsize + ?Sized, U: ?Sized, { } ================================================ FILE: dumpster/src/sync/collect.rs ================================================ /* dumpster, a cycle-tracking garbage collector for Rust. Copyright (C) 2023 Clayton Ramsey. This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! 
A synchronized collection algorithm. use std::{ alloc::{dealloc, Layout}, cell::{Cell, LazyCell, RefCell}, collections::hash_map::Entry, hash::Hash, mem::{replace, swap, take, transmute}, ptr::{drop_in_place, NonNull}, }; #[cfg(not(loom))] use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; use foldhash::{HashMap, HashMapExt}; #[cfg(loom)] use loom::{ lazy_static, sync::atomic::{AtomicPtr, AtomicUsize, Ordering}, thread_local, }; #[cfg(not(loom))] use parking_lot::{Mutex, RwLock}; #[cfg(loom)] use crate::sync::loom_ext::{Mutex, RwLock}; use crate::{ptr::Erased, Trace, Visitor}; use super::{default_collect_condition, CollectCondition, CollectInfo, Gc, GcBox, CURRENT_TAG}; /// The garbage truck, which is a global data structure containing information about allocations /// which might need to be collected. struct GarbageTruck { /// The contents of the garbage truck, containing all the allocations which need to be /// collected and have already been delivered by a [`Dumpster`]. contents: Mutex>>, /// A lock used for synchronizing threads that are awaiting completion of a collection process. /// This lock should be acquired for reads by threads running a collection and for writes by /// threads awaiting collection completion. collecting_lock: RwLock<()>, /// The number of [`Gc`]s dropped since the last time [`GarbageTruck::collect_all()`] was /// called. n_gcs_dropped: AtomicUsize, /// The number of [`Gc`]s currently existing (which have not had their internals replaced with /// `None`). n_gcs_existing: AtomicUsize, /// The function which determines whether a collection should be triggered. /// This pointer value should always be cast to a [`CollectCondition`], but since `AtomicPtr` /// doesn't handle function pointers correctly, we just cast to `*mut ()`. collect_condition: AtomicPtr<()>, } /// A structure containing the global information for the garbage collector. pub(super) struct Dumpster { /// A lookup table for the allocations which may need to be cleaned up later. pub contents: RefCell>, /// The number of times an allocation on this thread has been dropped. n_drops: Cell, } #[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)] /// A unique identifier for an allocation. pub(super) struct AllocationId(NonNull>); #[derive(Debug)] /// The information which describes an allocation that may need to be cleaned up later. pub(super) struct TrashCan { /// A pointer to the allocation to be cleaned up. ptr: Erased, /// The function which can be used to build a reference graph. /// This function is safe to call on `ptr`. dfs_fn: unsafe fn(Erased, &mut HashMap), } #[derive(Debug)] /// A node in the reference graph, which is constructed while searching for unreachable allocations. struct AllocationInfo { /// An erased pointer to the allocation. ptr: Erased, /// Function for dropping the allocation when its weak and strong count hits zero. /// Should have the same behavior as dropping a Gc normally to a reference count of zero. weak_drop_fn: unsafe fn(Erased), /// Information about this allocation's reachability. reachability: Reachability, } #[derive(Debug)] /// The state of whether an allocation is reachable or of unknown reachability. enum Reachability { /// The information describing an allocation whose accessibility is unknown. Unknown { /// The IDs for the allocations directly accessible from this allocation. children: Vec, /// The number of references in the reference count for this allocation which are /// "unaccounted," which have not been found while constructing the graph. 
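/// For example, if an allocation's strong count is 3 but the graph builder has only found 2
/// references to it so far, then 1 reference is unaccounted for: it must be held from outside the
/// candidate garbage, so the allocation is treated as a root.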
/// It is the difference between the allocations indegree in the "true" reference graph vs /// the one we are currently building. n_unaccounted: usize, /// A function used to destroy the allocation. destroy_fn: unsafe fn(Erased, &HashMap), }, /// The allocation here is reachable. /// No further information is needed. Reachable, } #[cfg(not(loom))] /// The global garbage truck. /// All [`TrashCan`]s should eventually end up in here. static GARBAGE_TRUCK: GarbageTruck = GarbageTruck::new(); #[cfg(loom)] lazy_static! { static ref GARBAGE_TRUCK: GarbageTruck = GarbageTruck::new(); } thread_local! { /// The dumpster for this thread. /// Allocations which are "dirty" will be transferred to this dumpster before being moved into /// the garbage truck for final collection. pub(super) static DUMPSTER: Dumpster = Dumpster { contents: RefCell::new(HashMap::new()), n_drops: Cell::new(0), }; } #[cfg(not(loom))] thread_local! { /// Whether the currently-running thread is doing a cleanup. /// This cannot be stored in `DUMPSTER` because otherwise it would cause weird use-after-drop /// behavior. static CLEANING: Cell = const { Cell::new(false) }; } #[cfg(loom)] thread_local! { /// Whether the currently-running thread is doing a cleanup. /// This cannot be stored in `DUMPSTER` because otherwise it would cause weird use-after-drop /// behavior. static CLEANING: Cell = Cell::new(false); } /// Collect all allocations in the garbage truck (but not necessarily the dumpster), then await /// completion of the collection. /// Ensures that all allocations dropped on the calling thread are cleaned up pub fn collect_all_await() { _ = DUMPSTER.try_with(|d| d.deliver_to(&GARBAGE_TRUCK)); GARBAGE_TRUCK.collect_all(); drop(GARBAGE_TRUCK.collecting_lock.read()); } /// Notify that a `Gc` was destroyed, and update the tracking count for the number of dropped and /// existing `Gc`s. /// /// This may trigger a linear-time cleanup of all allocations, but this will be guaranteed to /// occur with less-than-linear frequency, so it's always O(1). pub fn notify_dropped_gc() { GARBAGE_TRUCK.n_gcs_existing.fetch_sub(1, Ordering::Relaxed); GARBAGE_TRUCK.n_gcs_dropped.fetch_add(1, Ordering::Relaxed); // Do not do deliver or collect if we are currently cleaning or this thread is dying. // This prevents deadlocks. if !CLEANING.try_with(Cell::get).is_ok_and(|x| !x) { return; } _ = DUMPSTER.try_with(|dumpster| { dumpster.n_drops.set(dumpster.n_drops.get() + 1); if dumpster.is_full() { dumpster.deliver_to(&GARBAGE_TRUCK); } }); let collect_cond = unsafe { // SAFETY: we only ever store collection conditions in the collect-condition box transmute::<*mut (), CollectCondition>( GARBAGE_TRUCK.collect_condition.load(Ordering::Relaxed), ) }; if collect_cond(&CollectInfo { _private: () }) { GARBAGE_TRUCK.collect_all(); } } /// Notify that a [`Gc`] was created, and increment the number of total existing `Gc`s. pub fn notify_created_gc() { GARBAGE_TRUCK.n_gcs_existing.fetch_add(1, Ordering::Relaxed); } /// Mark an allocation as "dirty," implying that it may or may not be inaccessible and need to /// be cleaned up. /// /// # Safety /// /// When calling this method, you have to ensure that `allocation` /// is [convertible to a reference](core::ptr#pointer-to-reference-conversion). 
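/// Marking an allocation dirty for the first time also increments its weak count, so that the
/// allocation's header remains valid until the collector has processed it, even if every strong
/// reference is dropped in the meantime.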
pub(super) unsafe fn mark_dirty(allocation: NonNull>) where T: Trace + Send + Sync + ?Sized, { _ = DUMPSTER.try_with(|dumpster| { if dumpster .contents .borrow_mut() .insert( AllocationId::from(allocation), TrashCan { ptr: Erased::new(allocation), dfs_fn: dfs::, }, ) .is_none() { // SAFETY: the caller must guarantee that `allocation` meets all the // requirements for a reference. unsafe { allocation.as_ref() } .weak .fetch_add(1, Ordering::Acquire); } }); } /// Mark an allocation as "clean," implying that it has already been cleaned up and does not /// need to be cleaned again. pub(super) fn mark_clean(allocation: &GcBox) where T: Trace + Send + Sync + ?Sized, { _ = DUMPSTER.try_with(|dumpster| { if dumpster .contents .borrow_mut() .remove(&AllocationId::from(allocation)) .is_some() { allocation.weak.fetch_sub(1, Ordering::Release); } }); } #[cfg(test)] /// Deliver all [`TrashCan`]s from this thread's dumpster into the garbage truck. /// /// This function is available to to support testing, but currently is not part of the public API. pub(super) fn deliver_dumpster() { _ = DUMPSTER.try_with(|d| d.deliver_to(&GARBAGE_TRUCK)); } /// Set the function which determines whether the garbage collector should be run. /// /// `f` will be periodically called by the garbage collector to determine whether it should perform /// a full traversal of the heap. /// When `f` returns true, a traversal will begin. /// /// # Examples /// /// ``` /// use dumpster::sync::{set_collect_condition, CollectInfo}; /// /// /// This function will make sure a GC traversal never happens unless directly activated. /// fn never_collect(_: &CollectInfo) -> bool { /// false /// } /// /// set_collect_condition(never_collect); /// ``` pub fn set_collect_condition(f: CollectCondition) { GARBAGE_TRUCK .collect_condition .store(f as *mut (), Ordering::Relaxed); } /// Get the number of `[Gc]`s dropped since the last collection. pub fn n_gcs_dropped() -> usize { GARBAGE_TRUCK.n_gcs_dropped.load(Ordering::Relaxed) } /// Get the number of `[Gc]`s currently existing in the entire program. pub fn n_gcs_existing() -> usize { GARBAGE_TRUCK.n_gcs_existing.load(Ordering::Relaxed) } impl Dumpster { /// Deliver all [`TrashCan`]s contained by this dumpster to the garbage collect, removing them /// from the local dumpster storage and adding them to the global truck. fn deliver_to(&self, garbage_truck: &GarbageTruck) { let mut guard = garbage_truck.contents.lock(); self.n_drops.set(0); self.deliver_to_contents(&mut guard); } /// Deliver the entries in this dumpster to `contents`. fn deliver_to_contents(&self, contents: &mut HashMap) { for (id, can) in self.contents.borrow_mut().drain() { if contents.insert(id, can).is_some() { unsafe { // SAFETY: an allocation can only be in the dumpster if it still exists and its // header is valid id.0.as_ref() } .weak .fetch_sub(1, Ordering::Release); } } } /// Determine whether this dumpster is full (and therefore should have its contents delivered to /// the garbage truck). fn is_full(&self) -> bool { self.contents.borrow().len() > 100_000 || self.n_drops.get() > 100_000 } } impl GarbageTruck { /// Construct a new, empty garbage truck. /// /// Since the `GarbageTruck` is meant to be a single global value, this function should only be /// called once in the initialization of `GARBAGE_TRUCK`. 
#[cfg(not(loom))] const fn new() -> Self { Self { contents: Mutex::new(LazyCell::new(HashMap::new)), collecting_lock: RwLock::new(()), n_gcs_dropped: AtomicUsize::new(0), n_gcs_existing: AtomicUsize::new(0), collect_condition: AtomicPtr::new(default_collect_condition as *mut ()), } } /// Construct a new, empty garbage truck. /// /// Since the `GarbageTruck` is meant to be a single global value, this function should only be /// called once in the initialization of `GARBAGE_TRUCK`. #[cfg(loom)] fn new() -> Self { Self { contents: Mutex::new(LazyCell::new(HashMap::new)), collecting_lock: RwLock::new(()), n_gcs_dropped: AtomicUsize::new(0), n_gcs_existing: AtomicUsize::new(0), collect_condition: AtomicPtr::new(default_collect_condition as *mut ()), } } /// Search through the set of existing allocations which have been marked inaccessible, and see /// if they are inaccessible. /// If so, drop those allocations. fn collect_all(&self) { let collecting_guard = self.collecting_lock.write(); self.n_gcs_dropped.store(0, Ordering::Relaxed); let to_collect = take(&mut **self.contents.lock()); let mut ref_graph = HashMap::with_capacity(to_collect.len()); CURRENT_TAG.fetch_add(1, Ordering::Release); for (_, TrashCan { ptr, dfs_fn }) in to_collect { unsafe { // SAFETY: `ptr` may only be in `to_collect` if it was a valid pointer // and `dfs_fn` must have been created with the intent of referring to // the erased type of `ptr`. dfs_fn(ptr, &mut ref_graph); } } let root_ids = ref_graph .iter() .filter_map(|(&k, v)| match v.reachability { Reachability::Reachable => Some(k), Reachability::Unknown { n_unaccounted, .. } => (n_unaccounted > 0 || unsafe { // SAFETY: we found `k` in the reference graph, // so it must still be an extant allocation k.0.as_ref().weak.load(Ordering::Acquire) > 1 }) .then_some(k), }) .collect::>(); for root_id in root_ids { mark(root_id, &mut ref_graph); } CLEANING.with(|c| c.set(true)); // set of allocations which must be destroyed because we were the last weak pointer to it let mut weak_destroys = Vec::new(); for (id, node) in &ref_graph { let header_ref = unsafe { id.0.as_ref() }; match node.reachability { Reachability::Unknown { destroy_fn, .. } => unsafe { // SAFETY: `destroy_fn` must have been created with `node.ptr` in mind, // and we have proven that no other references to `node.ptr` exist destroy_fn(node.ptr, &ref_graph); }, Reachability::Reachable => { if header_ref.weak.fetch_sub(1, Ordering::Release) == 1 && header_ref.strong.load(Ordering::Acquire) == 0 { // we are the last reference to the allocation. // mark to be cleaned up later // no real synchronization loss to storing the guard because we had the last // reference anyway weak_destroys.push((node.weak_drop_fn, node.ptr)); } } } } CLEANING.with(|c| c.set(false)); for (drop_fn, ptr) in weak_destroys { unsafe { // SAFETY: we have proven (via header_ref.weak = 1) that the cleaning // process had the last reference to the allocation. // `drop_fn` must have been created with the true value of `ptr` in mind. drop_fn(ptr); }; } drop(collecting_guard); } } /// Build out a part of the reference graph, making note of all allocations which are reachable from /// the one described in `ptr`. /// /// # Inputs /// /// - `ptr`: A pointer to the allocation that we should start constructing from. /// - `ref_graph`: A lookup from allocation IDs to node information about that allocation. /// /// # Effects /// /// `ref_graph` will be expanded to include all allocations reachable from `ptr`. 
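/// If the allocation has already been entered into `ref_graph`, its surplus weak reference is
/// released rather than inserting a duplicate node.
/// Likewise, if the value is accessed concurrently while the search runs (its `accept` call fails
/// or its generation tag advances past the current sweep), the allocation is conservatively
/// marked as reachable.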
/// /// # Safety /// /// `ptr` must have been created as a pointer to a `GcBox`. unsafe fn dfs( ptr: Erased, ref_graph: &mut HashMap, ) { let box_ref = unsafe { // SAFETY: We require `ptr` to be a an erased pointer to `GcBox`. ptr.specify::>().as_ref() }; let starting_id = AllocationId::from(box_ref); let Entry::Vacant(v) = ref_graph.entry(starting_id) else { // the weak count was incremented by another DFS operation elsewhere. // Decrement it to have only one from us. box_ref.weak.fetch_sub(1, Ordering::Release); return; }; let strong_count = box_ref.strong.load(Ordering::Acquire); v.insert(AllocationInfo { ptr, weak_drop_fn: drop_weak_zero::, reachability: Reachability::Unknown { children: Vec::new(), n_unaccounted: strong_count, destroy_fn: destroy_erased::, }, }); if box_ref .value .accept(&mut Dfs { ref_graph, current_id: starting_id, }) .is_err() || box_ref.generation.load(Ordering::Acquire) >= CURRENT_TAG.load(Ordering::Relaxed) { // box_ref.value was accessed while we worked // mark this allocation as reachable mark(starting_id, ref_graph); } } #[derive(Debug)] /// The visitor structure used for building the found-reference-graph of allocations. pub(super) struct Dfs<'a> { /// The reference graph. /// Each allocation is assigned a node. ref_graph: &'a mut HashMap, /// The allocation ID currently being visited. /// Used for knowing which node is the parent of another. current_id: AllocationId, } impl Visitor for Dfs<'_> { fn visit_sync(&mut self, gc: &Gc) where T: Trace + Send + Sync + ?Sized, { if Gc::is_dead(gc) { return; } // must not use deref operators since we don't want to update the generation let ptr = unsafe { // SAFETY: This is the same as the deref implementation, but avoids // incrementing the generation count. gc.ptr.get().unwrap() }; let box_ref = unsafe { // SAFETY: same as above. ptr.as_ref() }; let current_tag = CURRENT_TAG.load(Ordering::Relaxed); if gc.tag.swap(current_tag, Ordering::Relaxed) >= current_tag || box_ref.generation.load(Ordering::Acquire) >= current_tag { // This pointer was already tagged by this sweep, so it must have been moved by mark(self.current_id, self.ref_graph); return; } let mut new_id = AllocationId::from(box_ref); let Reachability::Unknown { ref mut children, .. } = self .ref_graph .get_mut(&self.current_id) .unwrap() .reachability else { // this node has been proven reachable by something higher up. No need to keep building // its ref graph return; }; children.push(new_id); match self.ref_graph.entry(new_id) { Entry::Occupied(mut o) => match o.get_mut().reachability { Reachability::Unknown { ref mut n_unaccounted, .. 
} => { *n_unaccounted -= 1; } Reachability::Reachable => (), }, Entry::Vacant(v) => { // This allocation has never been visited by the reference graph builder let strong_count = box_ref.strong.load(Ordering::Acquire); box_ref.weak.fetch_add(1, Ordering::Acquire); v.insert(AllocationInfo { ptr: Erased::new(ptr), weak_drop_fn: drop_weak_zero::, reachability: Reachability::Unknown { children: Vec::new(), n_unaccounted: strong_count - 1, destroy_fn: destroy_erased::, }, }); // Save the previously visited ID, then carry on to the next one swap(&mut new_id, &mut self.current_id); if box_ref.value.accept(self).is_err() || box_ref.generation.load(Ordering::Acquire) >= current_tag { // On failure, this means `**gc` is accessible, and should be marked // as such mark(self.current_id, self.ref_graph); } // Restore current_id and carry on swap(&mut new_id, &mut self.current_id); } } } fn visit_unsync(&mut self, _: &crate::unsync::Gc) where T: Trace + ?Sized, { unreachable!("sync Gc cannot own an unsync Gc"); } } /// Traverse the reference graph, marking `root` and any allocations reachable from `root` as /// reachable. fn mark(root: AllocationId, graph: &mut HashMap) { let node = graph.get_mut(&root).unwrap(); if let Reachability::Unknown { children, .. } = replace(&mut node.reachability, Reachability::Reachable) { for child in children { mark(child, graph); } } } /// A visitor for decrementing the reference count of pointees. pub(super) struct PrepareForDestruction<'a> { /// The reference graph. /// Must have been populated with reachability already. graph: &'a HashMap, } impl Visitor for PrepareForDestruction<'_> { fn visit_sync(&mut self, gc: &crate::sync::Gc) where T: Trace + Send + Sync + ?Sized, { if Gc::is_dead(gc) { return; } let id = AllocationId::from(unsafe { // SAFETY: This is the same as dereferencing the GC. gc.ptr.get().unwrap() }); if matches!(self.graph[&id].reachability, Reachability::Reachable) { unsafe { // SAFETY: This is the same as dereferencing the GC. id.0.as_ref().strong.fetch_sub(1, Ordering::Release); } } unsafe { // SAFETY: we have a unique reference to `gc` as we are destroying the structure. gc.kill(); } } fn visit_unsync(&mut self, _: &crate::unsync::Gc) where T: Trace + ?Sized, { unreachable!("no unsync members of sync Gc possible!"); } } /// Destroy an allocation, obliterating its GCs, dropping it, and deallocating it. /// /// # Safety /// /// `ptr` must have been created from a pointer to a `GcBox`. unsafe fn destroy_erased( ptr: Erased, graph: &HashMap, ) { let specified = ptr.specify::>().as_mut(); specified .value .accept(&mut PrepareForDestruction { graph }) .expect("allocation assumed to be unreachable but somehow was accessed"); let layout = Layout::for_value(specified); drop_in_place(specified); dealloc(std::ptr::from_mut::>(specified).cast(), layout); } /// Function for handling dropping an allocation when its weak and strong reference count reach /// zero. /// /// # Safety /// /// `ptr` must have been created as a pointer to a `GcBox`. 
unsafe fn drop_weak_zero(ptr: Erased) { let mut specified = ptr.specify::>(); assert_eq!(specified.as_ref().weak.load(Ordering::Relaxed), 0); assert_eq!(specified.as_ref().strong.load(Ordering::Relaxed), 0); let layout = Layout::for_value(specified.as_ref()); drop_in_place(specified.as_mut()); dealloc(specified.as_ptr().cast(), layout); } unsafe impl Send for AllocationId {} unsafe impl Sync for AllocationId {} impl From<&GcBox> for AllocationId where T: Trace + Send + Sync + ?Sized, { fn from(value: &GcBox) -> Self { AllocationId(NonNull::from(value).cast()) } } impl From>> for AllocationId where T: Trace + Send + Sync + ?Sized, { fn from(value: NonNull>) -> Self { AllocationId(value.cast()) } } #[cfg(not(loom))] // cannot access lazy static in drop impl Drop for Dumpster { fn drop(&mut self) { self.deliver_to(&GARBAGE_TRUCK); // collect_all(); } } ================================================ FILE: dumpster/src/sync/loom_ext.rs ================================================ /* dumpster, a cycle-tracking garbage collector for Rust. Copyright (C) 2023 Clayton Ramsey. This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Tests for running under loom. #![cfg_attr(not(test), allow(dead_code))] use std::{ mem::MaybeUninit, ops::Deref, sync::{PoisonError, TryLockError}, }; use loom::{ cell::UnsafeCell, sync::{ Mutex as MutexImpl, MutexGuard, RwLock as RwLockImpl, RwLockReadGuard, RwLockWriteGuard, }, }; use crate::{TraceWith, Visitor}; /// Simple wrapper mutex type. pub struct Mutex(MutexImpl); unsafe impl + ?Sized> TraceWith for Mutex { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.0 .try_lock() .map_err(|e| match e { TryLockError::Poisoned(_) => panic!(), TryLockError::WouldBlock => (), })? .deref() .accept(visitor) } } impl Mutex { /// Construct a new mutex. pub fn new(value: T) -> Self { Self(MutexImpl::new(value)) } /// Lock the mutex. pub fn lock(&self) -> MutexGuard<'_, T> { self.0.lock().unwrap_or_else(PoisonError::into_inner) } #[expect(dead_code)] /// Is the mutex locked? pub fn is_locked(&self) -> bool { !matches!(self.0.try_lock(), Err(TryLockError::WouldBlock)) } } /// A read-write lock pub struct RwLock(RwLockImpl); impl RwLock { /// Construct a rwlock. pub fn new(value: T) -> Self { Self(RwLockImpl::new(value)) } /// Get a read guard. pub fn read(&self) -> RwLockReadGuard<'_, T> { self.0.read().unwrap_or_else(PoisonError::into_inner) } /// Get a write guard. pub fn write(&self) -> RwLockWriteGuard<'_, T> { self.0.write().unwrap_or_else(PoisonError::into_inner) } } /// A once-object. struct Once { /// Completed? is_completed: Mutex, } impl Once { /// Construct a once. fn new() -> Self { Self { is_completed: Mutex::new(false), } } /// Call a function once. fn call_once(&self, f: impl FnOnce()) { let mut is_completed = self.is_completed.lock(); if *is_completed { return; } f(); *is_completed = true; } /// Determine if we are completed. fn is_completed(&self) -> bool { *self.is_completed.lock() } } /// A once-lock. pub struct OnceLock { /// A thing that does it once. once: Once, /// The data. value: UnsafeCell>, } unsafe impl Sync for OnceLock {} unsafe impl Send for OnceLock {} unsafe impl> TraceWith for OnceLock { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.with(|value| value.accept(visitor)).unwrap_or(Ok(())) } } impl OnceLock { /// Construct a once-lock. 
pub fn new() -> Self { Self { once: Once::new(), value: UnsafeCell::new(MaybeUninit::uninit()), } } /// Call a function uncheckedly. unsafe fn with_unchecked(&self, f: impl FnOnce(&T) -> R) -> R { self.value .with(|ptr| f(unsafe { (*ptr).assume_init_ref() })) } /// Apply a function. pub fn with(&self, f: impl FnOnce(&T) -> R) -> Option { if self.once.is_completed() { Some(unsafe { self.with_unchecked(f) }) } else { None } } /// Apply or initialize. pub fn with_or_init(&self, init: impl FnOnce() -> T, f: impl FnOnce(&T) -> R) -> R { self.once.call_once(|| { self.value.with_mut(|ptr| unsafe { (*ptr).write(init()); }); }); unsafe { self.with_unchecked(f) } } /// Set the value. pub fn set(&self, value: T) { self.with_or_init(|| value, |_| {}); } } #[test] fn test_once() { use loom::sync::{ atomic::{AtomicUsize, Ordering}, Arc, }; loom::model(|| { let once = Arc::new(Once::new()); let counter = Arc::new(AtomicUsize::new(0)); let mut join_handles = vec![]; for _ in 0..2 { let once = once.clone(); let counter = counter.clone(); join_handles.push(loom::thread::spawn(move || { once.call_once(|| { counter.fetch_add(1, Ordering::Relaxed); }); })); } for join_handle in join_handles { join_handle.join().unwrap(); } assert_eq!(counter.load(Ordering::Relaxed), 1); }); } #[test] fn test_once_lock() { use loom::sync::{ atomic::{AtomicUsize, Ordering}, Arc, }; loom::model(|| { let once_lock = Arc::new(OnceLock::::new()); let counter = Arc::new(AtomicUsize::new(0)); let mut join_handles = vec![]; for _ in 0..2 { let once_lock = once_lock.clone(); let counter = counter.clone(); join_handles.push(loom::thread::spawn({ move || { once_lock.with_or_init( || { counter.fetch_add(1, Ordering::Relaxed); String::from("test") }, |value| { assert_eq!(value, "test"); }, ); } })); } for join_handle in join_handles { join_handle.join().unwrap(); } assert_eq!(counter.load(Ordering::Relaxed), 1); }); } ================================================ FILE: dumpster/src/sync/loom_tests.rs ================================================ /* dumpster, a cycle-tracking garbage collector for Rust. Copyright (C) 2023 Clayton Ramsey. This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use loom::{ lazy_static, sync::atomic::{AtomicUsize, Ordering}, }; use loom_ext::{Mutex, OnceLock}; use crate::Visitor; use super::*; struct DropCount<'a>(&'a AtomicUsize); impl Drop for DropCount<'_> { fn drop(&mut self) { self.0.fetch_add(1, Ordering::Release); } } unsafe impl TraceWith for DropCount<'_> { fn accept(&self, _: &mut V) -> Result<(), ()> { Ok(()) } } struct MultiRef { refs: Mutex>>, #[expect(unused)] count: DropCount<'static>, } unsafe impl TraceWith for MultiRef { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.refs.accept(visitor) } } #[test] fn loom_single_alloc() { lazy_static! { static ref DROP_COUNT: AtomicUsize = AtomicUsize::new(0); } loom::model(|| { let gc1 = Gc::new(DropCount(&DROP_COUNT)); collect(); assert_eq!(DROP_COUNT.load(Ordering::Acquire), 0); drop(gc1); collect(); assert_eq!(DROP_COUNT.load(Ordering::Acquire), 1); }); } #[test] fn loom_self_referential() { struct Foo(Mutex>>); lazy_static! 
{ static ref DROP_COUNT: AtomicUsize = AtomicUsize::new(0); } unsafe impl TraceWith for Foo { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.0.accept(visitor) } } impl Drop for Foo { fn drop(&mut self) { // println!("begin increment of the drop count!"); DROP_COUNT.fetch_add(1, Ordering::Release); } } loom::model(|| { let gc1 = Gc::new(Foo(Mutex::new(None))); *gc1.0.lock() = Some(Gc::clone(&gc1)); assert_eq!(DROP_COUNT.load(Ordering::Acquire), 0); drop(gc1); collect(); assert_eq!(DROP_COUNT.load(Ordering::Acquire), 1); }); } #[test] fn loom_two_cycle() { lazy_static! { static ref DROP_0: AtomicUsize = AtomicUsize::new(0); static ref DROP_1: AtomicUsize = AtomicUsize::new(0); } loom::model(|| { let gc0 = Gc::new(MultiRef { refs: Mutex::new(Vec::new()), count: DropCount(&DROP_0), }); let gc1 = Gc::new(MultiRef { refs: Mutex::new(vec![Gc::clone(&gc0)]), count: DropCount(&DROP_1), }); gc0.refs.lock().push(Gc::clone(&gc1)); collect(); assert_eq!(DROP_0.load(Ordering::Acquire), 0); assert_eq!(DROP_0.load(Ordering::Acquire), 0); drop(gc0); collect(); assert_eq!(DROP_0.load(Ordering::Acquire), 0); assert_eq!(DROP_0.load(Ordering::Acquire), 0); drop(gc1); collect(); assert_eq!(DROP_0.load(Ordering::Acquire), 1); assert_eq!(DROP_0.load(Ordering::Acquire), 1); }); } #[test] #[ignore = "not going to fix this for now"] /// Test that creating a `Gc` during a `Drop` implementation will still not leak the `Gc`. fn loom_sync_leak_by_creation_in_drop() { lazy_static! { static ref BAR_DROP_COUNT: [AtomicUsize; 2] = [AtomicUsize::new(0), AtomicUsize::new(0)]; } struct Foo(OnceLock>, usize); struct Bar(OnceLock>, usize); unsafe impl TraceWith for Foo { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.0.accept(visitor) } } unsafe impl TraceWith for Bar { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.0.accept(visitor) } } impl Drop for Foo { fn drop(&mut self) { println!("calling drop for foo"); let gcbar = Gc::new(Bar(OnceLock::new(), self.1)); gcbar.0.set(gcbar.clone()); drop(gcbar); // MUST be included for the test to succeed (in case Foo is collected on separate // thread) crate::sync::collect::deliver_dumpster(); println!("drop for foo done"); } } impl Drop for Bar { fn drop(&mut self) { println!("drop Bar"); BAR_DROP_COUNT[self.1].fetch_add(1, Ordering::Relaxed); } } loom::model(|| { println!("=========== NEW MODEL ITERATION ==============="); let mut join_handles = vec![]; for i in 0..2 { join_handles.push(loom::thread::spawn(move || { let foo = Gc::new(Foo(OnceLock::new(), i)); foo.0.set(foo.clone()); drop(foo); println!("===== collect from {i} number 1"); collect(); // causes Bar to be created and then leaked println!("===== collect from {i} number 2"); collect(); // cleans up Bar (eventually) assert_eq!( BAR_DROP_COUNT[i].load(Ordering::Relaxed), 1, "failed to collect on thread 0" ); collect::DUMPSTER.with(|d| println!("{:?}", d.contents)); assert!(collect::DUMPSTER.with(|d| d.contents.borrow().is_empty())); })); } for join_handle in join_handles { join_handle.join().unwrap(); } }); } ================================================ FILE: dumpster/src/sync/mod.rs ================================================ /* dumpster, a cycle-tracking garbage collector for Rust. Copyright (C) 2023 Clayton Ramsey. This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Thread-safe shared garbage collection. //! //! 
Most users of this module will be interested in using [`Gc`] directly out of the box - this will //! just work. //! Those with more particular needs (such as benchmarking) should turn toward //! [`set_collect_condition`] in order to tune exactly when the garbage collector does cleanups. //! //! # Examples //! //! ``` //! use dumpster::sync::Gc; //! //! let my_gc = Gc::new(100); //! let other_gc = my_gc.clone(); //! //! drop(my_gc); //! drop(other_gc); //! //! // contents of the Gc are automatically freed //! ``` mod cell; mod collect; #[cfg(loom)] mod loom_ext; #[cfg(all(loom, test))] mod loom_tests; #[cfg(all(test, not(loom)))] mod tests; #[cfg(loom)] use loom::{ lazy_static, sync::atomic::{fence, AtomicUsize, Ordering}, }; use std::fmt::Display; #[cfg(not(loom))] use std::sync::atomic::{fence, AtomicUsize, Ordering}; use std::{ alloc::{dealloc, handle_alloc_error, Layout}, any::TypeId, borrow::{Borrow, Cow}, fmt::Debug, mem::{self, ManuallyDrop, MaybeUninit}, num::NonZeroUsize, ops::Deref, ptr::{self, addr_of, addr_of_mut, drop_in_place, NonNull}, slice, }; use crate::{ contains_gcs, panic_deref_of_collected_object, ptr::Nullable, sync::{ cell::UCell, collect::{Dfs, PrepareForDestruction}, }, Trace, TraceWith, Visitor, }; use self::collect::{ collect_all_await, mark_clean, mark_dirty, n_gcs_dropped, n_gcs_existing, notify_created_gc, notify_dropped_gc, }; /// A soft limit on the amount of references that may be made to a `Gc`. /// /// Going above this limit will abort your program (although not /// necessarily) at _exactly_ `MAX_REFCOUNT + 1` references. /// /// See comment in `Gc::clone`. const MAX_STRONG_COUNT: usize = (isize::MAX) as usize; /// Allows tracing with all sync visitors. #[expect(private_bounds)] pub(crate) trait TraceSync: for<'a> TraceWith> + for<'a> TraceWith> + TraceWith { } impl TraceSync for T where T: ?Sized + for<'a> TraceWith> + for<'a> TraceWith> + TraceWith { } /// A thread-safe garbage-collected pointer. /// /// This pointer can be duplicated and then shared across threads. /// Garbage collection is performed concurrently. /// /// # Examples /// /// ``` /// use dumpster::sync::Gc; /// use std::sync::atomic::{AtomicUsize, Ordering}; /// /// let shared = Gc::new(AtomicUsize::new(0)); /// /// std::thread::scope(|s| { /// s.spawn(|| { /// let other_gc = shared.clone(); /// other_gc.store(1, Ordering::Relaxed); /// }); /// /// shared.store(2, Ordering::Relaxed); /// }); /// /// println!("{}", shared.load(Ordering::Relaxed)); /// ``` /// /// # Interaction with `Drop` /// /// While collecting cycles, it's possible for a `Gc` to exist that points to some deallocated /// object. /// To prevent undefined behavior, these `Gc`s are marked as dead during collection and rendered /// inaccessible. /// Dereferencing or cloning a `Gc` during the `Drop` implementation of a `Trace` type could /// result in the program panicking to keep the program from accessing memory after freeing it. /// If you're accessing a `Gc` during a `Drop` implementation, make sure to use the fallible /// operations [`Gc::try_deref`] and [`Gc::try_clone`]. pub struct Gc { /// The pointer to the allocation. ptr: UCell>>, /// The tag information of this pointer, used for mutation detection when marking. tag: AtomicUsize, } #[cfg(not(loom))] /// The tag of the current sweep operation. /// All new allocations are minted with the current tag. static CURRENT_TAG: AtomicUsize = AtomicUsize::new(0); #[cfg(loom)] lazy_static! 
{ static ref CURRENT_TAG: AtomicUsize = AtomicUsize::new(0); } #[repr(C)] // This is only public to make the `sync_coerce_gc` macro work. #[doc(hidden)] /// The backing allocation for a [`Gc`]. pub struct GcBox where T: Trace + Send + Sync + ?Sized, { /// The "strong" count, which is the number of extant `Gc`s to this allocation. /// If the strong count is zero, a value contained in the allocation may be dropped, but the /// allocation itself must still be valid. strong: AtomicUsize, /// The "weak" count, which is the number of references to this allocation stored in to-collect /// buffers by the collection algorithm. /// If the weak count is zero, the allocation may be destroyed. weak: AtomicUsize, /// The current generation number of the allocation. /// The generation number is assigned to the global generation every time a strong reference is /// created or destroyed or a `Gc` pointing to this allocation is dereferenced. generation: AtomicUsize, /// The actual data stored in the allocation. value: T, } unsafe impl Send for Gc where T: Trace + Send + Sync + ?Sized {} unsafe impl Sync for Gc where T: Trace + Send + Sync + ?Sized {} /// Begin a collection operation of the allocations on the heap. /// /// Due to concurrency issues, this might not collect every single unreachable allocation that /// currently exists, but often calling `collect()` will get allocations made by this thread. /// /// # Examples /// /// ``` /// use dumpster::sync::{collect, Gc}; /// /// let gc = Gc::new(vec![1, 2, 3]); /// drop(gc); /// /// collect(); // the vector originally in `gc` _might_ be dropped now, but could be dropped later /// ``` pub fn collect() { collect_all_await(); } #[derive(Debug)] /// Information passed to a [`CollectCondition`] used to determine whether the garbage collector /// should start collecting. /// /// A `CollectInfo` is exclusively created by being passed as an argument to the collection /// condition. /// To set a custom collection condition, refer to [`set_collect_condition`]. /// /// # Examples /// /// ``` /// use dumpster::sync::{set_collect_condition, CollectInfo}; /// /// fn my_collect_condition(info: &CollectInfo) -> bool { /// (info.n_gcs_dropped_since_last_collect() + info.n_gcs_existing()) % 2 == 0 /// } /// /// set_collect_condition(my_collect_condition); /// ``` pub struct CollectInfo { /// Dummy value so this is a private structure. _private: (), } /// A function which determines whether the garbage collector should start collecting. /// This type primarily exists so that it can be used with [`set_collect_condition`]. /// /// # Examples /// /// ```rust /// use dumpster::sync::{set_collect_condition, CollectInfo}; /// /// fn always_collect(_: &CollectInfo) -> bool { /// true /// } /// /// set_collect_condition(always_collect); /// ``` pub type CollectCondition = fn(&CollectInfo) -> bool; #[must_use] /// The default collection condition used by the garbage collector. /// /// There are no guarantees about what this function returns, other than that it will return `true` /// with sufficient frequency to ensure that all `Gc` operations are amortized _O(1)_ in runtime. /// /// This function isn't really meant to be called by users, but rather it's supposed to be handed /// off to [`set_collect_condition`] to return to the default operating mode of the library. /// /// This collection condition applies globally, i.e. to every thread. 
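///
/// As a rough sketch, the current implementation triggers a collection once more `Gc`s have been
/// dropped since the last collection than currently exist; this mirrors the function body below
/// but is an implementation detail, not a guarantee:
///
/// ```
/// use dumpster::sync::CollectInfo;
///
/// // Roughly what the default condition does today (illustrative only).
/// fn roughly_the_default(info: &CollectInfo) -> bool {
///     info.n_gcs_dropped_since_last_collect() > info.n_gcs_existing()
/// }
/// ```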
/// /// # Examples /// /// ```rust /// use dumpster::sync::{default_collect_condition, set_collect_condition, CollectInfo}; /// /// fn other_collect_condition(info: &CollectInfo) -> bool { /// info.n_gcs_existing() >= 25 || default_collect_condition(info) /// } /// /// // Use my custom collection condition. /// set_collect_condition(other_collect_condition); /// /// // I'm sick of the custom collection condition. /// // Return to the original. /// set_collect_condition(default_collect_condition); /// ``` pub fn default_collect_condition(info: &CollectInfo) -> bool { info.n_gcs_dropped_since_last_collect() > info.n_gcs_existing() } pub use collect::set_collect_condition; impl Gc where T: Trace + Send + Sync + ?Sized, { /// Construct a new garbage-collected value. /// /// # Examples /// /// ``` /// use dumpster::sync::Gc; /// /// let _ = Gc::new(0); /// ``` pub fn new(value: T) -> Gc where T: Sized, { notify_created_gc(); Gc { ptr: UCell::new(Nullable::new(NonNull::from(Box::leak(Box::new(GcBox { strong: AtomicUsize::new(1), weak: AtomicUsize::new(0), generation: AtomicUsize::new(CURRENT_TAG.load(Ordering::Acquire)), value, }))))), tag: AtomicUsize::new(0), } } /// Construct a self-referencing `Gc`. /// /// `new_cyclic` first allocates memory for `T`, then constructs a dead `Gc`. /// The dead `Gc` is then passed to `data_fn` to construct a value of `T`, which /// is stored in the allocation. Finally, `new_cyclic` will update the dead self-referential /// `Gc`s and rehydrate them to produce the final value. /// /// # Panics /// /// If `data_fn` panics, the panic is propagated to the caller. /// The allocation is cleaned up normally. /// /// Additionally, if, when attempting to rehydrate the `Gc` members of `F`, the visitor fails to /// reach a `Gc`, this function will panic and reserve the allocation to be cleaned up /// later. /// /// # Notes on safety /// /// Incorrect implementations of `data_fn` may have unusual or strange results. /// Although `dumpster` guarantees that it will be safe, and will do its best to ensure correct /// results, it is generally unwise to allow dead `Gc`s to exist for long. /// If you implement `data_fn` wrong, this may cause panics later on inside of the collection /// process. /// /// # Examples /// /// ``` /// use dumpster::{sync::Gc, Trace}; /// /// #[derive(Trace)] /// struct Cycle { /// this: Gc, /// } /// /// let gc = Gc::new_cyclic(|this| Cycle { this }); /// assert!(Gc::ptr_eq(&gc, &gc.this)); /// ``` pub fn new_cyclic T>(data_fn: F) -> Self where T: Sized, { /// A struct containing an uninitialized value of `T`. /// May only be used inside `new_cyclic`. #[repr(transparent)] struct Uninitialized(MaybeUninit); unsafe impl TraceWith for Uninitialized { fn accept(&self, _: &mut V) -> Result<(), ()> { Ok(()) } } /// Data structure for cleaning up the allocation in case we panic along the way. struct CleanUp { /// Is `true` if the [`GcBox::value`] is initialized. initialized: bool, /// Pointer to the `GcBox` with a maybe uninitialized value. 
ptr: NonNull>, } impl Drop for CleanUp { fn drop(&mut self) { if self.initialized { // push this `Gc` into the destruction queue unsafe { mark_dirty(self.ptr) }; } else { // deallocate because this `Gc` is not initialized unsafe { dealloc( self.ptr.as_ptr().cast::(), Layout::for_value(self.ptr.as_ref()), ); } } } } // make an uninitialized allocation notify_created_gc(); let mut gcbox = NonNull::from(Box::leak(Box::new(GcBox { strong: AtomicUsize::new(1), weak: AtomicUsize::new(0), generation: AtomicUsize::new(CURRENT_TAG.load(Ordering::Acquire)), value: Uninitialized(MaybeUninit::::uninit()), }))); let mut cleanup = CleanUp { ptr: gcbox, initialized: false, }; // nilgc is a dead Gc let nilgc = Gc { tag: AtomicUsize::new(0), ptr: UCell::new(Nullable::new(gcbox.cast::>()).as_null()), }; assert!(Gc::is_dead(&nilgc)); unsafe { // SAFETY: `gcbox` is a valid pointer to an uninitialized datum that we have allocated. gcbox.as_mut().value = Uninitialized(MaybeUninit::new(data_fn(nilgc))); } cleanup.initialized = true; let gcbox = gcbox.cast::>(); let res = unsafe { // SAFETY: the above unsafe block correctly constructed the Uninitialized value, so it // is safe to cast `gcbox` and then construct a reference. gcbox.as_ref().value.accept(&mut Rehydrate { ptr: Nullable::new(gcbox.cast()), type_id: TypeId::of::(), }) }; assert!( res.is_ok(), "visitor must be able to access all Gc fields of structure when rehydrating dead Gcs" ); let gc = Gc { ptr: UCell::new(Nullable::new(gcbox)), tag: AtomicUsize::new(CURRENT_TAG.load(Ordering::Acquire)), }; let _ = ManuallyDrop::new(cleanup); gc } /// Attempt to dereference this `Gc`. /// /// This function will return `None` if `self` is a "dead" `Gc`, which points to an /// already-deallocated object. /// This can only occur if a `Gc` is accessed during the `Drop` implementation of a /// [`Trace`] object. /// /// For a version which panics instead of returning `None`, consider using [`Deref`]. /// /// # Examples /// /// For a still-living `Gc`, this always returns `Some`. /// /// ``` /// use dumpster::sync::Gc; /// /// let gc1 = Gc::new(0); /// assert!(Gc::try_deref(&gc1).is_some()); /// ``` /// /// The only way to get a `Gc` that fails on `try_deref` is by accessing a `Gc` during its /// `Drop` implementation. /// /// ``` /// use dumpster::{sync::Gc, Trace}; /// use std::sync::Mutex; /// /// #[derive(Trace)] /// struct Cycle(Mutex>>); /// /// impl Drop for Cycle { /// fn drop(&mut self) { /// let guard = self.0.lock().unwrap(); /// let maybe_ref = Gc::try_deref(guard.as_ref().unwrap()); /// assert!(maybe_ref.is_none()); /// } /// } /// /// let gc1 = Gc::new(Cycle(Mutex::new(None))); /// *gc1.0.lock().unwrap() = Some(gc1.clone()); /// # drop(gc1); /// # dumpster::sync::collect(); /// ``` pub fn try_deref(gc: &Gc) -> Option<&T> { unsafe { (!gc.ptr.get().is_null()).then(|| &**gc) } } /// Attempt to clone this `Gc`. /// /// This function will return `None` if `self` is a "dead" `Gc`, which does not point to an /// existing object. For details on dead `Gc`s, refer to [`Gc::is_dead`]. /// /// For a version that simply clones the dead `Gc`, use [`Clone`]. /// /// # Examples /// /// For a still-living `Gc`, this always returns `Some`. /// /// ``` /// use dumpster::sync::Gc; /// /// let gc1 = Gc::new(0); /// let gc2 = Gc::try_clone(&gc1).unwrap(); /// ``` /// /// The only way to get a `Gc` which fails on `try_clone` is by accessing a `Gc` during its /// `Drop` implementation. 
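    /// (A dead `Gc` can also be observed inside the closure passed to [`Gc::new_cyclic`];
    /// see [`Gc::is_dead`] for details.)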
/// /// ``` /// use dumpster::{sync::Gc, Trace}; /// /// #[derive(Trace)] /// struct Cycle(Gc); /// /// impl Drop for Cycle { /// fn drop(&mut self) { /// let cloned = Gc::try_clone(&self.0); /// assert!(cloned.is_none()); /// } /// } /// /// let gc1 = Gc::new_cyclic(|gc| Cycle(gc)); /// # drop(gc1); /// # dumpster::sync::collect(); /// ``` pub fn try_clone(gc: &Gc) -> Option> { unsafe { (!gc.ptr.get().is_null()).then(|| gc.clone()) } } /// Provides a raw pointer to the data. /// /// Panics if `self` is a "dead" `Gc`, /// which points to an already-deallocated object. /// This can only occur if a `Gc` is accessed during the `Drop` implementation of a /// [`Trace`] object. /// /// # Examples /// /// ``` /// use dumpster::sync::Gc; /// let x = Gc::new("hello".to_owned()); /// let y = Gc::clone(&x); /// let x_ptr = Gc::as_ptr(&x); /// assert_eq!(x_ptr, Gc::as_ptr(&x)); /// assert_eq!(unsafe { &*x_ptr }, "hello"); /// ``` pub fn as_ptr(gc: &Gc) -> *const T { unsafe { let ptr = NonNull::as_ptr(gc.ptr.get().unwrap()); addr_of_mut!((*ptr).value) } } /// Determine whether two `Gc`s are equivalent by reference. /// Returns `true` if both `this` and `other` point to the same value, in the same style as /// [`std::ptr::eq`]. /// /// # Examples /// /// ``` /// use dumpster::sync::Gc; /// /// let gc1 = Gc::new(0); /// let gc2 = Gc::clone(&gc1); // points to same spot as `gc1` /// let gc3 = Gc::new(0); // same value, but points to a different object than `gc1` /// /// assert!(Gc::ptr_eq(&gc1, &gc2)); /// assert!(!Gc::ptr_eq(&gc1, &gc3)); /// ``` pub fn ptr_eq(this: &Gc, other: &Gc) -> bool { unsafe { this.ptr.get() }.as_option() == unsafe { other.ptr.get() }.as_option() } /// Get the number of references to the value pointed to by this `Gc`. /// /// This does not include internal references generated by the garbage collector. /// /// # Panics /// /// This function may panic if the `Gc` whose reference count we are loading is "dead" (i.e. /// generated through a `Drop` implementation). For further reference, take a look at /// [`Gc::is_dead`]. /// /// # Examples /// /// ``` /// use dumpster::sync::Gc; /// /// let gc = Gc::new(()); /// assert_eq!(Gc::ref_count(&gc).get(), 1); /// let gc2 = gc.clone(); /// assert_eq!(Gc::ref_count(&gc).get(), 2); /// drop(gc); /// drop(gc2); /// ``` pub fn ref_count(gc: &Self) -> NonZeroUsize { let box_ptr = unsafe { gc.ptr.get() }.expect( "Attempt to dereference Gc to already-collected object. \ This means a Gc escaped from a Drop implementation, likely implying a bug in your code.", ); let box_ref = unsafe { box_ptr.as_ref() }; NonZeroUsize::new(box_ref.strong.load(Ordering::Relaxed)) .expect("strong count to a GcBox may never be zero while a Gc to it exists") } /// Determine whether this is a dead `Gc`. /// /// A `Gc` is dead if it is not usable as a reference to any value. /// Currently, a dead `Gc` may only be produced by accessing a `Gc` inside of the `Drop` /// implementation of a garbage-collected value or by using the `Gc` provided to the /// construction function in [`Gc::new_cyclic`]. /// /// # Examples /// /// ``` /// use dumpster::{sync::Gc, Trace}; /// /// #[derive(Trace)] /// struct Cycle(Gc); /// /// impl Drop for Cycle { /// fn drop(&mut self) { /// assert!(Gc::is_dead(&self.0)); /// } /// } /// /// let gc1 = Gc::new_cyclic(|gc| Cycle(gc)); /// # drop(gc1); /// # dumpster::sync::collect(); /// ``` #[inline] pub fn is_dead(gc: &Self) -> bool { unsafe { gc.ptr.get() }.is_null() } /// Consumes the `Gc`, returning the inner `GcBox` pointer and tag. 
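    /// Because `this` is wrapped in `ManuallyDrop`, the strong count is not decremented here;
    /// the caller is expected to rebuild a `Gc` from the returned pointer via [`Gc::from_ptr`]
    /// (as the `coerce_gc` macro does) so that the reference is not leaked.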
#[inline] #[must_use] fn into_ptr(this: Self) -> (*const GcBox, usize) { let this = ManuallyDrop::new(this); let tag = &raw const this.tag; let ptr = unsafe { this.ptr.get().as_ptr() }; let tag = unsafe { tag.read() }.into_inner(); (ptr, tag) } /// Constructs a `Gc` from the innner `GcBox` pointer and tag. #[inline] #[must_use] unsafe fn from_ptr(ptr: *const GcBox, tag: usize) -> Self { Self { ptr: UCell::new(Nullable::from_ptr(ptr.cast_mut())), tag: AtomicUsize::new(tag), } } /// Kill this `Gc`, making it dead. /// /// # Safety /// /// The caller is responsible for making sure that no other code can access this `Gc` while /// `kill` is running. unsafe fn kill(&self) { self.ptr.set(self.ptr.get().as_null()); } /// Exists solely for the [`coerce_gc`] macro. #[inline] #[must_use] #[doc(hidden)] pub fn __private_into_ptr(this: Self) -> (*const GcBox, usize) { Self::into_ptr(this) } /// Exists solely for the [`coerce_gc`] macro. #[inline] #[must_use] #[doc(hidden)] pub unsafe fn __private_from_ptr(ptr: *const GcBox, tag: usize) -> Self { Self::from_ptr(ptr, tag) } } /// A struct for converting dead `Gc`s into live ones. /// /// This is used in [`Gc::new_cyclic`]. pub(super) struct Rehydrate { /// The pointer to the currently hydrating [`GcBox`]. ptr: Nullable>, /// The [`TypeId`] of `T` in `Gc` to be hydrated. type_id: TypeId, } impl Visitor for Rehydrate { fn visit_sync(&mut self, gc: &Gc) where T: Trace + Send + Sync + ?Sized, { if Gc::is_dead(gc) && TypeId::of::() == self.type_id { unsafe { // SAFETY: it is safe to transmute these pointers because we have checked // that they are of the same type. // Additionally, the `GcBox` has been fully initialized, so it is safe to // create a reference here. let cell_ptr = (&raw const gc.ptr).cast::>>>(); (*cell_ptr).set(self.ptr); let box_ref = &*self.ptr.as_ptr(); let old_strong = box_ref.strong.fetch_add(1, Ordering::Relaxed); // Check for overflow. See implementation of clone for details. if old_strong > MAX_STRONG_COUNT { std::process::abort(); } box_ref .generation .store(CURRENT_TAG.load(Ordering::Acquire), Ordering::Release); notify_created_gc(); } } } fn visit_unsync(&mut self, _: &crate::unsync::Gc) where T: Trace + ?Sized, { } } impl Gc { /// Makes a mutable reference to the given `Gc`. /// /// If there are other `Gc` pointers to the same allocation, then `make_mut` will /// [`clone`] the inner value to a new allocation to ensure unique ownership. This is also /// referred to as clone-on-write. /// /// [`clone`]: Clone::clone /// /// # Panics /// /// This function may panic if the `Gc` whose reference count we are loading is "dead" (i.e. /// generated through a `Drop` implementation). For further reference, take a look at /// [`Gc::is_dead`]. /// /// # Examples /// /// ``` /// use dumpster::sync::Gc; /// /// let mut data = Gc::new(5); /// /// *Gc::make_mut(&mut data) += 1; // Won't clone anything /// let mut other_data = Gc::clone(&data); // Won't clone inner data /// *Gc::make_mut(&mut data) += 1; // Clones inner data /// *Gc::make_mut(&mut data) += 1; // Won't clone anything /// *Gc::make_mut(&mut other_data) *= 2; // Won't clone anything /// /// // Now `data` and `other_data` point to different allocations. 
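    /// // (`Gc::ptr_eq` confirms that they no longer share an allocation.)
    /// assert!(!Gc::ptr_eq(&data, &other_data));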
/// assert_eq!(*data, 8); /// assert_eq!(*other_data, 12); /// ``` #[inline] pub fn make_mut(this: &mut Self) -> &mut T { if Gc::is_dead(this) { panic_deref_of_collected_object(); } // SAFETY: we checked above that the object is alive (not null) let box_ref = unsafe { this.ptr.get().unwrap_unchecked().as_ref() }; let strong = box_ref.strong.load(Ordering::Acquire); let weak = box_ref.weak.load(Ordering::Acquire); if strong != 1 || weak != 0 { // We don't have unique access to the value so we need to clone it. *this = Gc::new(box_ref.value.clone()); } // SAFETY: we have exclusive access to this `GcBox` because we ensured // that we hold the only reference to this allocation. // No other `Gc`s point to this allocation because the strong count is 1, and there are no // loose pointers internal to the collector because the weak count is 0. unsafe { &mut (*this.ptr.get().as_ptr()).value } } } /// Allows coercing `T` of [`Gc`](Gc). /// /// This means that you can convert a `Gc` containing a strictly-sized type (such as `[T; N]`) into /// a `Gc` containing its unsized version (such as `[T]`), all without using nightly-only features. /// /// This is one of two easy ways to create a `Gc<[T]>`; the other method is to use [`FromIterator`]. /// /// # Examples /// /// ``` /// use dumpster::sync::{coerce_gc, Gc}; /// /// let gc1: Gc<[u8; 3]> = Gc::new([7, 8, 9]); /// let gc2: Gc<[u8]> = coerce_gc!(gc1); /// assert_eq!(&gc2[..], &[7, 8, 9]); /// ``` /// /// Note that although this macro allows for type conversion, it _cannot_ be used for converting /// between incompatible types. /// /// ```compile_fail /// // This program is incorrect! /// use dumpster::sync::{Gc, coerce_gc}; /// /// let gc1: Gc = Gc::new(1); /// let gc2: Gc = coerce_gc!(gc1); /// ``` #[doc(hidden)] #[macro_export] macro_rules! __sync_coerce_gc { ($gc:expr) => {{ // Temporarily convert the `Gc` into a raw pointer to allow for coercion to occur. let (ptr, tag): (*const _, usize) = $crate::sync::Gc::__private_into_ptr($gc); unsafe { $crate::sync::Gc::__private_from_ptr(ptr, tag) } }}; } #[doc(inline)] pub use crate::__sync_coerce_gc as coerce_gc; impl Clone for Gc where T: Trace + Send + Sync + ?Sized, { /// Clone a garbage-collected reference. /// This does not clone the underlying data. /// If this `Gc` is [dead](`Gc::is_dead`), this will produce another dead `Gc`. /// /// For a fallible version, refer to [`Gc::try_clone`]. /// /// # Examples /// /// ``` /// use dumpster::sync::Gc; /// use std::sync::atomic::{AtomicU8, Ordering}; /// /// let gc1 = Gc::new(AtomicU8::new(0)); /// let gc2 = gc1.clone(); /// /// gc1.store(1, Ordering::Relaxed); /// assert_eq!(gc2.load(Ordering::Relaxed), 1); /// ``` /// /// Note that you can also clone a dead `Gc`. /// /// ``` /// use dumpster::{sync::Gc, Trace}; /// use std::sync::Mutex; /// /// #[derive(Trace)] /// struct Cycle(Gc); /// /// impl Drop for Cycle { /// fn drop(&mut self) { /// let gc = self.0.clone(); /// assert!(Gc::is_dead(&gc)); /// } /// } /// /// let gc1 = Gc::new_cyclic(|gc| Cycle(gc)); /// # drop(gc1); /// # dumpster::sync::collect(); /// ``` fn clone(&self) -> Gc { if Gc::is_dead(self) { // Clone dead Gcs by doing a naive copy. return unsafe { ptr::read(self) }; } let box_ref = unsafe { self.ptr.get().unwrap().as_ref() }; // increment strong count before generation to ensure cleanup never underestimates ref count let old_strong = box_ref.strong.fetch_add(1, Ordering::Acquire); // We need to guard against massive refcounts in case someone is `mem::forget`ing // Gcs. 
If we don't do this the count can overflow and users will use-after free. This // branch will never be taken in any realistic program. We abort because such a program is // incredibly degenerate, and we don't care to support it. // // This check is not 100% water-proof: we error when the refcount grows beyond `isize::MAX`. // But we do that check *after* having done the increment, so there is a chance here that // the worst already happened and we actually do overflow the `usize` counter. However, that // requires the counter to grow from `isize::MAX` to `usize::MAX` between the increment // above and the `abort` below, which seems exceedingly unlikely. if old_strong > MAX_STRONG_COUNT { std::process::abort(); } box_ref .generation .store(CURRENT_TAG.load(Ordering::Acquire), Ordering::Release); notify_created_gc(); // mark_clean(box_ref); // causes performance drops Gc { ptr: UCell::new(unsafe { self.ptr.get() }), tag: AtomicUsize::new(CURRENT_TAG.load(Ordering::Acquire)), } } } impl Drop for Gc where T: Trace + Send + Sync + ?Sized, { fn drop(&mut self) { let Some(mut ptr) = unsafe { self.ptr.get() }.as_option() else { return; }; let box_ref = unsafe { ptr.as_ref() }; box_ref.weak.fetch_add(1, Ordering::AcqRel); // ensures that this allocation wasn't freed // while we weren't looking box_ref .generation .store(CURRENT_TAG.load(Ordering::Relaxed), Ordering::Release); match box_ref.strong.fetch_sub(1, Ordering::AcqRel) { 0 => unreachable!("strong cannot reach zero while a Gc to it exists"), 1 => { mark_clean(box_ref); if box_ref.weak.fetch_sub(1, Ordering::Release) == 1 { // destroyed the last weak reference! we can safely deallocate this let layout = Layout::for_value(box_ref); fence(Ordering::Acquire); unsafe { drop_in_place(ptr.as_mut()); dealloc(ptr.as_ptr().cast(), layout); } } } _ => { if contains_gcs(&box_ref.value).unwrap_or(true) { // SAFETY: `ptr` is convertible to a reference // We don't use `box_ref` here because that pointer // only has `SharedReadOnly` permissions under the stacked borrows model // when we need `Unique` for the `TrashCan`. unsafe { mark_dirty(ptr) }; } box_ref.weak.fetch_sub(1, Ordering::Release); } } notify_dropped_gc(); } } impl CollectInfo { #[must_use] /// Get the number of times that a [`Gc`] has been dropped since the last time a collection /// operation was performed. /// /// # Examples /// /// ``` /// use dumpster::sync::{set_collect_condition, CollectInfo}; /// /// // Collection condition for whether many Gc's have been dropped. /// fn have_many_gcs_dropped(info: &CollectInfo) -> bool { /// info.n_gcs_dropped_since_last_collect() > 100 /// } /// /// set_collect_condition(have_many_gcs_dropped); /// ``` pub fn n_gcs_dropped_since_last_collect(&self) -> usize { n_gcs_dropped() } #[must_use] /// Get the total number of [`Gc`]s which currently exist. /// /// # Examples /// /// ``` /// use dumpster::sync::{set_collect_condition, CollectInfo}; /// /// // Collection condition for whether many Gc's currently exist. /// fn do_many_gcs_exist(info: &CollectInfo) -> bool { /// info.n_gcs_existing() > 100 /// } /// /// set_collect_condition(do_many_gcs_exist); /// ``` pub fn n_gcs_existing(&self) -> usize { n_gcs_existing() } } impl Gc { /// Allocates an `GcBox` with sufficient space for /// a value of the provided layout. /// /// The function `mem_to_gc_box` is called with the data pointer /// and must return back a pointer for the `GcBox`. 
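    /// The returned allocation has its `strong`, `weak`, and `generation` fields initialized
    /// (to 1, 0, and 0 respectively), but its `value` field is left uninitialized; the caller
    /// must write a valid value before the pointer is used as a live `Gc`.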
unsafe fn allocate_for_layout( value_layout: Layout, mem_to_gc_box: impl FnOnce(*mut u8) -> *mut GcBox, ) -> *mut GcBox { let layout = Layout::new::>() .extend(value_layout) .unwrap() .0 .pad_to_align(); Self::allocate_for_layout_of_box(layout, mem_to_gc_box) } /// Allocates an `GcBox` with the given layout. /// /// The function `mem_to_gc_box` is called with the data pointer /// and must return back a pointer for the `GcBox`. unsafe fn allocate_for_layout_of_box( layout: Layout, mem_to_gc_box: impl FnOnce(*mut u8) -> *mut GcBox, ) -> *mut GcBox { // SAFETY: layout has non-zero size because of the `ref_count` field let ptr = unsafe { std::alloc::alloc(layout) }; if ptr.is_null() { handle_alloc_error(layout); } let inner = mem_to_gc_box(ptr); unsafe { (&raw mut (*inner).strong).write(AtomicUsize::new(1)); (&raw mut (*inner).weak).write(AtomicUsize::new(0)); (&raw mut (*inner).generation).write(AtomicUsize::new(0)); } inner } } impl Gc<[T]> { /// Allocates an `GcBox<[T]>` with the given length. fn allocate_for_slice(len: usize) -> *mut GcBox<[T]> { unsafe { Self::allocate_for_layout(Layout::array::(len).unwrap(), |mem| { ptr::slice_from_raw_parts_mut(mem.cast::(), len) as *mut GcBox<[T]> }) } } } unsafe impl TraceWith for Gc { fn accept(&self, visitor: &mut V) -> Result<(), ()> { visitor.visit_sync(self); Ok(()) } } impl Deref for Gc { type Target = T; /// Dereference this pointer, creating a reference to the contained value `T`. /// /// # Panics /// /// This function may panic if it is called from within the implementation of `std::ops::Drop` /// of its owning value, since returning such a reference could cause a use-after-free. /// It is not guaranteed to panic. /// /// # Examples /// /// The following is a correct time to dereference a `Gc`. /// /// ``` /// use dumpster::sync::Gc; /// /// let my_gc = Gc::new(0u8); /// let my_ref: &u8 = &my_gc; /// ``` /// /// Dereferencing a `Gc` while dropping is not correct. /// /// ```should_panic /// // This is wrong! /// use dumpster::{sync::Gc, Trace}; /// use std::sync::Mutex; /// /// #[derive(Trace)] /// struct Bad { /// s: String, /// cycle: Mutex>>, /// } /// /// impl Drop for Bad { /// fn drop(&mut self) { /// println!("{}", self.cycle.lock().unwrap().as_ref().unwrap().s) /// } /// } /// /// let foo = Gc::new(Bad { /// s: "foo".to_string(), /// cycle: Mutex::new(None), /// }); /// ``` fn deref(&self) -> &Self::Target { let box_ref = unsafe { self.ptr.get().expect( "Attempting to dereference Gc to already-deallocated object.\ This is caused by accessing a Gc during a Drop implementation, likely implying a bug in your code." ).as_ref() }; let current_tag = CURRENT_TAG.load(Ordering::Acquire); self.tag.store(current_tag, Ordering::Release); box_ref.generation.store(current_tag, Ordering::Release); &box_ref.value } } impl PartialEq> for Gc where T: Trace + Send + Sync + ?Sized + PartialEq, { /// Test for equality on two `Gc`s. /// /// Two `Gc`s are equal if their inner values are equal, even if they are stored in different /// allocations. /// Because `PartialEq` does not imply reflexivity, and there is no current path for trait /// specialization, this function does not do a "fast-path" check for reference equality. /// Therefore, if two `Gc`s point to the same allocation, the implementation of `eq` will still /// require a direct call to `eq` on the values. 
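    /// To compare by allocation identity rather than by value, use [`Gc::ptr_eq`]:
    ///
    /// ```
    /// use dumpster::sync::Gc;
    ///
    /// let a = Gc::new(6);
    /// let b = Gc::new(6);
    /// assert!(a == b); // equal by value...
    /// assert!(!Gc::ptr_eq(&a, &b)); // ...but stored in different allocations
    /// ```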
/// /// # Panics /// /// This function may panic if it is called from within the implementation of `std::ops::Drop` /// of its owning value, since returning such a reference could cause a use-after-free. /// It is not guaranteed to panic. /// Additionally, if this `Gc` is moved out of an allocation during a `Drop` implementation, it /// could later cause a panic. /// For further details, refer to the main documentation for `Gc`. /// /// ``` /// use dumpster::sync::Gc; /// /// let gc = Gc::new(6); /// assert!(gc == Gc::new(6)); /// ``` fn eq(&self, other: &Gc) -> bool { self.as_ref() == other.as_ref() } } impl Eq for Gc where T: Trace + Send + Sync + ?Sized + PartialEq {} impl AsRef for Gc { fn as_ref(&self) -> &T { self } } impl Borrow for Gc { fn borrow(&self) -> &T { self } } impl Default for Gc { fn default() -> Self { Gc::new(T::default()) } } impl std::fmt::Pointer for Gc { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { std::fmt::Pointer::fmt(&addr_of!(**self), f) } } #[cfg(not(loom))] #[cfg(feature = "coerce-unsized")] impl std::ops::CoerceUnsized> for Gc where T: std::marker::Unsize + Trace + Send + Sync + ?Sized, U: Trace + Send + Sync + ?Sized, { } impl Debug for Gc { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, "Gc({:?}, {})", self.ptr, self.tag.load(Ordering::Acquire) ) } } impl Display for Gc { /// Formats the value using its `Display` implementation. /// /// # Note /// /// If `T` contains cyclic references through `Gc` pointers and its `Display` implementation /// attempts to traverse them, this may cause infinite recursion. Types with potential cycles /// should implement `Display` to avoid following cyclic references. fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { Display::fmt(&**self, f) } } impl From for Gc { /// Converts a generic type `T` into an `Gc` /// /// The conversion allocates on the heap and moves `t` /// from the stack into it. /// /// # Example /// ```rust /// # use dumpster::sync::Gc; /// let x = 5; /// let rc = Gc::new(5); /// /// assert_eq!(Gc::from(x), rc); /// ``` fn from(value: T) -> Self { Gc::new(value) } } impl From<[T; N]> for Gc<[T]> { /// Converts a [`[T; N]`](prim@array) into an `Gc<[T]>`. /// /// The conversion moves the array into a newly allocated `Gc`. /// /// # Example /// /// ``` /// # use dumpster::sync::Gc; /// let original: [i32; 3] = [1, 2, 3]; /// let shared: Gc<[i32]> = Gc::from(original); /// assert_eq!(&[1, 2, 3], &shared[..]); /// ``` #[inline] fn from(v: [T; N]) -> Gc<[T]> { coerce_gc!(Gc::<[T; N]>::from(v)) } } impl From<&[T]> for Gc<[T]> { /// Allocates a garbage-collected slice and fills it by cloning `slice`'s items. /// /// # Example /// /// ``` /// # use dumpster::sync::Gc; /// let original: &[i32] = &[1, 2, 3]; /// let shared: Gc<[i32]> = Gc::from(original); /// assert_eq!(&[1, 2, 3], &shared[..]); /// ``` #[inline] fn from(slice: &[T]) -> Gc<[T]> { // Panic guard while cloning T elements. // In the event of a panic, elements that have been written // into the new GcBox will be dropped, then the memory freed. 
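        // The guard is disarmed with `mem::forget` once every element has been cloned
        // successfully, so the partially filled `GcBox` is only torn down along the panic path
        // (the `from_slice_panic` test in `sync/tests.rs` exercises this).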
struct Guard { /// pointer to `GcBox` to deallocate on panic mem: *mut u8, /// layout of the `GcBox` to deallocate on panic layout: Layout, /// pointer to the `GcBox`'s value elems: *mut T, /// the number of elements cloned so far n_elems: usize, } impl Drop for Guard { fn drop(&mut self) { unsafe { let slice = slice::from_raw_parts_mut(self.elems, self.n_elems); ptr::drop_in_place(slice); dealloc(self.mem, self.layout); } } } unsafe { let value_layout = Layout::array::(slice.len()).unwrap(); let layout = Layout::new::>() .extend(value_layout) .unwrap() .0 .pad_to_align(); let ptr = Self::allocate_for_layout_of_box(layout, |mem| { ptr::slice_from_raw_parts_mut(mem.cast::(), slice.len()) as *mut GcBox<[T]> }); // Pointer to first element let elems = (&raw mut (*ptr).value).cast::(); let mut guard = Guard { mem: ptr.cast::(), layout, elems, n_elems: 0, }; for (i, item) in slice.iter().enumerate() { ptr::write(elems.add(i), item.clone()); guard.n_elems += 1; } // All clear. Forget the guard so it doesn't free the new GcBox. mem::forget(guard); notify_created_gc(); Self { ptr: UCell::new(Nullable::from_ptr(ptr)), tag: AtomicUsize::new(0), } } } } impl From<&mut [T]> for Gc<[T]> { /// Allocates a garbage-collected slice and fills it by cloning `v`'s items. /// /// # Example /// /// ``` /// # use dumpster::sync::Gc; /// let mut original = [1, 2, 3]; /// let original: &mut [i32] = &mut original; /// let shared: Gc<[i32]> = Gc::from(original); /// assert_eq!(&[1, 2, 3], &shared[..]); /// ``` #[inline] fn from(value: &mut [T]) -> Self { Gc::from(&*value) } } impl From<&str> for Gc { /// Allocates a garbage-collected string slice and copies `v` into it. /// /// # Example /// /// ``` /// # use dumpster::sync::Gc; /// let shared: Gc = Gc::from("statue"); /// assert_eq!("statue", &shared[..]); /// ``` #[inline] fn from(v: &str) -> Self { let bytes = Gc::<[u8]>::from(v.as_bytes()); let (ptr, tag) = Gc::into_ptr(bytes); unsafe { Gc::from_ptr(ptr as *const GcBox, tag) } } } impl From<&mut str> for Gc { /// Allocates a garbage-collected string slice and copies `v` into it. /// /// # Example /// /// ``` /// # use dumpster::sync::Gc; /// let mut original = String::from("statue"); /// let original: &mut str = &mut original; /// let shared: Gc = Gc::from(original); /// assert_eq!("statue", &shared[..]); /// ``` #[inline] fn from(v: &mut str) -> Self { Gc::from(&*v) } } impl From> for Gc<[u8]> { /// Converts a garbage-collected string slice into a byte slice. /// /// # Example /// /// ``` /// # use dumpster::sync::Gc; /// let string: Gc = Gc::from("eggplant"); /// let bytes: Gc<[u8]> = Gc::from(string); /// assert_eq!("eggplant".as_bytes(), bytes.as_ref()); /// ``` #[inline] fn from(value: Gc) -> Self { let (ptr, tag) = Gc::into_ptr(value); unsafe { Gc::from_ptr(ptr as *const GcBox<[u8]>, tag) } } } impl From for Gc { /// Allocates a garbage-collected string slice and copies `v` into it. /// /// # Example /// /// ``` /// # use dumpster::sync::Gc; /// let original: String = "statue".to_owned(); /// let shared: Gc = Gc::from(original); /// assert_eq!("statue", &shared[..]); /// ``` #[inline] fn from(value: String) -> Self { Self::from(&value[..]) } } impl From> for Gc { /// Move a boxed object to a new, garbage collected, allocation. 
/// /// # Example /// /// ``` /// # use dumpster::sync::Gc; /// let original: Box = Box::new(1); /// let shared: Gc = Gc::from(original); /// assert_eq!(1, *shared); /// ``` #[inline] fn from(src: Box) -> Self { unsafe { let layout = Layout::for_value(&*src); let gc_ptr = Gc::allocate_for_layout(layout, <*mut u8>::cast::>); // Copy value as bytes ptr::copy_nonoverlapping( (&raw const *src).cast::(), (&raw mut (*gc_ptr).value).cast::(), layout.size(), ); // Free the allocation without dropping its contents let bptr = Box::into_raw(src); let src = Box::from_raw(bptr.cast::>()); drop(src); notify_created_gc(); Self::from_ptr(gc_ptr, 0) } } } impl From> for Gc<[T]> { /// Allocates a garbage-collected slice and moves `vec`'s items into it. /// /// # Example /// /// ``` /// # use dumpster::sync::Gc; /// let unique: Vec = vec![1, 2, 3]; /// let shared: Gc<[i32]> = Gc::from(unique); /// assert_eq!(&[1, 2, 3], &shared[..]); /// ``` #[inline] fn from(vec: Vec) -> Self { let mut vec = ManuallyDrop::new(vec); let vec_cap = vec.capacity(); let vec_len = vec.len(); let vec_ptr = vec.as_mut_ptr(); let gc_ptr = Self::allocate_for_slice(vec_len); unsafe { let dst_ptr = (&raw mut (*gc_ptr).value).cast::(); ptr::copy_nonoverlapping(vec_ptr, dst_ptr, vec_len); let _ = Vec::from_raw_parts(vec_ptr, 0, vec_cap); notify_created_gc(); Self::from_ptr(gc_ptr, 0) } } } impl<'a, B: Trace + Send + Sync> From> for Gc where B: ToOwned + ?Sized, Gc: From<&'a B> + From, { /// Creates a garbage-collected pointer from a clone-on-write pointer by /// copying its content. /// /// # Example /// /// ```rust /// # use dumpster::sync::Gc; /// # use std::borrow::Cow; /// let cow: Cow<'_, str> = Cow::Borrowed("eggplant"); /// let shared: Gc = Gc::from(cow); /// assert_eq!("eggplant", &shared[..]); /// ``` #[inline] fn from(cow: Cow<'a, B>) -> Gc { match cow { Cow::Borrowed(s) => Gc::from(s), Cow::Owned(s) => Gc::from(s), } } } impl FromIterator for Gc<[T]> where T: Trace + Send + Sync, { fn from_iter>(iter: I) -> Self { // Collect into a `Vec` for O(n) performance. // TODO: this could be slightly optimized by using the `Gc<[]>` layout for perf, but this is // a later problem. Self::from(iter.into_iter().collect::>()) } } ================================================ FILE: dumpster/src/sync/tests.rs ================================================ /* dumpster, a cycle-tracking garbage collector for Rust. Copyright (C) 2023 Clayton Ramsey. This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ use std::{ collections::hash_map::Entry, mem::{swap, take, transmute, MaybeUninit}, ptr::NonNull, sync::{ atomic::{AtomicUsize, Ordering}, Mutex, OnceLock, }, }; use foldhash::{HashMap, HashMapExt}; use crate::{sync::coerce_gc, Visitor}; use super::*; struct DropCount<'a>(&'a AtomicUsize); impl Drop for DropCount<'_> { fn drop(&mut self) { self.0.fetch_add(1, Ordering::Release); } } unsafe impl TraceWith for DropCount<'_> { fn accept(&self, _: &mut V) -> Result<(), ()> { Ok(()) } } struct MultiRef { refs: Mutex>>, #[expect(unused)] count: DropCount<'static>, } unsafe impl TraceWith for MultiRef { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.refs.accept(visitor) } } #[test] fn single_alloc() { static DROP_COUNT: AtomicUsize = AtomicUsize::new(0); let gc1 = Gc::new(DropCount(&DROP_COUNT)); collect(); assert_eq!(DROP_COUNT.load(Ordering::Acquire), 0); drop(gc1); collect(); assert_eq!(DROP_COUNT.load(Ordering::Acquire), 1); } #[test] fn ref_count() { static DROP_COUNT: AtomicUsize = AtomicUsize::new(0); let gc1 = Gc::new(DropCount(&DROP_COUNT)); let gc2 = Gc::clone(&gc1); assert_eq!(DROP_COUNT.load(Ordering::Acquire), 0); drop(gc1); assert_eq!(DROP_COUNT.load(Ordering::Acquire), 0); drop(gc2); assert_eq!(DROP_COUNT.load(Ordering::Acquire), 1); } #[test] fn self_referential() { struct Foo(Mutex>>); static DROP_COUNT: AtomicUsize = AtomicUsize::new(0); unsafe impl TraceWith for Foo { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.0.accept(visitor) } } impl Drop for Foo { fn drop(&mut self) { println!("begin increment of the drop count!"); DROP_COUNT.fetch_add(1, Ordering::Release); } } let gc1 = Gc::new(Foo(Mutex::new(None))); *gc1.0.lock().unwrap() = Some(Gc::clone(&gc1)); assert_eq!(DROP_COUNT.load(Ordering::Acquire), 0); drop(gc1); collect(); assert_eq!(DROP_COUNT.load(Ordering::Acquire), 1); } #[test] fn two_cycle() { static DROP_0: AtomicUsize = AtomicUsize::new(0); static DROP_1: AtomicUsize = AtomicUsize::new(0); let gc0 = Gc::new(MultiRef { refs: Mutex::new(Vec::new()), count: DropCount(&DROP_0), }); let gc1 = Gc::new(MultiRef { refs: Mutex::new(vec![Gc::clone(&gc0)]), count: DropCount(&DROP_1), }); gc0.refs.lock().unwrap().push(Gc::clone(&gc1)); collect(); assert_eq!(DROP_0.load(Ordering::Acquire), 0); assert_eq!(DROP_0.load(Ordering::Acquire), 0); drop(gc0); collect(); assert_eq!(DROP_0.load(Ordering::Acquire), 0); assert_eq!(DROP_0.load(Ordering::Acquire), 0); drop(gc1); collect(); assert_eq!(DROP_0.load(Ordering::Acquire), 1); assert_eq!(DROP_0.load(Ordering::Acquire), 1); } #[test] fn self_ref_two_cycle() { static DROP_0: AtomicUsize = AtomicUsize::new(0); static DROP_1: AtomicUsize = AtomicUsize::new(0); let gc0 = Gc::new(MultiRef { refs: Mutex::new(Vec::new()), count: DropCount(&DROP_0), }); let gc1 = Gc::new(MultiRef { refs: Mutex::new(vec![Gc::clone(&gc0)]), count: DropCount(&DROP_1), }); gc0.refs.lock().unwrap().extend([gc0.clone(), gc1.clone()]); gc1.refs.lock().unwrap().push(gc1.clone()); collect(); assert_eq!(DROP_0.load(Ordering::Acquire), 0); assert_eq!(DROP_0.load(Ordering::Acquire), 0); drop(gc0); collect(); assert_eq!(DROP_0.load(Ordering::Acquire), 0); assert_eq!(DROP_0.load(Ordering::Acquire), 0); drop(gc1); collect(); assert_eq!(DROP_0.load(Ordering::Acquire), 1); assert_eq!(DROP_0.load(Ordering::Acquire), 1); } #[test] fn parallel_loop() { static COUNT_1: AtomicUsize = AtomicUsize::new(0); static COUNT_2: AtomicUsize = AtomicUsize::new(0); static COUNT_3: AtomicUsize = AtomicUsize::new(0); static COUNT_4: AtomicUsize = 
AtomicUsize::new(0); let gc1 = Gc::new(MultiRef { count: DropCount(&COUNT_1), refs: Mutex::new(Vec::new()), }); let gc2 = Gc::new(MultiRef { count: DropCount(&COUNT_2), refs: Mutex::new(vec![Gc::clone(&gc1)]), }); let gc3 = Gc::new(MultiRef { count: DropCount(&COUNT_3), refs: Mutex::new(vec![Gc::clone(&gc1)]), }); let gc4 = Gc::new(MultiRef { count: DropCount(&COUNT_4), refs: Mutex::new(vec![Gc::clone(&gc2), Gc::clone(&gc3)]), }); gc1.refs.lock().unwrap().push(Gc::clone(&gc4)); assert_eq!(COUNT_1.load(Ordering::Acquire), 0); assert_eq!(COUNT_2.load(Ordering::Acquire), 0); assert_eq!(COUNT_3.load(Ordering::Acquire), 0); assert_eq!(COUNT_4.load(Ordering::Acquire), 0); drop(gc1); collect(); assert_eq!(COUNT_1.load(Ordering::Acquire), 0); assert_eq!(COUNT_2.load(Ordering::Acquire), 0); assert_eq!(COUNT_3.load(Ordering::Acquire), 0); assert_eq!(COUNT_4.load(Ordering::Acquire), 0); drop(gc2); collect(); assert_eq!(COUNT_1.load(Ordering::Acquire), 0); assert_eq!(COUNT_2.load(Ordering::Acquire), 0); assert_eq!(COUNT_3.load(Ordering::Acquire), 0); assert_eq!(COUNT_4.load(Ordering::Acquire), 0); drop(gc3); collect(); assert_eq!(COUNT_1.load(Ordering::Acquire), 0); assert_eq!(COUNT_2.load(Ordering::Acquire), 0); assert_eq!(COUNT_3.load(Ordering::Acquire), 0); assert_eq!(COUNT_4.load(Ordering::Acquire), 0); drop(gc4); collect(); assert_eq!(COUNT_1.load(Ordering::Acquire), 1); assert_eq!(COUNT_2.load(Ordering::Acquire), 1); assert_eq!(COUNT_3.load(Ordering::Acquire), 1); assert_eq!(COUNT_4.load(Ordering::Acquire), 1); } #[test] /// Test that we can drop a Gc which points to some allocation with a locked Mutex inside it // note: I tried using `ntest::timeout` but for some reason that caused this test to trivially pass. fn deadlock() { let gc1 = Gc::new(Mutex::new(())); let gc2 = gc1.clone(); let guard = gc1.lock(); drop(gc2); collect(); drop(guard); } #[test] fn open_drop() { static COUNT_1: AtomicUsize = AtomicUsize::new(0); let gc1 = Gc::new(MultiRef { refs: Mutex::new(Vec::new()), count: DropCount(&COUNT_1), }); gc1.refs.lock().unwrap().push(gc1.clone()); let guard = gc1.refs.lock(); collect(); assert_eq!(COUNT_1.load(Ordering::Acquire), 0); drop(guard); drop(gc1); collect(); assert_eq!(COUNT_1.load(Ordering::Acquire), 1); } #[test] #[cfg_attr(miri, ignore = "miri is too slow")] fn eventually_collect() { static COUNT_1: AtomicUsize = AtomicUsize::new(0); static COUNT_2: AtomicUsize = AtomicUsize::new(0); let gc1 = Gc::new(MultiRef { refs: Mutex::new(Vec::new()), count: DropCount(&COUNT_1), }); let gc2 = Gc::new(MultiRef { refs: Mutex::new(vec![gc1.clone()]), count: DropCount(&COUNT_2), }); gc1.refs.lock().unwrap().push(gc2.clone()); assert_eq!(COUNT_1.load(Ordering::Acquire), 0); assert_eq!(COUNT_2.load(Ordering::Acquire), 0); drop(gc1); drop(gc2); for _ in 0..200_000 { let gc = Gc::new(()); drop(gc); } // after enough time, gc1 and gc2 should have been collected assert_eq!(COUNT_1.load(Ordering::Acquire), 1); assert_eq!(COUNT_2.load(Ordering::Acquire), 1); } #[test] #[cfg(feature = "coerce-unsized")] fn coerce_array() { let gc1: Gc<[u8; 3]> = Gc::new([0, 0, 0]); let gc2: Gc<[u8]> = gc1; assert_eq!(gc2.len(), 3); assert_eq!( std::mem::size_of::>(), 3 * std::mem::size_of::() ); } #[test] fn coerce_array_using_macro() { let gc1: Gc<[u8; 3]> = Gc::new([0, 0, 0]); let gc2: Gc<[u8]> = coerce_gc!(gc1); assert_eq!(gc2.len(), 3); assert_eq!( std::mem::size_of::>(), 3 * std::mem::size_of::() ); } #[test] fn malicious() { static EVIL: AtomicUsize = AtomicUsize::new(0); static A_DROP_DETECT: AtomicUsize = 
AtomicUsize::new(0); struct A { x: Gc, y: Gc, } struct X { a: Mutex>>, y: NonNull, } struct Y { a: Mutex>>, } unsafe impl Send for X {} unsafe impl TraceWith for A { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.x.accept(visitor)?; self.y.accept(visitor) } } unsafe impl TraceWith for X { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.a.accept(visitor)?; if EVIL.fetch_add(1, Ordering::Relaxed) == 1 { println!("committing evil..."); // simulates a malicious thread let y = unsafe { self.y.as_ref() }; *y.a.lock().unwrap() = (*self.a.lock().unwrap()).take(); } Ok(()) } } unsafe impl TraceWith for Y { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.a.accept(visitor) } } unsafe impl Sync for X {} impl Drop for A { fn drop(&mut self) { A_DROP_DETECT.fetch_add(1, Ordering::Relaxed); } } let y = Gc::new(Y { a: Mutex::new(None), }); let x = Gc::new(X { a: Mutex::new(None), y: NonNull::from(y.as_ref()), }); let a = Gc::new(A { x, y }); *a.x.a.lock().unwrap() = Some(a.clone()); collect(); drop(a.clone()); EVIL.store(1, Ordering::Relaxed); collect(); assert_eq!(A_DROP_DETECT.load(Ordering::Relaxed), 0); drop(a); collect(); assert_eq!(A_DROP_DETECT.load(Ordering::Relaxed), 1); } #[test] #[cfg_attr(miri, ignore = "miri is too slow")] #[expect(clippy::too_many_lines)] fn fuzz() { const N: usize = 20_000; static DROP_DETECTORS: [AtomicUsize; N] = { let mut detectors: [MaybeUninit; N] = unsafe { transmute(MaybeUninit::<[AtomicUsize; N]>::uninit()) }; let mut i = 0; while i < N { detectors[i] = MaybeUninit::new(AtomicUsize::new(0)); i += 1; } unsafe { transmute(detectors) } }; #[derive(Debug)] struct Alloc { refs: Mutex>>, id: usize, } impl Drop for Alloc { fn drop(&mut self) { DROP_DETECTORS[self.id].fetch_add(1, Ordering::Relaxed); } } unsafe impl TraceWith for Alloc { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.refs.accept(visitor) } } fn dfs(alloc: &Gc, graph: &mut HashMap>) { if let Entry::Vacant(v) = graph.entry(alloc.id) { if alloc.id == 2822 || alloc.id == 2814 { println!("{} - {alloc:?}", alloc.id); } v.insert(Vec::new()); alloc.refs.lock().unwrap().iter().for_each(|a| { graph.get_mut(&alloc.id).unwrap().push(a.id); dfs(a, graph); }); } } fastrand::seed(12345); let mut gcs = (0..50) .map(|i| { Gc::new(Alloc { refs: Mutex::new(Vec::new()), id: i, }) }) .collect::>(); let mut next_detector = 50; for _ in 0..N { if gcs.is_empty() { gcs.push(Gc::new(Alloc { refs: Mutex::new(Vec::new()), id: next_detector, })); next_detector += 1; } match fastrand::u8(0..4) { 0 => { println!("add gc {next_detector}"); gcs.push(Gc::new(Alloc { refs: Mutex::new(Vec::new()), id: next_detector, })); next_detector += 1; } 1 => { if gcs.len() > 1 { let from = fastrand::usize(0..gcs.len()); let to = fastrand::usize(0..gcs.len()); println!("add ref {} -> {}", gcs[from].id, gcs[to].id); let new_gc = gcs[to].clone(); let mut guard = gcs[from].refs.lock().unwrap(); guard.push(new_gc); } } 2 => { let idx = fastrand::usize(0..gcs.len()); println!("remove gc {}", gcs[idx].id); gcs.swap_remove(idx); } 3 => { let from = fastrand::usize(0..gcs.len()); let mut guard = gcs[from].refs.lock().unwrap(); if !guard.is_empty() { let to = fastrand::usize(0..guard.len()); println!("drop ref {} -> {}", gcs[from].id, guard[to].id); guard.swap_remove(to); } } _ => unreachable!(), } } let mut graph = HashMap::new(); graph.insert(9999, Vec::new()); for alloc in &gcs { graph.get_mut(&9999).unwrap().push(alloc.id); dfs(alloc, &mut graph); } println!("{graph:#?}"); drop(gcs); collect(); let mut n_missing = 
0; for (id, count) in DROP_DETECTORS[..next_detector].iter().enumerate() { let num = count.load(Ordering::Relaxed); if num != 1 { println!("expected 1 for id {id} but got {num}"); n_missing += 1; } } assert_eq!(n_missing, 0); } #[test] fn root_canal() { struct A { b: Gc, } struct B { a0: Mutex>>, a1: Mutex>>, a2: Mutex>>, a3: Mutex>>, } unsafe impl TraceWith for A { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.b.accept(visitor) } } unsafe impl TraceWith for B { fn accept(&self, visitor: &mut V) -> Result<(), ()> { let n_prior_visits = B_VISIT_COUNT.fetch_add(1, Ordering::Relaxed); self.a0.accept(visitor)?; self.a1.accept(visitor)?; // simulate a malicious thread swapping things around if n_prior_visits == 1 { println!("committing evil..."); swap( &mut *SMUGGLED_POINTERS[0].lock().unwrap(), &mut *SMUGGLED_POINTERS[1] .lock() .unwrap() .as_ref() .unwrap() .b .a0 .lock() .unwrap(), ); swap(&mut *self.a0.lock().unwrap(), &mut *self.a2.lock().unwrap()); swap( &mut *SMUGGLED_POINTERS[0].lock().unwrap(), &mut *SMUGGLED_POINTERS[1] .lock() .unwrap() .as_ref() .unwrap() .b .a1 .lock() .unwrap(), ); swap(&mut *self.a1.lock().unwrap(), &mut *self.a3.lock().unwrap()); } self.a2.accept(visitor)?; self.a3.accept(visitor)?; // smuggle out some pointers if n_prior_visits == 0 { println!("smuggling..."); *SMUGGLED_POINTERS[0].lock().unwrap() = take(&mut *self.a2.lock().unwrap()); *SMUGGLED_POINTERS[1].lock().unwrap() = take(&mut *self.a3.lock().unwrap()); } Ok(()) } } impl Drop for B { fn drop(&mut self) { B_DROP_DETECT.fetch_add(1, Ordering::Relaxed); } } static SMUGGLED_POINTERS: [Mutex>>; 2] = [Mutex::new(None), Mutex::new(None)]; static B_VISIT_COUNT: AtomicUsize = AtomicUsize::new(0); static B_DROP_DETECT: AtomicUsize = AtomicUsize::new(0); let a = Gc::new(A { b: Gc::new(B { a0: Mutex::new(None), a1: Mutex::new(None), a2: Mutex::new(None), a3: Mutex::new(None), }), }); *a.b.a0.lock().unwrap() = Some(a.clone()); *a.b.a1.lock().unwrap() = Some(a.clone()); *a.b.a2.lock().unwrap() = Some(a.clone()); *a.b.a3.lock().unwrap() = Some(a.clone()); drop(a.clone()); collect(); println!("{}", CURRENT_TAG.load(Ordering::Relaxed)); assert!(dbg!(SMUGGLED_POINTERS[0].lock().unwrap().as_ref()).is_some()); assert!(SMUGGLED_POINTERS[1].lock().unwrap().as_ref().is_some()); println!("{}", B_VISIT_COUNT.load(Ordering::Relaxed)); assert_eq!(B_DROP_DETECT.load(Ordering::Relaxed), 0); drop(a); assert_eq!(B_DROP_DETECT.load(Ordering::Relaxed), 0); collect(); println!("{}", CURRENT_TAG.load(Ordering::Relaxed)); assert_eq!(B_DROP_DETECT.load(Ordering::Relaxed), 0); *SMUGGLED_POINTERS[0].lock().unwrap() = None; *SMUGGLED_POINTERS[1].lock().unwrap() = None; collect(); assert_eq!(B_DROP_DETECT.load(Ordering::Relaxed), 1); } #[test] #[should_panic = "Attempting to dereference Gc to already-deallocated object.This is caused by accessing a Gc during a Drop implementation, likely implying a bug in your code."] fn escape_dead_pointer() { static ESCAPED: Mutex>> = Mutex::new(None); struct Escape { x: u8, ptr: Mutex>>, } impl Drop for Escape { fn drop(&mut self) { let mut escaped_guard = ESCAPED.lock().unwrap(); if escaped_guard.is_none() { *escaped_guard = self.ptr.lock().unwrap().take(); } } } unsafe impl TraceWith for Escape { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.ptr.accept(visitor) } } let esc = Gc::new(Escape { x: 0, ptr: Mutex::new(None), }); *(*esc).ptr.lock().unwrap() = Some(esc.clone()); drop(esc); collect(); println!("{}", ESCAPED.lock().unwrap().as_ref().unwrap().x); } #[test] fn from_box() { 
    let gc: Gc<String> = Gc::from(Box::new(String::from("hello")));

    // The `From<Box<T>>` implementation executes a different code path to
    // construct the `Gc`.
    //
    // Here we ensure that the metadata is initialized to a valid state.
    unsafe {
        let gc_box = gc.ptr.get().unwrap().as_ref();
        assert_eq!(gc_box.strong.load(Ordering::SeqCst), 1);
        assert_eq!(gc_box.weak.load(Ordering::SeqCst), 0);
    }

    assert_eq!(&*gc, "hello");
}

#[test]
fn from_slice() {
    let gc: Gc<[String]> = Gc::from(&[String::from("hello"), String::from("world")][..]);

    // The `From<&[T]>` implementation executes a different code path to
    // construct the `Gc`.
    //
    // Here we ensure that the metadata is initialized to a valid state.
    unsafe {
        let gc_box = gc.ptr.get().unwrap().as_ref();
        assert_eq!(gc_box.strong.load(Ordering::SeqCst), 1);
        assert_eq!(gc_box.weak.load(Ordering::SeqCst), 0);
    }

    assert_eq!(&*gc, ["hello", "world"]);
}

#[test]
#[should_panic = "told you"]
fn from_slice_panic() {
    struct MayPanicOnClone {
        value: String,
        panic: bool,
    }

    impl Clone for MayPanicOnClone {
        fn clone(&self) -> Self {
            assert!(!self.panic, "told you");
            Self {
                value: self.value.clone(),
                panic: self.panic,
            }
        }
    }

    unsafe impl<V: Visitor> TraceWith<V> for MayPanicOnClone {
        fn accept(&self, _: &mut V) -> Result<(), ()> {
            Ok(())
        }
    }

    let slice: &[MayPanicOnClone] = &[
        MayPanicOnClone {
            value: String::from("a"),
            panic: false,
        },
        MayPanicOnClone {
            value: String::from("b"),
            panic: false,
        },
        MayPanicOnClone {
            value: String::from("c"),
            panic: true,
        },
    ];

    let _: Gc<[MayPanicOnClone]> = Gc::from(slice);
}

#[test]
fn from_vec() {
    let gc: Gc<[String]> = Gc::from(vec![String::from("hello"), String::from("world")]);

    // The `From<Vec<T>>` implementation executes a different code path to
    // construct the `Gc`.
    //
    // Here we ensure that the metadata is initialized to a valid state.
unsafe { let gc_box = gc.ptr.get().unwrap().as_ref(); assert_eq!(gc_box.strong.load(Ordering::SeqCst), 1); assert_eq!(gc_box.weak.load(Ordering::SeqCst), 0); } assert_eq!(&*gc, ["hello", "world"]); } #[test] fn make_mut() { let mut a = Gc::new(42); let mut b = a.clone(); let mut c = b.clone(); assert_eq!(*Gc::make_mut(&mut a), 42); assert_eq!(*Gc::make_mut(&mut b), 42); assert_eq!(*Gc::make_mut(&mut c), 42); *Gc::make_mut(&mut a) += 1; *Gc::make_mut(&mut b) += 2; *Gc::make_mut(&mut c) += 3; assert_eq!(*a, 43); assert_eq!(*b, 44); assert_eq!(*c, 45); // they should all be unique assert_eq!(Gc::ref_count(&a).get(), 1); assert_eq!(Gc::ref_count(&b).get(), 1); assert_eq!(Gc::ref_count(&c).get(), 1); } #[test] fn make_mut_2() { let mut a = Gc::new(42); let b = a.clone(); let c = b.clone(); assert_eq!(*a, 42); assert_eq!(*b, 42); assert_eq!(*c, 42); *Gc::make_mut(&mut a) += 1; assert_eq!(*a, 43); assert_eq!(*b, 42); assert_eq!(*c, 42); // a should be unique // b and c should share their object assert_eq!(Gc::ref_count(&a).get(), 1); assert_eq!(Gc::ref_count(&b).get(), 2); assert_eq!(Gc::ref_count(&c).get(), 2); } #[test] fn make_mut_of_object_in_dumpster() { #[derive(Clone)] struct Foo { // just some gc pointer so foo lands in the dumpster something: Gc, } unsafe impl TraceWith for Foo { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.something.accept(visitor) } } let mut foo = Gc::new(Foo { something: Gc::new(5), }); drop(foo.clone()); // now foo is in the dumpster // and its ref count is one assert_eq!(Gc::ref_count(&foo).get(), 1); // we get a mut reference let foo_mut = Gc::make_mut(&mut foo); // now we collect garbage while we're also holding onto a mutable reference to foo // if foo is still in the dumpster then the collection will dereference it and cause UB collect(); // we need to do something with `foo_mut` here so the mutable borrow is actually held // during collection assert_eq!(*foo_mut.something, 5); } #[test] #[should_panic = "panic on visit"] #[cfg_attr(miri, ignore = "intentionally leaks memory")] fn panic_visit() { #[expect(unused)] struct PanicVisit(Gc); /// We technically can make it part of the contract for `Trace` to reject panicking impls, /// but it is good form to accept these even though they are malformed. unsafe impl TraceWith for PanicVisit { fn accept(&self, _: &mut V) -> Result<(), ()> { panic!("panic on visit"); } } let gc = Gc::new_cyclic(PanicVisit); let _ = gc.clone(); drop(gc); collect(); } #[test] /// Test that creating a `Gc` during a `Drop` implementation will still not leak the `Gc`. 
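/// The `Gc` created inside `Foo::drop` runs while a collection is already in progress, so it is
/// handed off via `deliver_dumpster` and only reclaimed by a later `collect` call; hence the two
/// `collect()` calls below.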
fn sync_leak_by_creation_in_drop() { static BAR_DROP_COUNT: AtomicUsize = AtomicUsize::new(0); struct Foo(OnceLock>); struct Bar(OnceLock>); unsafe impl TraceWith for Foo { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.0.accept(visitor) } } unsafe impl TraceWith for Bar { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.0.accept(visitor) } } impl Drop for Foo { fn drop(&mut self) { let gcbar = Gc::new(Bar(OnceLock::new())); let _ = gcbar.0.set(gcbar.clone()); drop(gcbar); crate::sync::collect::deliver_dumpster(); // needed to prevent allocation from being // lost in other thread } } impl Drop for Bar { fn drop(&mut self) { BAR_DROP_COUNT.fetch_add(1, Ordering::Relaxed); } } let foo = Gc::new(Foo(OnceLock::new())); let _ = foo.0.set(foo.clone()); drop(foo); collect(); // causes Bar to be created and then leaked collect(); // cleans up Bar (eventually) assert!(super::collect::DUMPSTER.with(|d| d.contents.borrow().is_empty())); assert_eq!(BAR_DROP_COUNT.load(Ordering::Relaxed), 1); } #[test] fn custom_trait_object() { trait MyTrait: Trace + Send + Sync {} impl MyTrait for T {} let gc = Gc::new(5i32); let gc: Gc = coerce_gc!(gc); _ = gc; } #[test] fn new_cyclic_simple() { struct Cycle(Gc); unsafe impl TraceWith for Cycle { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.0.accept(visitor) } } let gc = Gc::new_cyclic(Cycle); assert_eq!(Gc::ref_count(&gc).get(), 2); drop(gc); } #[test] #[should_panic = "told you"] fn panic_new_cyclic() { let _ = Gc::<()>::new_cyclic(|_| panic!("told you")); } #[test] fn gc_from_iter() { let _gc = (0..100).collect::>(); } #[test] fn self_referential_from_iter() { struct Ab { a: Gc, b: Gc, } unsafe impl TraceWith for Ab { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.a.accept(visitor)?; self.b.accept(visitor)?; Ok(()) } } let mut gcs = Vec::>::new(); gcs.push(Gc::new_cyclic(|a: Gc| Ab { a: a.clone(), b: a })); for _ in 0..10 { let b = gcs.last().unwrap().clone(); gcs.push(Gc::new_cyclic(|a: Gc| Ab { a, b })); } let _big_gc = gcs.into_iter().collect::>(); } ================================================ FILE: dumpster/src/unsync/collect.rs ================================================ /* dumpster, a cycle-tracking garbage collector for Rust. Copyright (C) 2023 Clayton Ramsey. This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Implementations of the single-threaded garbage-collection logic. use std::{ alloc::{dealloc, Layout}, cell::{Cell, RefCell}, collections::hash_map::Entry, mem::take, num::NonZeroUsize, ptr::{drop_in_place, NonNull}, }; use foldhash::{HashMap, HashMapExt, HashSet, HashSetExt}; use crate::{ ptr::Erased, unsync::{default_collect_condition, CollectInfo, Gc}, Trace, Visitor, }; use super::{CollectCondition, GcBox}; thread_local! { /// Whether the current thread is running a cleanup process. static COLLECTING: Cell = const { Cell::new(false) }; /// The global collection of allocation information for this thread. pub(super) static DUMPSTER: Dumpster = Dumpster { to_collect: RefCell::new(HashMap::new()), n_ref_drops: Cell::new(0), n_refs_living: Cell::new(0), collect_condition: Cell::new(default_collect_condition), }; } /// A dumpster is a collection of all the garbage that may or may not need to be cleaned up. /// It also contains information relevant to when a cleanup should be triggered. 
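/// Each thread owns its own `Dumpster` through the `DUMPSTER` thread-local declared above,
/// which is why plain `Cell`s and `RefCell`s are sufficient here.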
pub(super) struct Dumpster { /// A map from allocation IDs for allocations which may need to be collected to pointers to /// their allocations. to_collect: RefCell>, /// The number of times a reference has been dropped since the last collection was triggered. pub n_ref_drops: Cell, /// The number of references that currently exist in the entire heap and stack. pub n_refs_living: Cell, /// The function for determining whether a collection should be run. pub collect_condition: Cell, } #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] /// A unique identifier for an allocated garbage-collected block. /// /// It contains a pointer to the reference count of the allocation. struct AllocationId(pub NonNull>); impl From>> for AllocationId where T: Trace + ?Sized, { /// Get an allocation ID from a pointer to an allocation. fn from(value: NonNull>) -> Self { AllocationId(value.cast()) } } #[derive(Debug)] /// The necessary information required to collect some garbage-collected data. /// This data is stored in a map from allocation IDs to the necessary cleanup operation. struct Cleanup { /// The function which is called to build the reference graph and find all allocations /// reachable from this allocation. dfs_fn: unsafe fn(Erased, &mut Dfs), /// The function which is called to mark descendants of this allocation as reachable. mark_fn: unsafe fn(Erased, &mut Mark), /// A function used for dropping the allocation. drop_fn: unsafe fn(Erased, &mut DropAlloc<'_>), /// An erased pointer to the allocation. ptr: Erased, } /// Creates a function that applies a visitor to some erased pointer. /// /// # Safety /// /// `T` must be the same type that `ptr` was created with via [`Erased::new`]. macro_rules! apply_visitor { () => { |ptr, visitor| unsafe { _ = ptr.specify::>().as_ref().value.accept(visitor); } }; } impl Cleanup { /// Construct a new cleanup for an allocation. fn new(box_ptr: NonNull>) -> Cleanup { Cleanup { dfs_fn: apply_visitor!(), mark_fn: apply_visitor!(), drop_fn: drop_assist::, ptr: Erased::new(box_ptr), } } } impl Dumpster { /// Collect all unreachable allocations that this dumpster is responsible for. pub fn collect_all(&self) { if COLLECTING.get() { return; // Do not double-collect. } self.n_ref_drops.set(0); unsafe { let mut dfs = Dfs { visited: HashSet::with_capacity(self.to_collect.borrow().len()), ref_graph: HashMap::with_capacity(self.to_collect.borrow().len()), }; for (k, v) in &*self.to_collect.borrow() { if dfs.visited.insert(*k) { (v.dfs_fn)(v.ptr, &mut dfs); } } let mut mark = Mark { visited: HashSet::with_capacity(dfs.visited.len()), }; for (id, reachability) in dfs .ref_graph .iter() .filter(|(_, reachability)| reachability.n_unaccounted != 0) { mark.visited.insert(*id); (reachability.mark_fn)(reachability.ptr, &mut mark); } // any allocations which we didn't find must also be roots for (id, cleanup) in self .to_collect .borrow() .iter() .filter(|(id, _)| !dfs.ref_graph.contains_key(id)) { mark.visited.insert(*id); (cleanup.mark_fn)(cleanup.ptr, &mut mark); } dfs.visited.clear(); let mut decrementer = DropAlloc { visited: dfs.visited, reachable: &mark.visited, }; COLLECTING.set(true); // Do not hold mutable reference, as it is possible for a `Gc` to be marked dirty during // collection by dropping. 
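            // `take` swaps `to_collect` out for an empty map, so any allocation marked dirty by
            // a destructor running below lands in the fresh map rather than invalidating this
            // iteration.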
let mut collectees = take(&mut *self.to_collect.borrow_mut()); for cleanup in collectees .drain() .filter_map(|(id, cleanup)| (!mark.visited.contains(&id)).then_some(cleanup)) { (cleanup.drop_fn)(cleanup.ptr, &mut decrementer); } COLLECTING.set(false); assert!(collectees.is_empty()); let mut new_to_collect = self.to_collect.borrow_mut(); if new_to_collect.is_empty() { // Reuse allocation from `collectees` *new_to_collect = collectees; } } } /// Mark an allocation as "dirty," implying that it may need to be swept through later to find /// out if it has any references pointing to it. pub fn mark_dirty(&self, box_ptr: NonNull>) { self.to_collect .borrow_mut() .entry(AllocationId::from(box_ptr)) .or_insert_with(|| Cleanup::new(box_ptr)); } /// Mark an allocation as "cleaned," implying that the allocation is about to be destroyed and /// therefore should not be cleaned up later. pub fn mark_cleaned(&self, box_ptr: NonNull>) { self.to_collect .borrow_mut() .remove(&AllocationId::from(box_ptr)); } /// Notify the dumpster that a garbage-collected pointer has been dropped. /// /// This may trigger a cleanup of the heap, but is guaranteed to be amortized to _O(1)_. pub fn notify_dropped_gc(&self) { self.n_ref_drops.set(self.n_ref_drops.get() + 1); let old_refs_living = self.n_refs_living.get(); assert_ne!( old_refs_living, 0, "underflow on unsync::Gc number of living Gcs" ); self.n_refs_living.set(old_refs_living - 1); // check if it's been a long time since the last time we collected all // the garbage. // if so, go and collect it all again (amortized O(1)) if (self.collect_condition.get())(&CollectInfo { _private: () }) { self.collect_all(); } } /// Notify the dumpster that a new [`Gc`] has been created. pub fn notify_created_gc(&self) { self.n_refs_living.set(self.n_refs_living.get() + 1); } } impl Drop for Dumpster { fn drop(&mut self) { // cleanup any leftover allocations self.collect_all(); } } /// The data required to construct the graph of reachable allocations. pub(super) struct Dfs { /// The set of allocations which have already been visited. visited: HashSet, /// A map from allocation identifiers to information about their reachability. ref_graph: HashMap, } #[derive(Debug)] /// Information about the reachability of a structure. struct Reachability { /// The number of unaccounted-for references to this allocation. /// If this number is 0, the reference is not a root. n_unaccounted: usize, /// An erased pointer to the allocation under concern. ptr: Erased, /// A function used to mark descendants of this allocation as accessible. mark_fn: unsafe fn(Erased, &mut Mark), } impl Visitor for Dfs { fn visit_sync(&mut self, _: &crate::sync::Gc) where T: Trace + Send + Sync + ?Sized, { // because `Gc` is `!Sync`, we know we won't find a `Gc` this way and can return // immediately. } fn visit_unsync(&mut self, gc: &Gc) where T: Trace + ?Sized, { if Gc::is_dead(gc) { return; } let ptr = gc.ptr.get().unwrap(); let next_id = AllocationId::from(ptr); match self.ref_graph.entry(next_id) { Entry::Occupied(ref mut o) => { o.get_mut().n_unaccounted -= 1; } Entry::Vacant(v) => { v.insert(Reachability { n_unaccounted: unsafe { next_id.0.as_ref().get().get() - 1 }, ptr: Erased::new(ptr), mark_fn: apply_visitor!(), }); } } if self.visited.insert(next_id) { let _ = unsafe { ptr.as_ref() }.value.accept(self); } } } /// A mark traversal, which marks allocations as reachable. pub(super) struct Mark { /// The set of allocations which have been marked as reachable. 
visited: HashSet, } impl Visitor for Mark { fn visit_sync(&mut self, _: &crate::sync::Gc) where T: Trace + Send + Sync + ?Sized, { // because `Gc` is `!Sync`, we know we won't find a `Gc` this way and can return // immediately. } fn visit_unsync(&mut self, gc: &Gc) where T: Trace + ?Sized, { if Gc::is_dead(gc) { return; } let ptr = gc.ptr.get().unwrap(); if self.visited.insert(AllocationId::from(ptr)) { let _ = unsafe { ptr.as_ref().value.accept(self) }; } } } /// A visitor for dropping allocations. pub(super) struct DropAlloc<'a> { /// The set of unreachable allocations we've already visited. visited: HashSet, /// The set of unreachable allocations. reachable: &'a HashSet, } impl Visitor for DropAlloc<'_> { fn visit_sync(&mut self, _: &crate::sync::Gc) where T: Trace + Send + Sync + ?Sized, { // do nothing } fn visit_unsync(&mut self, gc: &Gc) where T: Trace + ?Sized, { if Gc::is_dead(gc) { return; } let ptr = gc.ptr.get().unwrap(); let id = AllocationId::from(ptr); gc.kill(); if self.reachable.contains(&id) { unsafe { let cell_ref = &ptr.as_ref().ref_count; cell_ref.set(NonZeroUsize::new(cell_ref.get().get() - 1).expect( "reachable allocation cannot be rendered unreachable by deleting lost alloc", )); } return; } if self.visited.insert(id) { unsafe { ptr.as_ref().value.accept(self).unwrap(); let layout = Layout::for_value(ptr.as_ref()); drop_in_place(ptr.as_ptr()); dealloc(ptr.as_ptr().cast(), layout); } } } } /// Decrement the outbound reference counts for any reachable allocations which this allocation can /// find. /// Also, drop the allocation when done. unsafe fn drop_assist(ptr: Erased, visitor: &mut DropAlloc<'_>) { let mut spec = ptr.specify::>(); if visitor.visited.insert(AllocationId::from(spec)) { spec.as_ref().value.accept(visitor).unwrap(); let mut_spec = spec.as_mut(); let layout = Layout::for_value(mut_spec); drop_in_place(mut_spec); dealloc(std::ptr::from_mut::>(mut_spec).cast(), layout); } } ================================================ FILE: dumpster/src/unsync/mod.rs ================================================ /* dumpster, a cycle-tracking garbage collector for Rust. Copyright (C) 2023 Clayton Ramsey. This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Thread-local garbage collection. //! //! Most users of this library will want to direct their attention to [`Gc`]. //! If you want to tune the garbage collector's cleanup frequency, take a look at //! [`set_collect_condition`]. //! //! # Examples //! //! ``` //! use dumpster::{unsync::Gc, Trace}; //! use std::cell::RefCell; //! //! #[derive(Trace)] //! struct Foo { //! refs: RefCell>>, //! } //! //! let foo = Gc::new(Foo { //! refs: RefCell::new(Vec::new()), //! }); //! //! // If you had used `Rc`, this would be a memory leak. //! // `Gc` can collect it, though! //! foo.refs.borrow_mut().push(foo.clone()); //! ``` use crate::{ contains_gcs, panic_deref_of_collected_object, ptr::Nullable, Trace, TraceWith, Visitor, }; use std::{ alloc::{dealloc, handle_alloc_error, Layout}, any::TypeId, borrow::{Borrow, Cow}, cell::Cell, fmt::Display, mem::{self, ManuallyDrop, MaybeUninit}, num::NonZeroUsize, ops::Deref, ptr::{self, addr_of, addr_of_mut, drop_in_place, NonNull}, slice, }; use self::collect::{Dfs, DropAlloc, Dumpster, Mark, DUMPSTER}; mod collect; #[cfg(test)] mod tests; /// Allows tracing with all unsync visitors. 
#[expect(private_bounds)] pub(crate) trait TraceUnsync: TraceWith + TraceWith + for<'a> TraceWith> + TraceWith { } impl TraceUnsync for T where T: ?Sized + TraceWith + TraceWith + for<'a> TraceWith> + TraceWith { } #[derive(Debug)] /// A garbage-collected pointer. /// /// This garbage-collected pointer may be used for data which is not safe to share across threads /// (such as a [`std::cell::RefCell`]). /// It can also be used for variably sized data. /// /// # Examples /// /// ``` /// use dumpster::unsync::Gc; /// /// let x: Gc = Gc::new(3); /// /// println!("{}", *x); // prints '3' /// // x is then freed automatically! /// ``` /// /// # Interaction with `Drop` /// /// While collecting cycles, it's possible for a `Gc` to exist that points to some deallocated /// object. /// To prevent undefined behavior, these `Gc`s are marked as dead during collection and rendered /// inaccessible. /// Dereferencing or cloning a `Gc` during the `Drop` implementation of a `Trace` type could /// result in the program panicking to keep the program from accessing memory after freeing it. /// If you're accessing a `Gc` during a `Drop` implementation, make sure to use the fallible /// operations [`Gc::try_deref`] and [`Gc::try_clone`]. pub struct Gc { /// A pointer to the heap allocation containing the data under concern. /// The pointee box should never be mutated. /// /// If `ptr` is `None`, then this is a dead `Gc`, meaning that the allocation it points to has /// been dropped. /// This can only happen observably if this `Gc` is accessed during the [`Drop`] implementation /// of a [`Trace`] type. ptr: Cell>>, } /// Collect all existing unreachable allocations. /// /// This operation is most useful for making sure that the `Drop` implementation for some data has /// been called before moving on (such as for a file handle or mutex guard), because the garbage /// collector is not eager under normal conditions. /// This only collects the allocations local to the caller's thread. /// /// # Examples /// /// ``` /// # fn main() -> Result<(), Box> { /// use dumpster::unsync::{collect, Gc}; /// use std::sync::Mutex; /// /// static MY_MUTEX: Mutex<()> = Mutex::new(()); /// /// let guard_gc = Gc::new(MY_MUTEX.lock()?); /// drop(guard_gc); /// // We're not certain that the handle that was contained in `guard_gc` has been dropped, so we /// // should force a collection to make sure. /// collect(); /// /// // We know this won't cause a deadlock because we made sure to run a collection. /// let _x = MY_MUTEX.lock()?; /// # Ok(()) /// # } /// ``` pub fn collect() { _ = DUMPSTER.try_with(Dumpster::collect_all); } /// Information passed to a [`CollectCondition`] used to determine whether the garbage collector /// should start collecting. pub struct CollectInfo { /// Dummy value so this is a private structure. _private: (), } /// A function which determines whether the garbage collector should start collecting. /// This function primarily exists so that it can be used with [`set_collect_condition`]. /// /// # Examples /// /// ```rust /// use dumpster::unsync::{set_collect_condition, CollectInfo}; /// /// fn always_collect(_: &CollectInfo) -> bool { /// true /// } /// /// set_collect_condition(always_collect); /// ``` pub type CollectCondition = fn(&CollectInfo) -> bool; #[must_use] /// The default collection condition used by the garbage collector. 
/// /// There are no guarantees about what this function returns, other than that it will return `true` /// with sufficient frequency to ensure that all `Gc` operations are amortized _O(1)_ in runtime. /// /// This function isn't really meant to be called by users, but rather it's supposed to be handed /// off to [`set_collect_condition`] to return to the default operating mode of the library. /// /// This collection condition applies locally, i.e. only to this thread. /// If you want it to apply globally, you'll have to update it every time you spawn a thread. /// /// # Examples /// /// ```rust /// use dumpster::unsync::{default_collect_condition, set_collect_condition}; /// /// set_collect_condition(default_collect_condition); /// ``` pub fn default_collect_condition(info: &CollectInfo) -> bool { info.n_gcs_dropped_since_last_collect() > info.n_gcs_existing() } /// Set the function which determines whether the garbage collector should be run. /// /// `f` will be periodically called by the garbage collector to determine whether it should perform /// a full cleanup of the heap. /// When `f` returns true, a cleanup will begin. /// /// # Examples /// /// ``` /// use dumpster::unsync::{set_collect_condition, CollectInfo}; /// /// /// This function will make sure a GC cleanup never happens unless directly activated. /// fn never_collect(_: &CollectInfo) -> bool { /// false /// } /// /// set_collect_condition(never_collect); /// ``` pub fn set_collect_condition(f: CollectCondition) { _ = DUMPSTER.try_with(|d| d.collect_condition.set(f)); } #[repr(C)] // This is only public to make the `unsync_coerce_gc` macro work. #[doc(hidden)] /// The underlying heap allocation for a [`Gc`]. pub struct GcBox { /// The number of extant references to this garbage-collected data. ref_count: Cell, /// The stored value inside this garbage-collected box. value: T, } impl Gc { /// Construct a new garbage-collected allocation, with `value` as its value. /// /// # Examples /// /// ``` /// use dumpster::unsync::Gc; /// /// let gc = Gc::new(0); /// ``` pub fn new(value: T) -> Gc where T: Sized, { _ = DUMPSTER.try_with(Dumpster::notify_created_gc); Gc { ptr: Cell::new(Nullable::new(NonNull::from(Box::leak(Box::new(GcBox { ref_count: Cell::new(NonZeroUsize::MIN), value, }))))), } } /// Construct a self-referencing `Gc`. /// /// `new_cyclic` first allocates memory for `T`, then constructs a dead `Gc` pointing to the /// allocation. The dead `Gc` is then passed to `data_fn` to construct a value of `T`, which /// is stored in the allocation. Finally, `new_cyclic` will update the dead self-referential /// `Gc`s and rehydrate them to produce the final value. /// /// # Panics /// /// If `data_fn` panics, the panic is propagated to the caller. /// The allocation is cleaned up normally. /// /// Additionally, if, when attempting to rehydrate the `Gc` members of `F`, the visitor fails to /// reach a `Gc`, this function will panic and reserve the allocation to be cleaned up /// later. /// /// # Notes on safety /// /// Incorrect implementations of `data_fn` may have unusual or strange results. /// Although `dumpster` guarantees that it will be safe, and will do its best to ensure correct /// results, it is generally unwise to allow dead `Gc`s to exist for long. /// If you implement `data_fn` wrong, this may cause panics later on inside of the collection /// process. 
/// /// # Examples /// /// ``` /// use dumpster::{unsync::Gc, Trace}; /// /// #[derive(Trace)] /// struct Cycle { /// this: Gc, /// } /// /// let gc = Gc::new_cyclic(|this| Cycle { this }); /// assert!(Gc::ptr_eq(&gc, &gc.this)); /// ``` pub fn new_cyclic) -> T>(data_fn: F) -> Self where T: Sized, { /// A struct containing an uninitialized value of `T`. /// May only be used inside `new_cyclic`. #[repr(transparent)] struct Uninitialized(MaybeUninit); unsafe impl TraceWith for Uninitialized { fn accept(&self, _: &mut V) -> Result<(), ()> { Ok(()) } } /// Data structure for cleaning up the allocation in case we panic along the way. struct CleanUp { /// Is `true` if the [`GcBox::value`] is initialized. initialized: bool, /// Pointer to the `GcBox` with a maybe uninitialized value. ptr: NonNull>, } impl Drop for CleanUp { fn drop(&mut self) { if self.initialized { // push this `Gc` into the destruction queue _ = DUMPSTER.try_with(|d| d.mark_dirty(self.ptr)); } else { // deallocate unsafe { dealloc( self.ptr.as_ptr().cast::(), Layout::for_value(self.ptr.as_ref()), ); } } } } // make an uninitialized allocation _ = DUMPSTER.try_with(Dumpster::notify_created_gc); let mut gcbox = NonNull::from(Box::leak(Box::new(GcBox { ref_count: Cell::new(NonZeroUsize::MIN), value: Uninitialized(MaybeUninit::::uninit()), }))); let mut cleanup = CleanUp { ptr: gcbox, initialized: false, }; // nilgc is a dead Gc let nilgc = Gc { ptr: Cell::new(Nullable::new(gcbox.cast::>()).as_null()), }; assert!(Gc::is_dead(&nilgc)); unsafe { // SAFETY: `gcbox` is a valid pointer to an uninitialized datum that we have allocated. gcbox.as_mut().value = Uninitialized(MaybeUninit::new(data_fn(nilgc))); } cleanup.initialized = true; let gcbox = gcbox.cast::>(); let res = unsafe { // SAFETY: the above unsafe block correctly constructed the Uninitialized value, so it // is safe to cast `gcbox` and then construct a reference. gcbox.as_ref().value.accept(&mut Rehydrate { ptr: Nullable::new(gcbox.cast()), type_id: TypeId::of::(), }) }; assert!( res.is_ok(), "visitor must be able to access all Gc fields of structure when rehydrating dead Gcs" ); let gc = Gc { ptr: Cell::new(Nullable::new(gcbox)), }; let _ = ManuallyDrop::new(cleanup); gc } /// Attempt to dereference this `Gc`. /// /// This function will return `None` if `self` is a "dead" `Gc`, which points to an /// already-deallocated object. /// This can only occur if a `Gc` is accessed during the `Drop` implementation of a /// [`Trace`] object. /// /// For a version which panics instead of returning `None`, consider using [`Deref`]. /// /// # Examples /// /// For a still-living `Gc`, this always returns `Some`. /// /// ``` /// use dumpster::unsync::Gc; /// /// let gc1 = Gc::new(0); /// assert!(Gc::try_deref(&gc1).is_some()); /// ``` /// /// The only way to get a `Gc` which fails on `try_clone` is by accessing a `Gc` during its /// `Drop` implementation. /// /// ``` /// use dumpster::{unsync::Gc, Trace}; /// /// #[derive(Trace)] /// struct Cycle(Gc); /// /// impl Drop for Cycle { /// fn drop(&mut self) { /// let maybe_ref = Gc::try_deref(&self.0); /// assert!(maybe_ref.is_none()); /// } /// } /// /// let gc1 = Gc::new_cyclic(|this| Cycle(this)); /// # drop(gc1); /// # dumpster::unsync::collect(); /// ``` pub fn try_deref(gc: &Gc) -> Option<&T> { (!gc.ptr.get().is_null()).then(|| &**gc) } /// Attempt to clone this `Gc`. /// /// This function will return `None` if `self` is a "dead" `Gc`, which points to an /// already-deallocated object. 
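A short sketch of the lifecycle of the handle passed to `new_cyclic` above (the `Node` type here is illustrative): inside `data_fn` the handle is still dead and must not be dereferenced, and once construction finishes it has been rehydrated to point at the completed allocation.

```rust
use dumpster::{unsync::Gc, Trace};

#[derive(Trace)]
struct Node {
    this: Gc<Node>,
    label: u8,
}

fn main() {
    let node = Gc::new_cyclic(|this| {
        // The closure receives a dead Gc; it cannot be dereferenced yet.
        assert!(Gc::is_dead(&this));
        assert!(Gc::try_deref(&this).is_none());
        Node { this, label: 7 }
    });
    // After `new_cyclic` returns, the stored handle has been rehydrated.
    assert!(Gc::ptr_eq(&node, &node.this));
    assert_eq!(Gc::ref_count(&node).get(), 2); // `node` plus the self-reference
    assert_eq!(node.label, 7);

    // The value is a self-cycle, so reclaim it with an explicit collection.
    drop(node);
    dumpster::unsync::collect();
}
```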
/// This can only occur if a `Gc` is accessed during the `Drop` implementation of a /// [`Trace`] object. /// /// For a version which panics instead of returning `None`, consider using [`Clone`]. /// /// # Examples /// /// For a still-living `Gc`, this always returns `Some`. /// /// ``` /// use dumpster::unsync::Gc; /// /// let gc1 = Gc::new(0); /// let gc2 = Gc::try_clone(&gc1).unwrap(); /// ``` /// /// The only way to get a `Gc` which fails on `try_clone` is by accessing a `Gc` during its /// `Drop` implementation. /// /// ``` /// use dumpster::{unsync::Gc, Trace}; /// /// #[derive(Trace)] /// struct Cycle(Gc); /// /// impl Drop for Cycle { /// fn drop(&mut self) { /// let cloned = Gc::try_clone(&self.0); /// assert!(cloned.is_none()); /// } /// } /// /// let gc1 = Gc::new_cyclic(|this| Cycle(this)); /// # drop(gc1); /// # dumpster::unsync::collect(); /// ``` pub fn try_clone(gc: &Gc) -> Option> { (!gc.ptr.get().is_null()).then(|| gc.clone()) } /// Provides a raw pointer to the data. /// /// Panics if `self` is a "dead" `Gc`, /// which points to an already-deallocated object. /// This can only occur if a `Gc` is accessed during the `Drop` implementation of a /// [`Trace`] object. /// /// # Examples /// /// ``` /// use dumpster::unsync::Gc; /// let x = Gc::new("hello".to_owned()); /// let y = Gc::clone(&x); /// let x_ptr = Gc::as_ptr(&x); /// assert_eq!(x_ptr, Gc::as_ptr(&x)); /// assert_eq!(unsafe { &*x_ptr }, "hello"); /// ``` pub fn as_ptr(gc: &Gc) -> *const T { let ptr = NonNull::as_ptr(gc.ptr.get().unwrap()); unsafe { addr_of_mut!((*ptr).value) } } /// Determine whether two `Gc`s are equivalent by reference. /// Returns `true` if both `this` and `other` point to the same value, in the same style as /// [`std::ptr::eq`]. /// /// # Examples /// /// ``` /// use dumpster::unsync::Gc; /// /// let gc1 = Gc::new(0); /// let gc2 = Gc::clone(&gc1); // points to same spot as `gc1` /// let gc3 = Gc::new(0); // same value, but points to a different object than `gc1` /// /// assert!(Gc::ptr_eq(&gc1, &gc2)); /// assert!(!Gc::ptr_eq(&gc1, &gc3)); /// ``` pub fn ptr_eq(this: &Gc, other: &Gc) -> bool { this.ptr.get().as_option() == other.ptr.get().as_option() } /// Get the number of references to the value pointed to by this `Gc`. /// /// This does not include internal references generated by the garbage collector. /// /// # Panics /// /// This function may panic if the `Gc` whose reference count we are loading is "dead" (i.e. /// generated through a `Drop` implementation). For further reference, take a look at /// [`Gc::is_dead`]. /// /// # Examples /// /// ``` /// use dumpster::unsync::Gc; /// /// let gc = Gc::new(()); /// assert_eq!(Gc::ref_count(&gc).get(), 1); /// let gc2 = gc.clone(); /// assert_eq!(Gc::ref_count(&gc).get(), 2); /// drop(gc); /// drop(gc2); /// ``` pub fn ref_count(gc: &Self) -> NonZeroUsize { let box_ptr = gc.ptr.get().expect( "Attempt to dereference Gc to already-collected object. \ This means a Gc escaped from a Drop implementation, likely implying a bug in your code.", ); let box_ref = unsafe { box_ptr.as_ref() }; box_ref.ref_count.get() } /// Determine whether this is a dead `Gc`. /// /// A `Gc` is dead if it does not point to a valid value. /// Such a `Gc` can only be made in one of two ways: first, if a `Gc` is accessed during the /// `Drop` implementation of a structure, and second, if a `Gc` leaks out of [`Gc::new_cyclic`]. 
/// /// # Examples /// /// ``` /// use dumpster::{unsync::Gc, Trace}; /// /// #[derive(Trace)] /// struct Cycle(Gc); /// /// impl Drop for Cycle { /// fn drop(&mut self) { /// assert!(Gc::is_dead(&self.0)); /// } /// } /// /// let gc1 = Gc::new_cyclic(|this| Cycle(this)); /// # drop(gc1); /// # dumpster::unsync::collect(); /// ``` pub fn is_dead(gc: &Self) -> bool { gc.ptr.get().is_null() } /// Consumes the `Gc`, returning the inner `GcBox` pointer. #[inline] #[must_use] fn into_ptr(this: Self) -> *const GcBox { let this = ManuallyDrop::new(this); this.ptr.get().as_ptr() } /// Constructs a `Gc` from the innner `GcBox` pointer. #[inline] #[must_use] unsafe fn from_ptr(ptr: *const GcBox) -> Self { Self { ptr: Cell::new(Nullable::from_ptr(ptr.cast_mut())), } } /// Exists solely for the [`coerce_gc`] macro. #[inline] #[must_use] #[doc(hidden)] pub fn __private_into_ptr(this: Self) -> *const GcBox { Self::into_ptr(this) } /// Exists solely for the [`coerce_gc`] macro. #[inline] #[must_use] #[doc(hidden)] pub unsafe fn __private_from_ptr(ptr: *const GcBox) -> Self { Self::from_ptr(ptr) } /// Kill this `Gc`, replacing it with a dead `Gc`. fn kill(&self) { self.ptr.set(self.ptr.get().as_null()); } } /// A struct for converting dead `Gc`s into live ones. /// /// This is used in [`Gc::new_cyclic`]. pub(super) struct Rehydrate { /// The pointer to the currently hydrating [`GcBox`]. ptr: Nullable>, /// The [`TypeId`] of `T` in `Gc` to be hydrated. type_id: TypeId, } impl Visitor for Rehydrate { fn visit_sync(&mut self, _: &crate::sync::Gc) where T: Trace + Send + Sync + ?Sized, { } fn visit_unsync(&mut self, gc: &Gc) where T: Trace + ?Sized, { if Gc::is_dead(gc) && TypeId::of::() == self.type_id { unsafe { // SAFETY: it is safe to transmute these pointers because we have checked // that they are of the same type. // Additionally, the `GcBox` has been fully initialized, so it is safe to // create a reference here. let cell_ptr = (&raw const gc.ptr).cast::>>>(); (*cell_ptr).set(self.ptr); let box_ref = &*self.ptr.as_ptr(); box_ref .ref_count .set(box_ref.ref_count.get().saturating_add(1)); _ = DUMPSTER.try_with(Dumpster::notify_created_gc); } } } } impl Gc { /// Makes a mutable reference to the given `Gc`. /// /// If there are other `Gc` pointers to the same allocation, then `make_mut` will /// [`clone`] the inner value to a new allocation to ensure unique ownership. This is also /// referred to as clone-on-write. /// /// [`clone`]: Clone::clone /// /// # Panics /// /// This function may panic if the `Gc` whose reference count we are loading is "dead" (i.e. /// generated through a `Drop` implementation). For further reference, take a look at /// [`Gc::is_dead`]. /// /// # Examples /// /// ``` /// use dumpster::unsync::Gc; /// /// let mut data = Gc::new(5); /// /// *Gc::make_mut(&mut data) += 1; // Won't clone anything /// let mut other_data = Gc::clone(&data); // Won't clone inner data /// *Gc::make_mut(&mut data) += 1; // Clones inner data /// *Gc::make_mut(&mut data) += 1; // Won't clone anything /// *Gc::make_mut(&mut other_data) *= 2; // Won't clone anything /// /// // Now `data` and `other_data` point to different allocations. 
/// assert_eq!(*data, 8); /// assert_eq!(*other_data, 12); /// ``` #[inline] pub fn make_mut(this: &mut Self) -> &mut T { if Gc::is_dead(this) { panic_deref_of_collected_object(); } // SAFETY: we checked above that the object is alive (not null) let ptr = unsafe { this.ptr.get().unwrap_unchecked() }; let box_ref = unsafe { ptr.as_ref() }; if box_ref.ref_count.get() == NonZeroUsize::MIN { // The dumpster must not contain this allocation while we hold // a mutable reference to its value because on collection // it would dereference the value to trace it. _ = DUMPSTER.try_with(|d| d.mark_cleaned(ptr)); } else { // We don't have unique access to the value so we need to clone it. *this = Gc::new(box_ref.value.clone()); } // SAFETY: we have exclusive access to this `GcBox` because we ensured // that the ref count is 1 and that there are no loose pointers in the // `to_collect` buffer of this thread's dumpster. unsafe { &mut (*this.ptr.get_mut().as_ptr()).value } } } impl Gc { /// Allocates an `GcBox` with sufficient space for /// a value of the provided layout. /// /// The function `mem_to_gc_box` is called with the data pointer /// and must return back a pointer for the `GcBox`. unsafe fn allocate_for_layout( value_layout: Layout, mem_to_gc_box: impl FnOnce(*mut u8) -> *mut GcBox, ) -> *mut GcBox { let layout = Layout::new::>() .extend(value_layout) .unwrap() .0 .pad_to_align(); Self::allocate_for_layout_of_box(layout, mem_to_gc_box) } /// Allocates an `GcBox` with the given layout. /// /// The function `mem_to_gc_box` is called with the data pointer /// and must return back a pointer for the `GcBox`. unsafe fn allocate_for_layout_of_box( layout: Layout, mem_to_gc_box: impl FnOnce(*mut u8) -> *mut GcBox, ) -> *mut GcBox { // SAFETY: layout has non-zero size because of the `ref_count` field let ptr = unsafe { std::alloc::alloc(layout) }; if ptr.is_null() { handle_alloc_error(layout); } let inner = mem_to_gc_box(ptr); unsafe { (&raw mut (*inner).ref_count).write(Cell::new(NonZeroUsize::MIN)); } inner } } impl Gc<[T]> { /// Allocates an `GcBox<[T]>` with the given length. #[inline] fn allocate_for_slice(len: usize) -> *mut GcBox<[T]> { unsafe { Self::allocate_for_layout(Layout::array::(len).unwrap(), |mem| { ptr::slice_from_raw_parts_mut(mem.cast::(), len) as *mut GcBox<[T]> }) } } } /// Allows coercing `T` of [`Gc`](Gc). /// /// This means that you can convert a `Gc` containing a strictly-sized type (such as `[T; N]`) into /// a `Gc` containing its unsized version (such as `[T]`), all without using nightly-only features. /// /// This is one of two easy ways to create a `Gc<[T]>`; the other method is to use [`FromIterator`]. /// /// # Examples /// /// ``` /// use dumpster::unsync::{coerce_gc, Gc}; /// /// let gc1: Gc<[u8; 3]> = Gc::new([7, 8, 9]); /// let gc2: Gc<[u8]> = coerce_gc!(gc1); /// assert_eq!(&gc2[..], &[7, 8, 9]); /// ``` /// /// Note that although this macro allows for type conversion, it _cannot_ be used for converting /// between incompatible types. /// /// ```compile_fail /// // This program is incorrect! /// use dumpster::unsync::{Gc, coerce_gc}; /// /// let gc1: Gc = Gc::new(1); /// let gc2: Gc = coerce_gc!(gc1); /// ``` #[doc(hidden)] #[macro_export] macro_rules! __unsync_coerce_gc { ($gc:expr) => {{ // Temporarily convert the `Gc` into a raw pointer to allow for coercion to occur. 
let ptr: *const _ = $crate::unsync::Gc::__private_into_ptr($gc); unsafe { $crate::unsync::Gc::__private_from_ptr(ptr) } }}; } #[doc(inline)] pub use crate::__unsync_coerce_gc as coerce_gc; impl Deref for Gc { type Target = T; /// Dereference this pointer, creating a reference to the contained value `T`. /// /// # Panics /// /// This function may panic if it is called from within the implementation of `std::ops::Drop` /// of its owning value, since returning such a reference could cause a use-after-free. /// It is not guaranteed to panic. /// /// For a version which returns `None` instead of panicking, consider [`Gc::try_deref`]. /// /// # Examples /// /// The following is a correct time to dereference a `Gc`. /// /// ``` /// use dumpster::unsync::Gc; /// /// let my_gc = Gc::new(0u8); /// let my_ref: &u8 = &my_gc; /// ``` /// /// Dereferencing a `Gc` while dropping is not correct. /// /// ```should_panic /// // This is wrong! /// use dumpster::{unsync::Gc, Trace}; /// /// #[derive(Trace)] /// struct Bad { /// s: String, /// this: Gc, /// } /// /// impl Drop for Bad { /// fn drop(&mut self) { /// // will panic when dereferencing `this` /// println!("{}", self.this.s) /// } /// } /// /// let foo = Gc::new_cyclic(|this| Bad { /// s: "foo".to_string(), /// this, /// }); /// ``` fn deref(&self) -> &Self::Target { unsafe { &self.ptr.get().expect("dereferencing Gc to already-collected object. \ This means a Gc escaped from a Drop implementation, likely implying a bug in your code.").as_ref().value } } } impl Clone for Gc { /// Create a duplicate reference to the same data pointed to by `self`. /// This does not duplicate the data. /// If this `Gc` [is dead](`Gc::is_dead`), the cloned value will also be a dead `Gc`. /// /// For a fallible version, refer to [`Gc::try_clone`]. /// /// # Examples /// /// ``` /// use dumpster::unsync::Gc; /// use std::sync::atomic::{AtomicU8, Ordering}; /// /// let gc1 = Gc::new(AtomicU8::new(0)); /// let gc2 = gc1.clone(); /// /// gc1.store(1, Ordering::Relaxed); /// assert_eq!(gc2.load(Ordering::Relaxed), 1); /// ``` /// /// You can also clone dead `Gc`s. /// /// ``` /// use dumpster::{unsync::Gc, Trace}; /// /// #[derive(Trace)] /// struct Cycle(Gc); /// /// impl Drop for Cycle { /// fn drop(&mut self) { /// let gc = self.0.clone(); /// assert!(Gc::is_dead(&gc)); /// } /// } /// /// let gc1 = Gc::new_cyclic(|this| Cycle(this)); /// # drop(gc1); /// # dumpster::unsync::collect(); /// ``` fn clone(&self) -> Self { let Some(ptr) = self.ptr.get().as_option() else { return Self { ptr: self.ptr.clone(), }; }; unsafe { let box_ref = ptr.as_ref(); box_ref .ref_count .set(box_ref.ref_count.get().saturating_add(1)); } _ = DUMPSTER.try_with(|d| { d.notify_created_gc(); // d.mark_cleaned(self.ptr); }); Self { ptr: self.ptr.clone(), } } } impl Drop for Gc { /// Destroy this garbage-collected pointer. /// /// If this is the last reference which can reach the pointed-to data, the allocation that it /// points to will be destroyed. 
fn drop(&mut self) { let Some(mut ptr) = self.ptr.get().as_option() else { return; }; let dumpster_is_destroyed = DUMPSTER .try_with(|d| { let box_ref = unsafe { ptr.as_ref() }; match box_ref.ref_count.get() { NonZeroUsize::MIN => { d.mark_cleaned(ptr); unsafe { // this was the last reference, drop unconditionally drop_in_place(addr_of_mut!(ptr.as_mut().value)); // note: `box_ref` is no longer usable dealloc(ptr.as_ptr().cast::(), Layout::for_value(ptr.as_ref())); } } n => { // decrement the ref count - but another reference to this data still // lives box_ref .ref_count .set(NonZeroUsize::new(n.get() - 1).unwrap()); if contains_gcs(&box_ref.value).unwrap_or(true) { // remaining references could be a cycle - therefore, mark it as dirty // so we can check later d.mark_dirty(ptr); } } } // Notify that a GC has been dropped, potentially triggering a cleanup d.notify_dropped_gc(); }) .is_err(); if dumpster_is_destroyed { // The `DUMPSTER` thread local has already been destroyed. This will only happen // when if `Gc` is itself stored in a thread local or was created in a thread local // destructor. We still do reference counting but won't be able to collect cycles. let box_ref = unsafe { ptr.as_ref() }; match box_ref.ref_count.get() { NonZeroUsize::MIN => { unsafe { // this was the last reference, drop unconditionally drop_in_place(addr_of_mut!(ptr.as_mut().value)); // note: `box_ref` is no longer usable dealloc(ptr.as_ptr().cast::(), Layout::for_value(ptr.as_ref())); } } n => { // decrement the ref count - but another reference to this data still // lives box_ref .ref_count .set(NonZeroUsize::new(n.get() - 1).unwrap()); } } } } } impl PartialEq> for Gc where T: Trace + ?Sized + PartialEq, { /// Test for equality on two `Gc`s. /// /// Two `Gc`s are equal if their inner values are equal, even if they are stored in different /// allocations. /// Because `PartialEq` does not imply reflexivity, and there is no current path for trait /// specialization, this function does not do a "fast-path" check for reference equality. /// Therefore, if two `Gc`s point to the same allocation, the implementation of `eq` will still /// require a direct call to `eq` on the values. /// /// # Panics /// /// This function may panic if it is called from within the implementation of `std::ops::Drop` /// of its owning value, since returning such a reference could cause a use-after-free. /// It is not guaranteed to panic. /// Additionally, if this `Gc` is moved out of an allocation during a `Drop` implementation, it /// could later cause a panic. /// For further details, refer to the main documentation for `Gc`. /// /// # Examples /// /// ``` /// use dumpster::unsync::Gc; /// /// let gc = Gc::new(6); /// assert!(gc == Gc::new(6)); /// ``` fn eq(&self, other: &Gc) -> bool { self.as_ref() == other.as_ref() } } impl Eq for Gc where T: Trace + ?Sized + PartialEq {} impl CollectInfo { #[must_use] /// Get the number of times that a [`Gc`] has been dropped since the last time a collection /// operation was performed. /// /// # Examples /// /// ``` /// use dumpster::unsync::{set_collect_condition, CollectInfo}; /// /// // Collection condition for whether many Gc's have been dropped. 
/// fn have_many_gcs_dropped(info: &CollectInfo) -> bool { /// info.n_gcs_dropped_since_last_collect() > 100 /// } /// /// set_collect_condition(have_many_gcs_dropped); /// ``` pub fn n_gcs_dropped_since_last_collect(&self) -> usize { DUMPSTER.try_with(|d| d.n_ref_drops.get()).unwrap_or(0) } #[must_use] /// Get the total number of [`Gc`]s which currently exist. /// /// # Examples /// /// ``` /// use dumpster::unsync::{set_collect_condition, CollectInfo}; /// /// // Collection condition for whether many Gc's currently exist. /// fn do_many_gcs_exist(info: &CollectInfo) -> bool { /// info.n_gcs_existing() > 100 /// } /// /// set_collect_condition(do_many_gcs_exist); /// ``` pub fn n_gcs_existing(&self) -> usize { DUMPSTER.try_with(|d| d.n_refs_living.get()).unwrap_or(0) } } unsafe impl TraceWith for Gc { fn accept(&self, visitor: &mut V) -> Result<(), ()> { visitor.visit_unsync(self); Ok(()) } } impl AsRef for Gc { fn as_ref(&self) -> &T { self } } impl Borrow for Gc { fn borrow(&self) -> &T { self } } impl Default for Gc { fn default() -> Self { Gc::new(T::default()) } } impl std::fmt::Pointer for Gc { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { std::fmt::Pointer::fmt(&addr_of!(**self), f) } } #[cfg(feature = "coerce-unsized")] impl std::ops::CoerceUnsized> for Gc where T: std::marker::Unsize + Trace + ?Sized, U: Trace + ?Sized, { } impl Display for Gc { /// Formats the value using its `Display` implementation. /// /// # Note /// /// If `T` contains cyclic references through `Gc` pointers and its `Display` implementation /// attempts to traverse them, this may cause infinite recursion. Types with potential cycles /// should implement `Display` to avoid following cyclic references. fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { Display::fmt(&**self, f) } } impl From for Gc { /// Converts a generic type `T` into an `Gc` /// /// The conversion allocates on the heap and moves `t` /// from the stack into it. /// /// # Example /// ```rust /// # use dumpster::unsync::Gc; /// let x = 5; /// let rc = Gc::new(5); /// /// assert_eq!(Gc::from(x), rc); /// ``` fn from(value: T) -> Self { Gc::new(value) } } impl From<[T; N]> for Gc<[T]> { /// Converts a [`[T; N]`](prim@array) into an `Gc<[T]>`. /// /// The conversion moves the array into a newly allocated `Gc`. /// /// # Example /// /// ``` /// # use dumpster::unsync::Gc; /// let original: [i32; 3] = [1, 2, 3]; /// let shared: Gc<[i32]> = Gc::from(original); /// assert_eq!(&[1, 2, 3], &shared[..]); /// ``` #[inline] fn from(v: [T; N]) -> Gc<[T]> { coerce_gc!(Gc::<[T; N]>::from(v)) } } impl From<&[T]> for Gc<[T]> { /// Allocates a garbage-collected slice and fills it by cloning `slice`'s items. /// /// # Example /// /// ``` /// # use dumpster::unsync::Gc; /// let original: &[i32] = &[1, 2, 3]; /// let shared: Gc<[i32]> = Gc::from(original); /// assert_eq!(&[1, 2, 3], &shared[..]); /// ``` #[inline] fn from(slice: &[T]) -> Gc<[T]> { // Panic guard while cloning T elements. // In the event of a panic, elements that have been written // into the new GcBox will be dropped, then the memory freed. 
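The two arms of the `Drop` implementation above have observable consequences: an allocation that cannot participate in a cycle is reclaimed the moment its last handle is dropped, while cyclic garbage is only marked dirty and waits for a collection pass. A standalone sketch using the public API (`Node`, `DROPS`, and `never_collect` are illustrative names, not library items):

```rust
use dumpster::{
    unsync::{collect, set_collect_condition, CollectInfo, Gc},
    Trace,
};
use std::cell::RefCell;
use std::sync::atomic::{AtomicUsize, Ordering};

static DROPS: AtomicUsize = AtomicUsize::new(0);

#[derive(Trace)]
struct Node {
    next: RefCell<Option<Gc<Node>>>,
}

impl Drop for Node {
    fn drop(&mut self) {
        DROPS.fetch_add(1, Ordering::Relaxed);
    }
}

/// Disable automatic collection so the difference is visible.
fn never_collect(_: &CollectInfo) -> bool {
    false
}

fn main() {
    set_collect_condition(never_collect);

    // Acyclic: freed eagerly, like plain reference counting.
    drop(Gc::new(Node { next: RefCell::new(None) }));
    assert_eq!(DROPS.load(Ordering::Relaxed), 1);

    // Cyclic: survives the drop of its last external handle...
    let a = Gc::new(Node { next: RefCell::new(None) });
    *a.next.borrow_mut() = Some(a.clone());
    drop(a);
    assert_eq!(DROPS.load(Ordering::Relaxed), 1);

    // ...until a collection pass runs.
    collect();
    assert_eq!(DROPS.load(Ordering::Relaxed), 2);
}
```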
struct Guard { /// pointer to `GcBox` to deallocate on panic mem: *mut u8, /// layout of the `GcBox` to deallocate on panic layout: Layout, /// pointer to the `GcBox`'s value elems: *mut T, /// the number of elements cloned so far n_elems: usize, } impl Drop for Guard { fn drop(&mut self) { unsafe { let slice = slice::from_raw_parts_mut(self.elems, self.n_elems); ptr::drop_in_place(slice); dealloc(self.mem, self.layout); } } } unsafe { let value_layout = Layout::array::(slice.len()).unwrap(); let layout = Layout::new::>() .extend(value_layout) .unwrap() .0 .pad_to_align(); let ptr = Self::allocate_for_layout_of_box(layout, |mem| { ptr::slice_from_raw_parts_mut(mem.cast::(), slice.len()) as *mut GcBox<[T]> }); // Pointer to first element let elems = (&raw mut (*ptr).value).cast::(); let mut guard = Guard { mem: ptr.cast::(), layout, elems, n_elems: 0, }; for (i, item) in slice.iter().enumerate() { ptr::write(elems.add(i), item.clone()); guard.n_elems += 1; } // All clear. Forget the guard so it doesn't free the new GcBox. mem::forget(guard); _ = DUMPSTER.try_with(Dumpster::notify_created_gc); Self { ptr: Cell::new(Nullable::from_ptr(ptr)), } } } } impl From<&mut [T]> for Gc<[T]> { /// Allocates a garbage-collected slice and fills it by cloning `v`'s items. /// /// # Example /// /// ``` /// # use dumpster::unsync::Gc; /// let mut original = [1, 2, 3]; /// let original: &mut [i32] = &mut original; /// let shared: Gc<[i32]> = Gc::from(original); /// assert_eq!(&[1, 2, 3], &shared[..]); /// ``` #[inline] fn from(value: &mut [T]) -> Self { Gc::from(&*value) } } impl From<&str> for Gc { /// Allocates a garbage-collected string slice and copies `v` into it. /// /// # Example /// /// ``` /// # use dumpster::unsync::Gc; /// let shared: Gc = Gc::from("statue"); /// assert_eq!("statue", &shared[..]); /// ``` #[inline] fn from(v: &str) -> Self { let bytes = Gc::<[u8]>::from(v.as_bytes()); unsafe { Gc::from_ptr(Gc::into_ptr(bytes) as *const GcBox) } } } impl From<&mut str> for Gc { /// Allocates a garbage-collected string slice and copies `v` into it. /// /// # Example /// /// ``` /// # use dumpster::unsync::Gc; /// let mut original = String::from("statue"); /// let original: &mut str = &mut original; /// let shared: Gc = Gc::from(original); /// assert_eq!("statue", &shared[..]); /// ``` #[inline] fn from(v: &mut str) -> Self { Gc::from(&*v) } } impl From> for Gc<[u8]> { /// Converts a garbage-collected string slice into a byte slice. /// /// # Example /// /// ``` /// # use dumpster::unsync::Gc; /// let string: Gc = Gc::from("eggplant"); /// let bytes: Gc<[u8]> = Gc::from(string); /// assert_eq!("eggplant".as_bytes(), bytes.as_ref()); /// ``` #[inline] fn from(value: Gc) -> Self { unsafe { Gc::from_ptr(Gc::into_ptr(value) as *const GcBox<[u8]>) } } } impl From for Gc { /// Allocates a garbage-collected string slice and copies `v` into it. /// /// # Example /// /// ``` /// # use dumpster::unsync::Gc; /// let original: String = "statue".to_owned(); /// let shared: Gc = Gc::from(original); /// assert_eq!("statue", &shared[..]); /// ``` #[inline] fn from(value: String) -> Self { Self::from(&value[..]) } } impl From> for Gc { /// Move a boxed object to a new, garbage collected, allocation. 
/// /// # Example /// /// ``` /// # use dumpster::unsync::Gc; /// let original: Box = Box::new(1); /// let shared: Gc = Gc::from(original); /// assert_eq!(1, *shared); /// ``` #[inline] fn from(src: Box) -> Self { unsafe { let layout = Layout::for_value(&*src); let gc_ptr = Gc::allocate_for_layout(layout, <*mut u8>::cast::>); // Copy value as bytes ptr::copy_nonoverlapping( (&raw const *src).cast::(), (&raw mut (*gc_ptr).value).cast::(), layout.size(), ); // Free the allocation without dropping its contents let bptr = Box::into_raw(src); let src = Box::from_raw(bptr.cast::>()); drop(src); _ = DUMPSTER.try_with(Dumpster::notify_created_gc); Self::from_ptr(gc_ptr) } } } impl From> for Gc<[T]> { /// Allocates a garbage-collected slice and moves `vec`'s items into it. /// /// # Example /// /// ``` /// # use dumpster::unsync::Gc; /// let unique: Vec = vec![1, 2, 3]; /// let shared: Gc<[i32]> = Gc::from(unique); /// assert_eq!(&[1, 2, 3], &shared[..]); /// ``` #[inline] fn from(vec: Vec) -> Self { let mut vec = ManuallyDrop::new(vec); let vec_cap = vec.capacity(); let vec_len = vec.len(); let vec_ptr = vec.as_mut_ptr(); let gc_ptr = Self::allocate_for_slice(vec_len); unsafe { let dst_ptr = (&raw mut (*gc_ptr).value).cast::(); ptr::copy_nonoverlapping(vec_ptr, dst_ptr, vec_len); let _ = Vec::from_raw_parts(vec_ptr, 0, vec_cap); _ = DUMPSTER.try_with(Dumpster::notify_created_gc); Self::from_ptr(gc_ptr) } } } impl<'a, B: Trace> From> for Gc where B: ToOwned + ?Sized, Gc: From<&'a B> + From, { /// Creates a garbage-collected pointer from a clone-on-write pointer by /// copying its content. /// /// # Example /// /// ```rust /// # use dumpster::unsync::Gc; /// # use std::borrow::Cow; /// let cow: Cow<'_, str> = Cow::Borrowed("eggplant"); /// let shared: Gc = Gc::from(cow); /// assert_eq!("eggplant", &shared[..]); /// ``` #[inline] fn from(cow: Cow<'a, B>) -> Gc { match cow { Cow::Borrowed(s) => Gc::from(s), Cow::Owned(s) => Gc::from(s), } } } impl FromIterator for Gc<[T]> where T: Trace, { fn from_iter>(iter: I) -> Self { // Collect into a `Vec` for O(n) performance. // TODO: this could be slightly optimized by using the `Gc<[]>` layout for perf, but this is // a later problem. Self::from(iter.into_iter().collect::>()) } } ================================================ FILE: dumpster/src/unsync/tests.rs ================================================ /* dumpster, a cycle-tracking garbage collector for Rust. Copyright (C) 2023 Clayton Ramsey. This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Simple tests using manual implementations of [`Trace`]. 
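The tests below implement `TraceWith` by hand so that this module does not depend on the derive macro. For downstream code, the `derive` feature generates an equivalent field-forwarding implementation; a minimal sketch of the derived counterpart of the `MultiRef` type used throughout these tests:

```rust
use dumpster::{
    unsync::{collect, Gc},
    Trace,
};
use std::cell::RefCell;

// Derived equivalent (in spirit) of the hand-written `MultiRef` below.
#[derive(Trace)]
struct MultiRefDerived {
    refs: RefCell<Vec<Gc<MultiRefDerived>>>,
}

fn main() {
    let node = Gc::new(MultiRefDerived {
        refs: RefCell::new(Vec::new()),
    });
    node.refs.borrow_mut().push(node.clone()); // self-cycle
    drop(node);
    collect(); // the derived visitor lets the collector find and free the cycle
}
```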
use foldhash::{HashMap, HashMapExt}; use crate::{unsync::coerce_gc, Visitor}; use super::*; use std::{ cell::{OnceCell, RefCell}, collections::hash_map::Entry, mem::{take, transmute, MaybeUninit}, sync::{ atomic::{AtomicBool, AtomicU8, AtomicUsize, Ordering}, Mutex, }, }; struct DropCount(&'static AtomicUsize); impl Drop for DropCount { fn drop(&mut self) { self.0.fetch_add(1, Ordering::Relaxed); } } unsafe impl TraceWith for DropCount { fn accept(&self, _: &mut V) -> Result<(), ()> { Ok(()) } } #[test] /// Test a simple data structure fn simple() { static DROPPED: AtomicBool = AtomicBool::new(false); struct Foo; impl Drop for Foo { fn drop(&mut self) { DROPPED.store(true, Ordering::Relaxed); } } unsafe impl TraceWith for Foo { fn accept(&self, _: &mut V) -> Result<(), ()> { Ok(()) } } let gc1 = Gc::new(Foo); let gc2 = Gc::clone(&gc1); assert!(!DROPPED.load(Ordering::Relaxed)); drop(gc1); assert!(!DROPPED.load(Ordering::Relaxed)); drop(gc2); assert!(DROPPED.load(Ordering::Relaxed)); } #[derive(Debug)] struct MultiRef { refs: RefCell>>, drop_count: &'static AtomicUsize, } unsafe impl TraceWith for MultiRef { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.refs.accept(visitor) } } impl Drop for MultiRef { fn drop(&mut self) { self.drop_count.fetch_add(1, Ordering::Relaxed); } } #[test] fn self_referential() { static DROPPED: AtomicU8 = AtomicU8::new(0); struct Foo(RefCell>>); unsafe impl TraceWith for Foo { #[inline] fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.0.accept(visitor) } } impl Drop for Foo { fn drop(&mut self) { DROPPED.fetch_add(1, Ordering::Relaxed); } } let gc = Gc::new(Foo(RefCell::new(None))); gc.0.replace(Some(Gc::clone(&gc))); assert_eq!(DROPPED.load(Ordering::Relaxed), 0); drop(gc); collect(); assert_eq!(DROPPED.load(Ordering::Relaxed), 1); } #[test] fn cyclic() { static DROPPED: AtomicU8 = AtomicU8::new(0); struct Foo(RefCell>>); unsafe impl TraceWith for Foo { #[inline] fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.0.accept(visitor) } } impl Drop for Foo { fn drop(&mut self) { DROPPED.fetch_add(1, Ordering::Relaxed); } } let foo1 = Gc::new(Foo(RefCell::new(None))); let foo2 = Gc::new(Foo(RefCell::new(Some(Gc::clone(&foo1))))); foo1.0.replace(Some(Gc::clone(&foo2))); assert_eq!(DROPPED.load(Ordering::Relaxed), 0); drop(foo1); assert_eq!(DROPPED.load(Ordering::Relaxed), 0); drop(foo2); collect(); assert_eq!(DROPPED.load(Ordering::Relaxed), 2); } /// Construct a complete graph of garbage-collected fn complete_graph(detectors: &'static [AtomicUsize]) -> Vec> { let mut gcs = Vec::new(); for d in detectors { let gc = Gc::new(MultiRef { refs: RefCell::new(Vec::new()), drop_count: d, }); for x in &gcs { gc.refs.borrow_mut().push(Gc::clone(x)); x.refs.borrow_mut().push(Gc::clone(&gc)); } gcs.push(gc); } gcs } #[test] fn complete4() { static DETECTORS: [AtomicUsize; 4] = [ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), ]; let mut gcs = complete_graph(&DETECTORS); for _ in 0..3 { gcs.pop(); } for detector in &DETECTORS { assert_eq!(detector.load(Ordering::Relaxed), 0); } drop(gcs); collect(); for detector in &DETECTORS { assert_eq!(detector.load(Ordering::Relaxed), 1); } } #[test] fn parallel_loop() { static COUNT_1: AtomicUsize = AtomicUsize::new(0); static COUNT_2: AtomicUsize = AtomicUsize::new(0); static COUNT_3: AtomicUsize = AtomicUsize::new(0); static COUNT_4: AtomicUsize = AtomicUsize::new(0); let gc1 = Gc::new(MultiRef { drop_count: &COUNT_1, refs: RefCell::new(Vec::new()), }); let gc2 = 
Gc::new(MultiRef { drop_count: &COUNT_2, refs: RefCell::new(vec![Gc::clone(&gc1)]), }); let gc3 = Gc::new(MultiRef { drop_count: &COUNT_3, refs: RefCell::new(vec![Gc::clone(&gc1)]), }); let gc4 = Gc::new(MultiRef { drop_count: &COUNT_4, refs: RefCell::new(vec![Gc::clone(&gc2), Gc::clone(&gc3)]), }); gc1.refs.borrow_mut().push(Gc::clone(&gc4)); assert_eq!(COUNT_1.load(Ordering::Relaxed), 0); assert_eq!(COUNT_2.load(Ordering::Relaxed), 0); assert_eq!(COUNT_3.load(Ordering::Relaxed), 0); assert_eq!(COUNT_4.load(Ordering::Relaxed), 0); drop(gc1); assert_eq!(COUNT_1.load(Ordering::Relaxed), 0); assert_eq!(COUNT_2.load(Ordering::Relaxed), 0); assert_eq!(COUNT_3.load(Ordering::Relaxed), 0); assert_eq!(COUNT_4.load(Ordering::Relaxed), 0); drop(gc2); assert_eq!(COUNT_1.load(Ordering::Relaxed), 0); assert_eq!(COUNT_2.load(Ordering::Relaxed), 0); assert_eq!(COUNT_3.load(Ordering::Relaxed), 0); assert_eq!(COUNT_4.load(Ordering::Relaxed), 0); drop(gc3); assert_eq!(COUNT_1.load(Ordering::Relaxed), 0); assert_eq!(COUNT_2.load(Ordering::Relaxed), 0); assert_eq!(COUNT_3.load(Ordering::Relaxed), 0); assert_eq!(COUNT_4.load(Ordering::Relaxed), 0); drop(gc4); collect(); assert_eq!(COUNT_1.load(Ordering::Relaxed), 1); assert_eq!(COUNT_2.load(Ordering::Relaxed), 1); assert_eq!(COUNT_3.load(Ordering::Relaxed), 1); assert_eq!(COUNT_4.load(Ordering::Relaxed), 1); } #[test] /// Check that we can drop a Gc which points to some allocation with a borrowed `RefCell` in it. fn double_borrow() { static DROP_COUNT: AtomicUsize = AtomicUsize::new(0); let gc = Gc::new(MultiRef { refs: RefCell::new(Vec::new()), drop_count: &DROP_COUNT, }); gc.refs.borrow_mut().push(gc.clone()); let mut my_borrow = gc.refs.borrow_mut(); my_borrow.pop(); drop(my_borrow); assert_eq!(DROP_COUNT.load(Ordering::Relaxed), 0); collect(); drop(gc); collect(); assert_eq!(DROP_COUNT.load(Ordering::Relaxed), 1); } #[test] #[cfg(feature = "coerce-unsized")] fn coerce_array() { let gc1: Gc<[u8; 3]> = Gc::new([0, 0, 0]); let gc2: Gc<[u8]> = gc1; assert_eq!(gc2.len(), 3); assert_eq!( std::mem::size_of::>(), 2 * std::mem::size_of::() ); } #[test] fn coerce_array_using_macro() { let gc1: Gc<[u8; 3]> = Gc::new([0, 0, 0]); let gc2: Gc<[u8]> = coerce_gc!(gc1); assert_eq!(gc2.len(), 3); assert_eq!( std::mem::size_of::>(), 2 * std::mem::size_of::() ); } #[test] #[should_panic = "dereferencing Gc to already-collected object. This means a Gc escaped from a Drop implementation, likely implying a bug in your code."] fn escape_dead_pointer() { thread_local! {static ESCAPED: Mutex>> = const { Mutex::new(None) };} struct Escape { x: u8, ptr: Mutex>>, } impl Drop for Escape { fn drop(&mut self) { ESCAPED.with(|e| { let mut escaped_guard = e.lock().unwrap(); if escaped_guard.is_none() { *escaped_guard = (*self.ptr.lock().unwrap()).take(); } }); } } unsafe impl TraceWith for Escape { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.ptr.accept(visitor) } } let esc = Gc::new(Escape { x: 0, ptr: Mutex::new(None), }); *(*esc).ptr.lock().unwrap() = Some(esc.clone()); drop(esc); collect(); let _x = ESCAPED.with(|e| e.lock().unwrap().as_ref().unwrap().x); } #[test] fn from_box() { let gc: Gc = Gc::from(Box::new(String::from("hello"))); // The `From>` implementation executes a different code path to // construct the `Gc`. // // Here we ensure that the metadata is initialized to a valid state. 
assert_eq!(Gc::ref_count(&gc).get(), 1); assert_eq!(&*gc, "hello"); } #[test] fn from_slice() { let gc: Gc<[String]> = Gc::from(&[String::from("hello"), String::from("world")][..]); // The `From<&[T]>` implementation executes a different code path to // construct the `Gc`. // // Here we ensure that the metadata is initialized to a valid state. assert_eq!(Gc::ref_count(&gc).get(), 1); assert_eq!(&*gc, ["hello", "world"]); } #[test] #[should_panic = "told you"] fn from_slice_panic() { struct MayPanicOnClone { value: String, panic: bool, } impl Clone for MayPanicOnClone { fn clone(&self) -> Self { assert!(!self.panic, "told you"); Self { value: self.value.clone(), panic: self.panic, } } } unsafe impl TraceWith for MayPanicOnClone { fn accept(&self, _: &mut V) -> Result<(), ()> { Ok(()) } } let slice: &[MayPanicOnClone] = &[ MayPanicOnClone { value: String::from("a"), panic: false, }, MayPanicOnClone { value: String::from("b"), panic: false, }, MayPanicOnClone { value: String::from("c"), panic: true, }, ]; let _: Gc<[MayPanicOnClone]> = Gc::from(slice); } #[test] fn from_vec() { let gc: Gc<[String]> = Gc::from(vec![String::from("hello"), String::from("world")]); // The `From>` implementation executes a different code path to // construct the `Gc`. // // Here we ensure that the metadata is initialized to a valid state. assert_eq!(Gc::ref_count(&gc).get(), 1); assert_eq!(&*gc, ["hello", "world"]); } #[test] fn make_mut() { let mut a = Gc::new(42); let mut b = a.clone(); let mut c = b.clone(); assert_eq!(*Gc::make_mut(&mut a), 42); assert_eq!(*Gc::make_mut(&mut b), 42); assert_eq!(*Gc::make_mut(&mut c), 42); *Gc::make_mut(&mut a) += 1; *Gc::make_mut(&mut b) += 2; *Gc::make_mut(&mut c) += 3; assert_eq!(*a, 43); assert_eq!(*b, 44); assert_eq!(*c, 45); // they should all be unique assert_eq!(Gc::ref_count(&a).get(), 1); assert_eq!(Gc::ref_count(&b).get(), 1); assert_eq!(Gc::ref_count(&c).get(), 1); } #[test] fn make_mut_2() { let mut a = Gc::new(42); let b = a.clone(); let c = b.clone(); assert_eq!(*a, 42); assert_eq!(*b, 42); assert_eq!(*c, 42); *Gc::make_mut(&mut a) += 1; assert_eq!(*a, 43); assert_eq!(*b, 42); assert_eq!(*c, 42); // a should be unique // b and c should share their object assert_eq!(Gc::ref_count(&a).get(), 1); assert_eq!(Gc::ref_count(&b).get(), 2); assert_eq!(Gc::ref_count(&c).get(), 2); } #[test] fn make_mut_of_object_in_dumpster() { #[derive(Clone)] struct Foo { // just some gc pointer so foo lands in the dumpster something: Gc, } unsafe impl TraceWith for Foo { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.something.accept(visitor) } } let mut foo = Gc::new(Foo { something: Gc::new(5), }); drop(foo.clone()); // now foo is in the dumpster // and its ref count is one assert_eq!(Gc::ref_count(&foo).get(), 1); // we get a mut reference let foo_mut = Gc::make_mut(&mut foo); // now we collect garbage while we're also holding onto a mutable reference to foo // if foo is still in the dumpster then the collection will dereference it and cause UB collect(); // we need to do something with `foo_mut` here so the mutable borrow is actually held // during collection assert_eq!(*foo_mut.something, 5); } #[test] #[should_panic = "panic on visit"] #[cfg_attr(miri, ignore = "intentionally leaks memory")] fn panic_visit() { #[expect(unused)] struct PanicVisit(Gc); /// We technically can make it part of the contract for `Trace` to reject panicking impls, /// but it is good form to accept these even though they are malformed. 
unsafe impl TraceWith for PanicVisit { fn accept(&self, _: &mut V) -> Result<(), ()> { panic!("panic on visit"); } } let gc = Gc::new_cyclic(PanicVisit); let _ = gc.clone(); drop(gc); collect(); } #[test] fn new_cyclic_nothing() { static COUNT: AtomicUsize = AtomicUsize::new(0); let gc = Gc::new_cyclic(|_| DropCount(&COUNT)); drop(gc); // collect not necessary since this a drop by reference count assert_eq!(COUNT.load(Ordering::Relaxed), 1); } #[test] fn new_cyclic_one() { static DROP_COUNT: AtomicUsize = AtomicUsize::new(0); #[expect(unused)] struct Cycle(Gc, DropCount); unsafe impl TraceWith for Cycle { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.0.accept(visitor) } } let cyc = Gc::new_cyclic(|gc| Cycle(gc, DropCount(&DROP_COUNT))); assert_eq!(Gc::ref_count(&cyc).get(), 2); drop(cyc); collect(); assert_eq!(DROP_COUNT.load(Ordering::Relaxed), 1); } #[test] #[should_panic = "ehehe"] fn new_cyclic_panic() { let _: Gc<()> = Gc::new_cyclic(|_| panic!("ehehe")); } #[test] fn dead_inside_alive() { struct Cycle(Option>); thread_local! { static ESCAPE: Cell>> = const { Cell::new(None) }; } unsafe impl TraceWith for Cycle { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.0.accept(visitor) } } impl Drop for Cycle { fn drop(&mut self) { ESCAPE.set(take(&mut self.0)); } } let c1 = Gc::new_cyclic(|gc| Cycle(Some(gc))); drop(c1); collect(); // `ESCAPE` is now a dead pointer let alloc = Gc::new(ESCAPE.take().unwrap()); let alloc2 = alloc.clone(); drop(alloc); drop(alloc2); collect(); // if correct, this collection should not panic or encounter UB when collecting // `alloc` } #[test] /// Test that creating a `Gc` during a `Drop` implementation will still not leak the `Gc`. fn leak_by_creation_in_drop() { static DID_BAR_DROP: AtomicBool = AtomicBool::new(false); struct Foo(OnceCell>); struct Bar(OnceCell>); unsafe impl TraceWith for Foo { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.0.accept(visitor) } } unsafe impl TraceWith for Bar { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.0.accept(visitor) } } impl Drop for Foo { fn drop(&mut self) { let gcbar = Gc::new(Bar(OnceCell::new())); let _ = gcbar.0.set(gcbar.clone()); drop(gcbar); } } impl Drop for Bar { fn drop(&mut self) { DID_BAR_DROP.store(true, Ordering::Relaxed); } } let foo = Gc::new(Foo(OnceCell::new())); let _ = foo.0.set(foo.clone()); drop(foo); collect(); // causes Bar to be created and then leaked collect(); // cleans up Bar assert!(DID_BAR_DROP.load(Ordering::Relaxed)); } #[test] #[cfg_attr(miri, ignore = "miri is too slow")] #[expect(clippy::too_many_lines)] fn unsync_fuzz() { const N: usize = 100_000; static DROP_DETECTORS: [AtomicUsize; N] = { let mut detectors: [MaybeUninit; N] = unsafe { transmute(MaybeUninit::<[AtomicUsize; N]>::uninit()) }; let mut i = 0; while i < N { detectors[i] = MaybeUninit::new(AtomicUsize::new(0)); i += 1; } unsafe { transmute(detectors) } }; #[derive(Debug)] struct Alloc { refs: Mutex>>, id: usize, } impl Drop for Alloc { fn drop(&mut self) { let n_drop = DROP_DETECTORS[self.id].fetch_add(1, Ordering::Relaxed); assert_eq!(n_drop, 0, "must not double drop an allocation"); } } unsafe impl TraceWith for Alloc { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.refs.accept(visitor) } } fn dfs(alloc: &Gc, graph: &mut HashMap>) { if let Entry::Vacant(v) = graph.entry(alloc.id) { v.insert(Vec::new()); alloc.refs.lock().unwrap().iter().for_each(|a| { graph.get_mut(&alloc.id).unwrap().push(a.id); dfs(a, graph); }); } } fastrand::seed(12345); let mut gcs = 
(0..50) .map(|i| { Gc::new(Alloc { refs: Mutex::new(Vec::new()), id: i, }) }) .collect::>(); let mut next_detector = 50; for _ in 0..N { if gcs.is_empty() { gcs.push(Gc::new(Alloc { refs: Mutex::new(Vec::new()), id: next_detector, })); next_detector += 1; } match fastrand::u8(0..4) { 0 => { // println!("add gc {next_detector}"); gcs.push(Gc::new(Alloc { refs: Mutex::new(Vec::new()), id: next_detector, })); next_detector += 1; } 1 => { if gcs.len() > 1 { let from = fastrand::usize(0..gcs.len()); let to = fastrand::usize(0..gcs.len()); // println!("add ref {} -> {}", gcs[from].id, gcs[to].id); let new_gc = gcs[to].clone(); let mut guard = gcs[from].refs.lock().unwrap(); guard.push(new_gc); } } 2 => { let idx = fastrand::usize(0..gcs.len()); // println!("remove gc {}", gcs[idx].id); gcs.swap_remove(idx); } 3 => { let from = fastrand::usize(0..gcs.len()); let mut guard = gcs[from].refs.lock().unwrap(); if !guard.is_empty() { let to = fastrand::usize(0..guard.len()); // println!("drop ref {} -> {}", gcs[from].id, guard[to].id); guard.swap_remove(to); } } _ => unreachable!(), } } let mut graph = HashMap::new(); graph.insert(9999, Vec::new()); for alloc in &gcs { graph.get_mut(&9999).unwrap().push(alloc.id); dfs(alloc, &mut graph); } // println!("{graph:#?}"); drop(gcs); collect(); let mut n_missing = 0; for count in &DROP_DETECTORS[..next_detector] { let num = count.load(Ordering::Relaxed); if num != 1 { // println!("expected 1 for id {id} but got {num}"); n_missing += 1; } } assert_eq!(n_missing, 0); } #[test] fn custom_trait_object() { trait MyTrait: Trace + Send + Sync {} impl MyTrait for T {} let gc = Gc::new(5i32); let gc: Gc = coerce_gc!(gc); _ = gc; } #[test] fn gc_from_iter() { let _gc = (0..100).collect::>(); } #[test] fn self_referential_from_iter() { struct Ab { a: Gc, b: Gc, } unsafe impl TraceWith for Ab { fn accept(&self, visitor: &mut V) -> Result<(), ()> { self.a.accept(visitor)?; self.b.accept(visitor)?; Ok(()) } } let mut gcs = Vec::>::new(); gcs.push(Gc::new_cyclic(|a: Gc| Ab { a: a.clone(), b: a })); for _ in 0..10 { let b = gcs.last().unwrap().clone(); gcs.push(Gc::new_cyclic(|a: Gc| Ab { a, b })); } let _big_gc = gcs.into_iter().collect::>(); } ================================================ FILE: dumpster_bench/.gitignore ================================================ /target /Cargo.lock ================================================ FILE: dumpster_bench/Cargo.toml ================================================ [package] name = "dumpster_bench" version = "0.1.0" edition = "2021" license = "MPL-2.0" authors = ["Clayton Ramsey"] description = "Benchmark for dumpster garbage collection crate" repository = "https://github.com/claytonwramsey/dumpster" readme = "../README.md" keywords = ["dumpster", "garbage_collector", "benchmark"] categories = ["data-structures", "memory-management"] publish = false [dependencies] dumpster = { path = "../dumpster", features = ["derive"] } gc = "0.5.0" bacon_rajan_cc = "0.4.0" fastrand = "2.0.0" shredder = "0.2.0" shredder_derive = "0.2.0" parking_lot = "0.12.3" rust-cc = "0.6.2" tracing-rc = { version = "0.2.0", default-features = false, features = [ "sync", ] } ================================================ FILE: dumpster_bench/scripts/make_plots.py ================================================ # dumpster, a cycle-tracking garbage collector for Rust. # Copyright (C) 2023 Clayton Ramsey. # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. 
If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import matplotlib.pyplot as plt
import sys

csv_file = open(sys.argv[1])

multi_times = {}
single_times = {}
for line in csv_file.read().split('\n'):
    if len(line) == 0:
        continue
    name, test_type, n_threads, n_ops, time = line.split(',')
    times = single_times if test_type == 'single_threaded' else multi_times
    if name not in times.keys():
        times[name] = ([], [])
    times[name][0].append(int(n_threads))
    times[name][1].append(float(time) / 1000.0)

for (name, v) in multi_times.items():
    (xs, ys) = v
    plt.scatter(xs, ys, label=name)
plt.xlabel('Number of threads')
plt.ylabel('Time taken for 1M ops (ms)')
plt.title('Parallel garbage collector scaling')
plt.legend()
plt.show()

multi_times.pop('shredder', None)
for (i, (name, v)) in enumerate(multi_times.items()):
    (xs, ys) = v
    plt.scatter(xs, ys, label=name, color=f"tab:{['blue', 'orange', 'green', 'purple'][i]}")
plt.xlabel('Number of threads')
plt.ylabel('Time taken for 1M ops (ms)')
plt.title('Parallel garbage collector scaling (sans shredder)')
plt.legend()
plt.show()


def violin(times: dict, name: str):
    data = []
    labels = []
    for (label, (_, ys)) in times.items():
        data.append(ys)
        labels.append(label)
    fig = plt.figure()
    plt.violinplot(data, range(len(data)), vert=False)
    plt.yticks(range(len(data)), labels=labels)
    plt.ylabel('Garbage collector')
    plt.xlabel('Runtime for 1M ops (ms)')
    plt.tight_layout(rect=(10, 1.08, 1.08, 1.08))
    plt.title(name)
    plt.show()


violin(single_times, 'Single-threaded GC comparison')
single_times.pop('shredder', None)
violin(single_times, 'Single-threaded GC comparison (sans shredder)')


================================================
FILE: dumpster_bench/src/lib.rs
================================================
/*
   dumpster, a cycle-tracking garbage collector for Rust.
   Copyright (C) 2023 Clayton Ramsey.

   This Source Code Form is subject to the terms of the Mozilla Public
   License, v. 2.0. If a copy of the MPL was not distributed with this
   file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/

#![expect(non_local_definitions)]

use std::{
    rc::Rc,
    sync::{Arc, Mutex},
};

/// A garbage-collected structure which points to an arbitrary number of other garbage-collected
/// structures.
///
/// Cloning a `Multiref` yields a duplicated pointer, not a deep copy.
pub trait Multiref: Clone {
    /// Create a new multiref which points to some data.
    fn new(points_to: Vec<Self>) -> Self;
    /// Apply some function to the backing set of references owned by this structure.
    fn apply(&self, f: impl FnOnce(&mut Vec<Self>));
    /// Collect all the floating GCs out there.
    fn collect();
}

/// A trait for thread-safe synchronized multirefs.
pub trait SyncMultiref: Send + Sync + Multiref {}

impl<T> SyncMultiref for T where T: Send + Sync + Multiref {}

/// A simple multi-reference which uses `Rc`, which is technically not a garbage collector, as a
/// baseline.
pub struct RcMultiref {
    refs: Mutex<Vec<Rc<RcMultiref>>>,
}

/// A simple multi-reference which uses `Arc`, which is technically not a garbage collector, as a
/// baseline.
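// A minimal sketch (kept as a comment rather than real code) of how the benchmark harness
// exercises any `Multiref` implementation generically; `churn` is a hypothetical helper, not
// part of this crate:
//
//     fn churn<M: Multiref>(n: usize) {
//         // allocate `n` roots, each with no outgoing references
//         let roots: Vec<M> = (0..n).map(|_| M::new(Vec::new())).collect();
//         // add an edge from roots[0] to roots[1]; repeated edges may later form cycles
//         let target = roots[1].clone();
//         roots[0].apply(|refs| refs.push(target));
//         // drop every root, then ask the collector under test to reclaim what it can
//         drop(roots);
//         M::collect();
//     }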
pub struct ArcMultiref { refs: Mutex>>, } #[derive(dumpster::Trace, Debug)] pub struct DumpsterSyncMultiref { refs: Mutex>>, } #[derive(dumpster::Trace)] pub struct DumpsterUnsyncMultiref { refs: Mutex>>, } pub struct GcMultiref { refs: gc::GcCell>>, } pub struct BaconRajanMultiref { refs: Mutex>>, } #[derive(shredder_derive::Scan)] pub struct ShredderMultiref { refs: Mutex>>, } #[derive(shredder_derive::Scan)] pub struct ShredderSyncMultiref { refs: Mutex>>, } impl bacon_rajan_cc::Trace for BaconRajanMultiref { fn trace(&self, tracer: &mut bacon_rajan_cc::Tracer) { self.refs.lock().unwrap().trace(tracer); } } impl gc::Finalize for GcMultiref {} unsafe impl gc::Trace for GcMultiref { #[inline] unsafe fn trace(&self) { self.refs.trace(); } #[inline] unsafe fn root(&self) { self.refs.root(); } #[inline] unsafe fn unroot(&self) { self.refs.unroot(); } #[inline] fn finalize_glue(&self) { self.refs.finalize_glue() } } #[derive(rust_cc::Finalize)] pub struct RustCcMultiRef { refs: Mutex>>, } unsafe impl rust_cc::Trace for RustCcMultiRef { fn trace(&self, ctx: &mut rust_cc::Context<'_>) { self.refs.lock().unwrap().trace(ctx) } } pub struct TracingRcUnsyncMultiRef { refs: Vec>, } impl tracing_rc::rc::Trace for TracingRcUnsyncMultiRef { fn visit_children(&self, visitor: &mut tracing_rc::rc::GcVisitor) { self.refs.visit_children(visitor) } } pub struct TracingRcSyncMultiRef { refs: Mutex>>, } impl tracing_rc::sync::Trace for TracingRcSyncMultiRef { fn visit_children(&self, visitor: &mut tracing_rc::sync::GcVisitor) { self.refs.lock().unwrap().visit_children(visitor) } } impl Multiref for dumpster::sync::Gc { fn new(points_to: Vec) -> Self { dumpster::sync::Gc::new(DumpsterSyncMultiref { refs: Mutex::new(points_to), }) } fn apply(&self, f: impl FnOnce(&mut Vec)) { f(self.refs.lock().unwrap().as_mut()); } fn collect() { dumpster::sync::collect() } } impl Multiref for dumpster::unsync::Gc { fn new(points_to: Vec) -> Self { dumpster::unsync::Gc::new(DumpsterUnsyncMultiref { refs: Mutex::new(points_to), }) } fn apply(&self, f: impl FnOnce(&mut Vec)) { f(self.refs.lock().unwrap().as_mut()); } fn collect() { dumpster::unsync::collect() } } impl Multiref for gc::Gc { fn new(points_to: Vec) -> Self { gc::Gc::new(GcMultiref { refs: gc::GcCell::new(points_to), }) } fn apply(&self, f: impl FnOnce(&mut Vec)) { f(self.refs.borrow_mut().as_mut()) } fn collect() { gc::force_collect(); } } impl Multiref for bacon_rajan_cc::Cc { fn new(points_to: Vec) -> Self { bacon_rajan_cc::Cc::new(BaconRajanMultiref { refs: Mutex::new(points_to), }) } fn apply(&self, f: impl FnOnce(&mut Vec)) { f(self.refs.lock().unwrap().as_mut()); } fn collect() { bacon_rajan_cc::collect_cycles(); assert_eq!(bacon_rajan_cc::number_of_roots_buffered(), 0); } } impl Multiref for shredder::Gc { fn new(points_to: Vec) -> Self { shredder::Gc::new(ShredderMultiref { refs: Mutex::new(points_to), }) } fn apply(&self, f: impl FnOnce(&mut Vec)) { f(self.get().refs.lock().unwrap().as_mut()); } fn collect() { shredder::synchronize_destructors(); } } impl Multiref for shredder::Gc { fn new(points_to: Vec) -> Self { shredder::Gc::new(ShredderSyncMultiref { refs: Mutex::new(points_to), }) } fn apply(&self, f: impl FnOnce(&mut Vec)) { f(self.get().refs.lock().unwrap().as_mut()); } fn collect() { shredder::synchronize_destructors(); } } impl Multiref for rust_cc::Cc { fn new(points_to: Vec) -> Self { rust_cc::Cc::new(RustCcMultiRef { refs: Mutex::new(points_to), }) } fn apply(&self, f: impl FnOnce(&mut Vec)) { f(self.refs.lock().unwrap().as_mut()); } fn 
collect() { rust_cc::collect_cycles(); } } impl Multiref for tracing_rc::rc::Gc { fn new(points_to: Vec) -> Self { tracing_rc::rc::Gc::new(TracingRcUnsyncMultiRef { refs: points_to }) } fn apply(&self, f: impl FnOnce(&mut Vec)) { f(self.borrow_mut().refs.as_mut()); } fn collect() { tracing_rc::rc::collect_full(); } } impl Multiref for tracing_rc::sync::Agc { fn new(points_to: Vec) -> Self { tracing_rc::sync::Agc::new(TracingRcSyncMultiRef { refs: Mutex::new(points_to), }) } fn apply(&self, f: impl FnOnce(&mut Vec)) { f(self.read().refs.lock().unwrap().as_mut()); } fn collect() { tracing_rc::sync::collect_full(); } } impl Multiref for Rc { fn new(points_to: Vec) -> Self { Rc::new(RcMultiref { refs: Mutex::new(points_to), }) } fn apply(&self, f: impl FnOnce(&mut Vec)) { f(self.refs.lock().unwrap().as_mut()); } fn collect() {} } impl Multiref for Arc { fn new(points_to: Vec) -> Self { Arc::new(ArcMultiref { refs: Mutex::new(points_to), }) } fn apply(&self, f: impl FnOnce(&mut Vec)) { f(self.refs.lock().unwrap().as_mut()); } fn collect() {} } ================================================ FILE: dumpster_bench/src/main.rs ================================================ /* dumpster, a cycle-tracking garbage collector for Rust. Copyright (C) 2023 Clayton Ramsey. This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Benchmarks for the `dumpster` garbage collection library. use std::{ fmt::Display, rc::Rc, sync::Arc, thread::{self, available_parallelism, scope}, time::{Duration, Instant}, }; use dumpster_bench::{ ArcMultiref, BaconRajanMultiref, DumpsterSyncMultiref, DumpsterUnsyncMultiref, GcMultiref, Multiref, RcMultiref, RustCcMultiRef, ShredderMultiref, ShredderSyncMultiref, SyncMultiref, TracingRcSyncMultiRef, TracingRcUnsyncMultiRef, }; use parking_lot::Mutex; struct BenchmarkData { name: &'static str, test: &'static str, n_threads: usize, n_ops: usize, duration: Duration, } impl Display for BenchmarkData { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, "{},{},{},{},{}", self.name, self.test, self.n_threads, self.n_ops, self.duration.as_micros() ) } } fn unsync_never_collect(_: &dumpster::unsync::CollectInfo) -> bool { false } fn sync_never_collect(_: &dumpster::sync::CollectInfo) -> bool { false } fn main() { const N_ITERS: usize = 1_000_000; for _ in 0..100 { dumpster::unsync::set_collect_condition(dumpster::unsync::default_collect_condition); println!( "{}", single_threaded::>( "dumpster (unsync)", N_ITERS, ) ); dumpster::unsync::set_collect_condition(unsync_never_collect); println!( "{}", single_threaded::>( "dumpster (unsync/manual)", N_ITERS, ) ); dumpster::sync::set_collect_condition(dumpster::sync::default_collect_condition); println!( "{}", single_threaded::>("dumpster (sync)", N_ITERS) ); dumpster::sync::set_collect_condition(sync_never_collect); println!( "{}", single_threaded::>( "dumpster (sync/manual)", N_ITERS ) ); println!("{}", single_threaded::>("gc", N_ITERS)); println!( "{}", single_threaded::>("bacon-rajan-cc", N_ITERS) ); rust_cc::config::config(|config| { config.set_auto_collect(true); }) .unwrap(); println!( "{}", single_threaded::>("rust-cc", N_ITERS) ); rust_cc::config::config(|config| { config.set_auto_collect(false); }) .unwrap(); println!( "{}", single_threaded::>("rust-cc (manual)", N_ITERS) ); println!( "{}", single_threaded::>( "tracing-rc (unsync)", N_ITERS ) ); println!( "{}", 
single_threaded::>( "tracing-rc (sync)", N_ITERS ) ); for n_threads in 1..=available_parallelism().unwrap().get() { // println!("--- {n_threads} threads"); dumpster::sync::set_collect_condition(dumpster::sync::default_collect_condition); println!( "{}", multi_threaded::>( "dumpster (sync)", N_ITERS, n_threads, ) ); dumpster::sync::set_collect_condition(sync_never_collect); println!( "{}", multi_threaded::>( "dumpster (sync/manual)", N_ITERS, n_threads, ) ); println!( "{}", multi_threaded::>( "tracing-rc (sync)", N_ITERS, n_threads ) ); } } for _ in 0..20 { // run fewer tests of shredder because it takes forever println!( "{}", single_threaded::>("shredder", N_ITERS) ); for n_threads in 1..=available_parallelism().unwrap().get() { println!( "{}", multi_threaded::>( "shredder", N_ITERS, n_threads ) ); } } for _ in 0..100 { println!("{}", single_threaded::>("Rc", N_ITERS)); println!("{}", single_threaded::>("Arc", N_ITERS)); for n_threads in 1..=available_parallelism().unwrap().get() { println!( "{}", multi_threaded::>("Arc", N_ITERS, n_threads) ); } } } /// Run a benchmark of a multi-threaded garbage collector. fn single_threaded(name: &'static str, n_iters: usize) -> BenchmarkData { fastrand::seed(12345); let mut gcs = (0..50).map(|_| M::new(Vec::new())).collect::>(); // println!("{name}: running..."); let tic = Instant::now(); for _n in 0..n_iters { // println!("iter {_n}"); if gcs.is_empty() { gcs.push(M::new(Vec::new())); } else { match fastrand::u8(0..4) { 0 => { // println!("create allocation"); // create new allocation gcs.push(M::new(Vec::new())); } 1 => { // println!("add reference"); // add a reference if gcs.len() > 1 { let from = fastrand::usize(0..gcs.len()); let to = fastrand::usize(0..gcs.len()); let new_gc = gcs[to].clone(); gcs[from].apply(|v| v.push(new_gc)); } } 2 => { // println!("remove gc"); // destroy a reference owned by the vector gcs.swap_remove(fastrand::usize(0..gcs.len())); } 3 => { // println!("remove reference"); // destroy a reference owned by some gc let from = fastrand::usize(0..gcs.len()); gcs[from].apply(|v| { if !v.is_empty() { let to = fastrand::usize(0..v.len()); v.swap_remove(to); } }) } _ => unreachable!(), } } } drop(gcs); M::collect(); let toc = Instant::now(); // println!("finished {name} in {:?}", (toc - tic)); BenchmarkData { name, test: "single_threaded", n_threads: 1, n_ops: n_iters, duration: toc.duration_since(tic), } } fn multi_threaded( name: &'static str, n_iters: usize, n_threads: usize, ) -> BenchmarkData { let vecs: Vec>> = (0..(n_threads * 10)) .map(|_| Mutex::new((0..50).map(|_| M::new(Vec::new())).collect())) .collect(); let tic = Mutex::new(Instant::now()); let toc = Mutex::new(Instant::now()); scope(|s| { for i in 0..n_threads { let vecs = &vecs; let tic = &tic; let toc = &toc; thread::Builder::new() .name(format!("multi_threaded{i}")) .spawn_scoped(s, move || { *tic.lock() = Instant::now(); fastrand::seed(12345 + i as u64); for _n in 0..(n_iters / n_threads) { let v1_id = fastrand::usize(0..vecs.len()); match fastrand::u8(0..4) { // create 0 => vecs[v1_id].lock().push(M::new(Vec::new())), // add ref 1 => { let v2_id = fastrand::usize(0..vecs.len()); if v1_id == v2_id { let g1 = vecs[v1_id].lock(); if g1.len() < 2 { continue; } let i1 = fastrand::usize(0..g1.len()); let i2 = fastrand::usize(0..g1.len()); let new_gc = g1[i2].clone(); g1[i1].apply(|v| v.push(new_gc)); } else { // prevent deadlock by locking lower one first let (g1, g2) = if v1_id < v2_id { (vecs[v1_id].lock(), vecs[v2_id].lock()) } else { let g2 = vecs[v2_id].lock(); 
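// Here v1_id > v2_id (equal ids were handled in the branch above), so the smaller index
// v2_id is locked first; together with the `v1_id < v2_id` branch, every thread acquires the
// two vector locks in increasing index order, which rules out deadlock between threads.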
(vecs[v1_id].lock(), g2) }; if g1.is_empty() || g2.is_empty() { continue; } let i1 = fastrand::usize(0..g1.len()); let i2 = fastrand::usize(0..g2.len()); let new_gc = g2[i2].clone(); g1[i1].apply(|v| v.push(new_gc)); } } // destroy gc 2 => { let mut guard = vecs[v1_id].lock(); if guard.is_empty() { continue; } let idx = fastrand::usize(0..guard.len()); guard.swap_remove(idx); } // destroy ref 3 => { let guard = vecs[v1_id].lock(); if guard.is_empty() { continue; } guard[fastrand::usize(0..guard.len())].apply(|v| { if !v.is_empty() { v.swap_remove(fastrand::usize(0..v.len())); } }); } _ => unreachable!(), }; } *toc.lock() = Instant::now(); }) .unwrap(); } }); M::collect(); // This op is single threaded and shouldn't count let duration = toc.lock().duration_since(*tic.lock()); // println!("finished {name} in {duration:?}"); BenchmarkData { name, test: "multi_threaded", n_threads, n_ops: (n_iters / n_threads) * n_threads, duration, } } ================================================ FILE: dumpster_derive/.gitignore ================================================ /target /Cargo.lock ================================================ FILE: dumpster_derive/Cargo.toml ================================================ [package] name = "dumpster_derive" version = "2.0.0" edition = "2021" license = "MPL-2.0" authors = ["Clayton Ramsey"] description = "Implementation of #[derive(Trace)] for dumpster" repository = "https://github.com/claytonwramsey/dumpster" readme = "../README.md" keywords = ["dumpster", "garbage_collector", "derive", "gc"] categories = ["memory-management", "data-structures"] [lib] proc-macro = true [dependencies] proc-macro2 = "1.0.60" quote = "1.0" syn = "2.0" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html ================================================ FILE: dumpster_derive/src/lib.rs ================================================ /* dumpster, a cycle-tracking garbage collector for Rust. Copyright (C) 2023 Clayton Ramsey. This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #![warn(clippy::pedantic)] #![warn(clippy::cargo)] #![allow(clippy::multiple_crate_versions)] use proc_macro2::{TokenStream, TokenTree}; use quote::{format_ident, quote, quote_spanned, ToTokens as _}; use syn::{ parse_macro_input, parse_quote, spanned::Spanned, Data, DeriveInput, Fields, GenericParam, Generics, Ident, Index, Path, }; #[proc_macro_derive(Trace, attributes(dumpster))] /// Derive `Trace` for a type. pub fn derive_trace(input: proc_macro::TokenStream) -> proc_macro::TokenStream { let input = parse_macro_input!(input as DeriveInput); let mut dumpster: Path = parse_quote!(::dumpster); // look for `crate` argument for attr in &input.attrs { if !attr.path().is_ident("dumpster") { continue; } let result = attr.parse_nested_meta(|meta| { if meta.path.is_ident("crate") { dumpster = meta.value()?.parse()?; Ok(()) } else { Err(meta.error("unsupported attribute")) } }); if let Err(err) = result { return err.into_compile_error().into(); } } // name of the type being implemented let name = &input.ident; // generic parameters of the type being implemented let generics = add_trait_bounds(&dumpster, input.generics); let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); let impl_generics = { let tokens = impl_generics.into_token_stream(); let param = quote! 
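// The generated impl is generic over an extra visitor parameter `__V: Visitor`, which is
// prepended to the type's own generic parameters (spliced together below) so that a single
// derive covers every visitor type.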
{ __V: #dumpster::Visitor }; let params = if tokens.is_empty() { quote! { #param } } else { // remove the angle bracket delimiters let mut tokens: Vec = tokens.into_iter().skip(1).collect(); tokens.pop(); let tokens: TokenStream = tokens.into_iter().collect(); quote! { #param, #tokens } }; quote! { < #params > } }; let do_visitor = delegate_methods(&dumpster, name, &input.data); let generated = quote! { unsafe impl #impl_generics #dumpster::TraceWith<__V> for #name #ty_generics #where_clause { #[inline] fn accept(&self, visitor: &mut __V) -> ::core::result::Result<(), ()> { #do_visitor } } }; generated.into() } /// Collect the trait bounds for some generic expression. fn add_trait_bounds(dumpster: &Path, mut generics: Generics) -> Generics { for param in &mut generics.params { if let GenericParam::Type(ref mut type_param) = *param { type_param .bounds .push(parse_quote!(#dumpster::TraceWith<__V>)); } } generics } #[allow(clippy::too_many_lines)] /// Generate method implementations for [`Trace`] for some data type. fn delegate_methods(dumpster: &Path, name: &Ident, data: &Data) -> TokenStream { match data { Data::Struct(data) => match data.fields { Fields::Named(ref f) => { let delegate_visit = f.named.iter().map(|f| { let name = &f.ident; quote_spanned! {f.span() => #dumpster::TraceWith::accept( &self.#name, visitor )?; } }); quote! { #(#delegate_visit)* ::core::result::Result::Ok(()) } } Fields::Unnamed(ref f) => { let delegate_visit = f.unnamed.iter().enumerate().map(|(i, f)| { let index = Index::from(i); quote_spanned! {f.span() => #dumpster::TraceWith::accept( &self.#index, visitor )?; } }); quote! { #(#delegate_visit)* ::core::result::Result::Ok(()) } } Fields::Unit => quote! { ::core::result::Result::Ok(()) }, }, Data::Enum(e) => { let mut delegate_visit = TokenStream::new(); for var in &e.variants { let var_name = &var.ident; match &var.fields { Fields::Named(n) => { let mut binding = TokenStream::new(); let mut execution_visit = TokenStream::new(); for (i, name) in n.named.iter().enumerate() { let field_name = format_ident!("field{i}"); let field_ident = name.ident.as_ref().unwrap(); if i == 0 { binding.extend(quote! { #field_ident: #field_name }); } else { binding.extend(quote! { , #field_ident: #field_name }); } execution_visit.extend(quote! { #dumpster::TraceWith::accept( #field_name, visitor )?; }); } delegate_visit.extend( quote! {#name::#var_name{#binding} => {#execution_visit ::core::result::Result::Ok(())},}, ); } Fields::Unnamed(u) => { let mut binding = TokenStream::new(); let mut execution_visit = TokenStream::new(); for (i, _) in u.unnamed.iter().enumerate() { let field_name = format_ident!("field{i}"); if i == 0 { binding.extend(quote! { #field_name }); } else { binding.extend(quote! { , #field_name }); } execution_visit.extend(quote! { #dumpster::TraceWith::accept( #field_name, visitor )?; }); } delegate_visit.extend( quote! {#name::#var_name(#binding) => {#execution_visit ::core::result::Result::Ok(())},}, ); } Fields::Unit => { delegate_visit .extend(quote! {#name::#var_name => ::core::result::Result::Ok(()),}); } } } quote! {match self {#delegate_visit}} } Data::Union(u) => { quote_spanned! 
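// Unions have no field-by-field tracing strategy, so the derive refuses them: emit a
// `compile_error!` spanned to the `union` keyword so the diagnostic points at the offending
// item.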
{ u.union_token.span => compile_error!("`Trace` must be manually implemented for unions"); } } } } ================================================ FILE: dumpster_test/.gitignore ================================================ /target /Cargo.lock ================================================ FILE: dumpster_test/Cargo.toml ================================================ [package] name = "dumpster_test" version = "0.1.0" edition = "2021" license = "MPL-2.0" authors = ["Clayton Ramsey"] description = "Tests for dumpster garbage collection crate" repository = "https://github.com/claytonwramsey/dumpster" readme = "../README.md" keywords = ["dumpster", "garbage_collector", "test"] categories = ["data-structures", "memory-management"] publish = false [dev-dependencies] dumpster = { path = "../dumpster" } dumpster_derive = { path = "../dumpster_derive" } ================================================ FILE: dumpster_test/src/lib.rs ================================================ /* dumpster, a cycle-tracking garbage collector for Rust. Copyright (C) 2023 Clayton Ramsey. This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #![warn(clippy::pedantic)] #![warn(clippy::cargo)] #![cfg(test)] use std::{ cell::RefCell, sync::atomic::{AtomicU8, AtomicUsize, Ordering}, }; use dumpster::unsync::{collect, Gc}; use dumpster_derive::Trace; #[derive(Trace)] struct Empty; #[derive(Trace)] #[allow(dead_code)] struct UnitTuple(); #[derive(Trace)] struct MultiRef { counter: &'static AtomicUsize, pointers: RefCell>>, } #[derive(Trace)] #[allow(unused)] enum Refs { None, One(Gc), Many { refs: Vec> }, } #[derive(Trace)] #[allow(unused)] enum A { None, } #[derive(Trace)] #[allow(unused)] enum B { One(Gc), } #[derive(Trace)] #[allow(unused)] struct Generic { value: T, } impl Drop for MultiRef { fn drop(&mut self) { self.counter.fetch_add(1, Ordering::Relaxed); } } #[test] fn unit() { static DROP_COUNT: AtomicU8 = AtomicU8::new(0); #[derive(Trace)] struct DropCount; impl Drop for DropCount { fn drop(&mut self) { DROP_COUNT.fetch_add(1, Ordering::Relaxed); } } let gc1 = Gc::new(DropCount); let gc2 = Gc::clone(&gc1); drop(gc1); assert_eq!(DROP_COUNT.load(Ordering::Relaxed), 0); drop(gc2); assert_eq!(DROP_COUNT.load(Ordering::Relaxed), 1); } #[test] fn self_referential() { static COUNT: AtomicUsize = AtomicUsize::new(0); let gc1 = Gc::new(MultiRef { counter: &COUNT, pointers: RefCell::new(Vec::new()), }); gc1.pointers.borrow_mut().push(Gc::clone(&gc1)); assert_eq!(COUNT.load(Ordering::Relaxed), 0); drop(gc1); collect(); assert_eq!(COUNT.load(Ordering::Relaxed), 1); } #[test] fn double_loop() { static COUNT: AtomicUsize = AtomicUsize::new(0); let gc1 = Gc::new(MultiRef { counter: &COUNT, pointers: RefCell::new(Vec::new()), }); gc1.pointers .borrow_mut() .extend([Gc::clone(&gc1), Gc::clone(&gc1)]); assert_eq!(COUNT.load(Ordering::Relaxed), 0); drop(gc1); collect(); assert_eq!(COUNT.load(Ordering::Relaxed), 1); } #[test] fn parallel_loop() { static COUNT_1: AtomicUsize = AtomicUsize::new(0); static COUNT_2: AtomicUsize = AtomicUsize::new(0); static COUNT_3: AtomicUsize = AtomicUsize::new(0); static COUNT_4: AtomicUsize = AtomicUsize::new(0); let gc1 = Gc::new(MultiRef { counter: &COUNT_1, pointers: RefCell::new(Vec::new()), }); let gc2 = Gc::new(MultiRef { counter: &COUNT_2, pointers: RefCell::new(vec![Gc::clone(&gc1)]), }); let gc3 = Gc::new(MultiRef { counter: &COUNT_3, 
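// gc3 mirrors gc2: both point back at gc1. gc4 (built below) points at gc2 and gc3, and gc1
// points at gc4, so all four allocations sit on overlapping cycles that only `collect()` can
// reclaim.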
        pointers: RefCell::new(vec![Gc::clone(&gc1)]),
    });
    let gc4 = Gc::new(MultiRef {
        counter: &COUNT_4,
        pointers: RefCell::new(vec![Gc::clone(&gc2), Gc::clone(&gc3)]),
    });

    gc1.pointers.borrow_mut().push(Gc::clone(&gc4));

    drop(gc1);
    drop(gc2);
    drop(gc3);

    assert_eq!(COUNT_1.load(Ordering::Relaxed), 0);
    assert_eq!(COUNT_2.load(Ordering::Relaxed), 0);
    assert_eq!(COUNT_3.load(Ordering::Relaxed), 0);
    assert_eq!(COUNT_4.load(Ordering::Relaxed), 0);

    drop(gc4);
    collect();

    assert_eq!(COUNT_1.load(Ordering::Relaxed), 1);
    assert_eq!(COUNT_2.load(Ordering::Relaxed), 1);
    assert_eq!(COUNT_3.load(Ordering::Relaxed), 1);
    assert_eq!(COUNT_4.load(Ordering::Relaxed), 1);
}

#[test]
#[allow(clippy::similar_names)]
fn unsync_as_ptr() {
    #[derive(Trace)]
    struct B(Gc<Empty>);

    let empty = Gc::new(Empty);
    let empty_a = Gc::clone(&empty);
    let empty_ptr = Gc::as_ptr(&empty);
    assert_eq!(empty_ptr, Gc::as_ptr(&empty_a));

    let b = B(Gc::clone(&empty));
    assert_eq!(empty_ptr, Gc::as_ptr(&b.0));
    let bb = Gc::new(B(Gc::clone(&empty)));
    assert_eq!(empty_ptr, Gc::as_ptr(&bb.0));

    let empty2 = Gc::new(Empty);
    let empty2_ptr = Gc::as_ptr(&empty2);
    assert_ne!(empty_ptr, empty2_ptr);

    let b2 = Gc::new(B(Gc::clone(&empty2)));
    assert_eq!(empty2_ptr, Gc::as_ptr(&b2.0));
    assert_ne!(empty_ptr, Gc::as_ptr(&b2.0));
    assert_ne!(Gc::as_ptr(&b.0), Gc::as_ptr(&b2.0));
    assert_ne!(Gc::as_ptr(&b.0), empty2_ptr);
}


================================================
FILE: rustfmt.toml
================================================
newline_style = "Unix"
wrap_comments = true
comment_width = 100
format_code_in_doc_comments = true
imports_granularity = "Crate"
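
The tests above lean on `#[derive(Trace)]` from dumpster_derive. As a rough hand-expanded sketch (not the macro's literal token output, and using a hypothetical `Node` type), the derive turns a named-field struct into a `TraceWith` implementation that simply delegates to each field in declaration order:

use std::cell::RefCell;

use dumpster::unsync::Gc;

struct Node {
    next: RefCell<Vec<Gc<Node>>>,
}

// Approximately what `#[derive(Trace)]` on `Node` would generate: an impl generic over the
// visitor type `__V` whose `accept` visits every field and then reports success.
unsafe impl<__V: ::dumpster::Visitor> ::dumpster::TraceWith<__V> for Node {
    #[inline]
    fn accept(&self, visitor: &mut __V) -> ::core::result::Result<(), ()> {
        ::dumpster::TraceWith::accept(&self.next, visitor)?;
        ::core::result::Result::Ok(())
    }
}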