[
  {
    "path": ".github/workflows/release-plz.yml",
    "content": "name: Release-plz\n\non:\n  push:\n    branches:\n      - master\n\njobs:\n  release-plz-release:\n    name: Release-plz release\n    runs-on: ubuntu-latest\n    if: ${{ github.repository_owner == 'Amanieu' }}\n    permissions:\n      contents: write\n    steps:\n      - name: Checkout repository\n        uses: actions/checkout@v4\n        with:\n          fetch-depth: 0\n      - name: Install Rust toolchain\n        uses: dtolnay/rust-toolchain@stable\n      - name: Run release-plz\n        uses: release-plz/action@v0.5\n        with:\n          command: release\n        env:\n          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n          CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}\n\n  release-plz-pr:\n    name: Release-plz PR\n    runs-on: ubuntu-latest\n    if: ${{ github.repository_owner == 'Amanieu' }}\n    permissions:\n      pull-requests: write\n      contents: write\n    concurrency:\n      group: release-plz-${{ github.ref }}\n      cancel-in-progress: false\n    steps:\n      - name: Checkout repository\n        uses: actions/checkout@v4\n        with:\n          fetch-depth: 0\n      - name: Install Rust toolchain\n        uses: dtolnay/rust-toolchain@stable\n      - name: Run release-plz\n        uses: release-plz/action@v0.5\n        with:\n          command: release-pr\n        env:\n          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n          CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}\n"
  },
  {
    "path": ".gitignore",
    "content": "target\nCargo.lock\n"
  },
  {
    "path": ".travis.yml",
    "content": "language: rust\nsudo: false\n\nrust:\n- nightly\n- beta\n- stable\n- 1.45.0\n\nscript:\n- cargo build\n- cargo test\n- cargo doc\n- if [ $TRAVIS_RUST_VERSION = nightly ]; then rustup target add aarch64-unknown-none; fi\n- if [ $TRAVIS_RUST_VERSION = nightly ]; then RUSTFLAGS=\"-Zcrate-attr=feature(integer_atomics)\" cargo check --target=aarch64-unknown-none; fi\n\nnotifications:\n  email: false\n"
  },
  {
    "path": "CHANGELOG.md",
    "content": "# Changelog\n\nAll notable changes to this project will be documented in this file.\n\nThe format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),\nand this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).\n\n## [Unreleased]\n\n## [0.6.1](https://github.com/Amanieu/atomic-rs/compare/v0.6.0...v0.6.1) - 2025-06-19\n\n### Other\n\n- Implement (de)serialization with `serde`.\n"
  },
  {
    "path": "Cargo.toml",
    "content": "[package]\nname = \"atomic\"\nversion = \"0.6.1\"\nedition = \"2018\"\nauthors = [\"Amanieu d'Antras <amanieu@gmail.com>\"]\ndescription = \"Generic Atomic<T> wrapper type\"\nlicense = \"Apache-2.0/MIT\"\nrepository = \"https://github.com/Amanieu/atomic-rs\"\nreadme = \"README.md\"\nkeywords = [\"atomic\", \"no_std\"]\n\n[features]\ndefault = [\"fallback\"]\nstd = []\nfallback = []\nnightly = []\nserde = [\"dep:serde\"]\n\n[dependencies]\nbytemuck = \"1.13.1\"\nserde = { version = \"1.0.219\", default-features = false, optional = true }\n\n[dev-dependencies]\nbytemuck = { version = \"1.13.1\", features = [\"derive\"] }\nserde = { version = \"1.0.219\", default-features = false, features = [\"derive\"] }\nserde_json = { version = \"1.0.140\" }\n"
  },
  {
    "path": "LICENSE-APACHE",
    "content": "                              Apache License\n                        Version 2.0, January 2004\n                     http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. Definitions.\n\n   \"License\" shall mean the terms and conditions for use, reproduction,\n   and distribution as defined by Sections 1 through 9 of this document.\n\n   \"Licensor\" shall mean the copyright owner or entity authorized by\n   the copyright owner that is granting the License.\n\n   \"Legal Entity\" shall mean the union of the acting entity and all\n   other entities that control, are controlled by, or are under common\n   control with that entity. For the purposes of this definition,\n   \"control\" means (i) the power, direct or indirect, to cause the\n   direction or management of such entity, whether by contract or\n   otherwise, or (ii) ownership of fifty percent (50%) or more of the\n   outstanding shares, or (iii) beneficial ownership of such entity.\n\n   \"You\" (or \"Your\") shall mean an individual or Legal Entity\n   exercising permissions granted by this License.\n\n   \"Source\" form shall mean the preferred form for making modifications,\n   including but not limited to software source code, documentation\n   source, and configuration files.\n\n   \"Object\" form shall mean any form resulting from mechanical\n   transformation or translation of a Source form, including but\n   not limited to compiled object code, generated documentation,\n   and conversions to other media types.\n\n   \"Work\" shall mean the work of authorship, whether in Source or\n   Object form, made available under the License, as indicated by a\n   copyright notice that is included in or attached to the work\n   (an example is provided in the Appendix below).\n\n   \"Derivative Works\" shall mean any work, whether in Source or Object\n   form, that is based on (or derived from) the Work and for which the\n   editorial revisions, annotations, elaborations, or other modifications\n   represent, as a whole, an original work of authorship. For the purposes\n   of this License, Derivative Works shall not include works that remain\n   separable from, or merely link (or bind by name) to the interfaces of,\n   the Work and Derivative Works thereof.\n\n   \"Contribution\" shall mean any work of authorship, including\n   the original version of the Work and any modifications or additions\n   to that Work or Derivative Works thereof, that is intentionally\n   submitted to Licensor for inclusion in the Work by the copyright owner\n   or by an individual or Legal Entity authorized to submit on behalf of\n   the copyright owner. For the purposes of this definition, \"submitted\"\n   means any form of electronic, verbal, or written communication sent\n   to the Licensor or its representatives, including but not limited to\n   communication on electronic mailing lists, source code control systems,\n   and issue tracking systems that are managed by, or on behalf of, the\n   Licensor for the purpose of discussing and improving the Work, but\n   excluding communication that is conspicuously marked or otherwise\n   designated in writing by the copyright owner as \"Not a Contribution.\"\n\n   \"Contributor\" shall mean Licensor and any individual or Legal Entity\n   on behalf of whom a Contribution has been received by Licensor and\n   subsequently incorporated within the Work.\n\n2. Grant of Copyright License. 
Subject to the terms and conditions of\n   this License, each Contributor hereby grants to You a perpetual,\n   worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n   copyright license to reproduce, prepare Derivative Works of,\n   publicly display, publicly perform, sublicense, and distribute the\n   Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. Subject to the terms and conditions of\n   this License, each Contributor hereby grants to You a perpetual,\n   worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n   (except as stated in this section) patent license to make, have made,\n   use, offer to sell, sell, import, and otherwise transfer the Work,\n   where such license applies only to those patent claims licensable\n   by such Contributor that are necessarily infringed by their\n   Contribution(s) alone or by combination of their Contribution(s)\n   with the Work to which such Contribution(s) was submitted. If You\n   institute patent litigation against any entity (including a\n   cross-claim or counterclaim in a lawsuit) alleging that the Work\n   or a Contribution incorporated within the Work constitutes direct\n   or contributory patent infringement, then any patent licenses\n   granted to You under this License for that Work shall terminate\n   as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n   Work or Derivative Works thereof in any medium, with or without\n   modifications, and in Source or Object form, provided that You\n   meet the following conditions:\n\n   (a) You must give any other recipients of the Work or\n       Derivative Works a copy of this License; and\n\n   (b) You must cause any modified files to carry prominent notices\n       stating that You changed the files; and\n\n   (c) You must retain, in the Source form of any Derivative Works\n       that You distribute, all copyright, patent, trademark, and\n       attribution notices from the Source form of the Work,\n       excluding those notices that do not pertain to any part of\n       the Derivative Works; and\n\n   (d) If the Work includes a \"NOTICE\" text file as part of its\n       distribution, then any Derivative Works that You distribute must\n       include a readable copy of the attribution notices contained\n       within such NOTICE file, excluding those notices that do not\n       pertain to any part of the Derivative Works, in at least one\n       of the following places: within a NOTICE text file distributed\n       as part of the Derivative Works; within the Source form or\n       documentation, if provided along with the Derivative Works; or,\n       within a display generated by the Derivative Works, if and\n       wherever such third-party notices normally appear. The contents\n       of the NOTICE file are for informational purposes only and\n       do not modify the License. 
You may add Your own attribution\n       notices within Derivative Works that You distribute, alongside\n       or as an addendum to the NOTICE text from the Work, provided\n       that such additional attribution notices cannot be construed\n       as modifying the License.\n\n   You may add Your own copyright statement to Your modifications and\n   may provide additional or different license terms and conditions\n   for use, reproduction, or distribution of Your modifications, or\n   for any such Derivative Works as a whole, provided Your use,\n   reproduction, and distribution of the Work otherwise complies with\n   the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n   any Contribution intentionally submitted for inclusion in the Work\n   by You to the Licensor shall be under the terms and conditions of\n   this License, without any additional terms or conditions.\n   Notwithstanding the above, nothing herein shall supersede or modify\n   the terms of any separate license agreement you may have executed\n   with Licensor regarding such Contributions.\n\n6. Trademarks. This License does not grant permission to use the trade\n   names, trademarks, service marks, or product names of the Licensor,\n   except as required for reasonable and customary use in describing the\n   origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. Unless required by applicable law or\n   agreed to in writing, Licensor provides the Work (and each\n   Contributor provides its Contributions) on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n   implied, including, without limitation, any warranties or conditions\n   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n   PARTICULAR PURPOSE. You are solely responsible for determining the\n   appropriateness of using or redistributing the Work and assume any\n   risks associated with Your exercise of permissions under this License.\n\n8. Limitation of Liability. In no event and under no legal theory,\n   whether in tort (including negligence), contract, or otherwise,\n   unless required by applicable law (such as deliberate and grossly\n   negligent acts) or agreed to in writing, shall any Contributor be\n   liable to You for damages, including any direct, indirect, special,\n   incidental, or consequential damages of any character arising as a\n   result of this License or out of the use or inability to use the\n   Work (including but not limited to damages for loss of goodwill,\n   work stoppage, computer failure or malfunction, or any and all\n   other commercial damages or losses), even if such Contributor\n   has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n   the Work or Derivative Works thereof, You may choose to offer,\n   and charge a fee for, acceptance of support, warranty, indemnity,\n   or other liability obligations and/or rights consistent with this\n   License. 
However, in accepting such obligations, You may act only\n   on Your own behalf and on Your sole responsibility, not on behalf\n   of any other Contributor, and only if You agree to indemnify,\n   defend, and hold each Contributor harmless for any liability\n   incurred by, or claims asserted against, such Contributor by reason\n   of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n   To apply the Apache License to your work, attach the following\n   boilerplate notice, with the fields enclosed by brackets \"[]\"\n   replaced with your own identifying information. (Don't include\n   the brackets!)  The text should be enclosed in the appropriate\n   comment syntax for the file format. We also recommend that a\n   file or class name and description of purpose be included on the\n   same \"printed page\" as the copyright notice for easier\n   identification within third-party archives.\n\nCopyright [yyyy] [name of copyright owner]\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\thttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n"
  },
  {
    "path": "LICENSE-MIT",
    "content": "Copyright (c) 2016 The Rust Project Developers\n\nPermission is hereby granted, free of charge, to any\nperson obtaining a copy of this software and associated\ndocumentation files (the \"Software\"), to deal in the\nSoftware without restriction, including without\nlimitation the rights to use, copy, modify, merge,\npublish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software\nis furnished to do so, subject to the following\nconditions:\n\nThe above copyright notice and this permission notice\nshall be included in all copies or substantial portions\nof the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF\nANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED\nTO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\nPARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\nSHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\nCLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\nOF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR\nIN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "Generic `Atomic<T>` for Rust\n============================\n\n[![Build Status](https://travis-ci.org/Amanieu/atomic-rs.svg?branch=master)](https://travis-ci.org/Amanieu/atomic-rs) [![Crates.io](https://img.shields.io/crates/v/atomic.svg)](https://crates.io/crates/atomic)\n\nA Rust library which provides a generic `Atomic<T>` type for all `T: NoUninit` types, unlike the standard library which only provides a few fixed atomic types (`AtomicBool`, `AtomicIsize`, `AtomicUsize`, `AtomicPtr`). The `NoUninit` bound is from the [bytemuck] crate, and indicates that a type has no internal padding bytes. You will need to derive or implement this trait for all types used with `Atomic<T>`.\n\nThis library will use native atomic instructions if possible, and will otherwise fall back to a lock-based mechanism. You can use the `Atomic::<T>::is_lock_free()` function to check whether native atomic operations are supported for a given type. Note that a type must have a power-of-2 size and alignment in order to be used by native atomic instructions.\n\nThis crate uses `#![no_std]` and only depends on libcore.\n\n[bytemuck]: https://docs.rs/bytemuck\n\n[Documentation](https://docs.rs/atomic)\n\n## Features\n\nThis crate has the following [Cargo\nfeatures](https://doc.rust-lang.org/cargo/reference/features.html):\n\n* `fallback`: Fall back to locks when atomic instructions cannot be\n  used. (Enabled by default.)\n* `serde`: Enables serialization and serialization of `Atomic<T>` with\n  [serde](https://docs.rs/serde/latest/serde/).\n\n## Usage\n\nAdd this to your `Cargo.toml`:\n\n```toml\n[dependencies]\natomic = \"0.6\"\n```\n\nand this to your crate root:\n\n```rust\nextern crate atomic;\n```\n\n## License\n\nLicensed under either of\n\n * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)\n * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)\n\nat your option.\n\n### Contribution\n\nUnless you explicitly state otherwise, any contribution intentionally submitted\nfor inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any\nadditional terms or conditions.\n"
  },
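  {
    "path": "examples/usage.rs",
    "content": "//! Illustrative usage sketch (a hypothetical `examples/usage.rs`, not part of the\n//! upstream crate); it only exercises the APIs documented in README.md and src/lib.rs.\n//! The `bytemuck` derive feature used below is assumed to be available to examples\n//! via the existing dev-dependency.\n//!\n//! Run with `cargo run --example usage`.\n\nuse atomic::{Atomic, Ordering};\n\n// A plain-old-data type: `NoUninit` can be derived because `#[repr(C)]` with two\n// `u32` fields has no padding bytes.\n#[derive(Copy, Clone, Eq, PartialEq, Debug, bytemuck::NoUninit)]\n#[repr(C)]\nstruct Point {\n    x: u32,\n    y: u32,\n}\n\nfn main() {\n    // Native atomic instructions are used when the type has a supported\n    // power-of-2 size and alignment; otherwise the spinlock fallback is used.\n    println!(\"u64 lock-free: {}\", Atomic::<u64>::is_lock_free());\n\n    let counter = Atomic::new(0u64);\n    counter.fetch_add(1, Ordering::Relaxed);\n    assert_eq!(counter.load(Ordering::Relaxed), 1);\n\n    let point = Atomic::new(Point { x: 1, y: 2 });\n    let old = point.swap(Point { x: 3, y: 4 }, Ordering::AcqRel);\n    assert_eq!(old, Point { x: 1, y: 2 });\n\n    // compare_exchange only stores the new value if the current value matches.\n    assert!(point\n        .compare_exchange(\n            Point { x: 3, y: 4 },\n            Point { x: 5, y: 6 },\n            Ordering::SeqCst,\n            Ordering::SeqCst,\n        )\n        .is_ok());\n}\n"
  },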
  {
    "path": "src/fallback.rs",
    "content": "// Copyright 2016 Amanieu d'Antras\n//\n// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or\n// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or\n// http://opensource.org/licenses/MIT>, at your option. This file may not be\n// copied, modified, or distributed except according to those terms.\n\nuse core::cmp;\nuse core::hint;\nuse core::num::Wrapping;\nuse core::ops;\nuse core::ptr;\nuse core::sync::atomic::{AtomicUsize, Ordering};\n\nuse bytemuck::NoUninit;\n\n// We use an AtomicUsize instead of an AtomicBool because it performs better\n// on architectures that don't have byte-sized atomics.\n//\n// We give each spinlock its own cache line to avoid false sharing.\n#[repr(align(64))]\nstruct SpinLock(AtomicUsize);\n\nimpl SpinLock {\n    fn lock(&self) {\n        while self\n            .0\n            .compare_exchange_weak(0, 1, Ordering::Acquire, Ordering::Relaxed)\n            .is_err()\n        {\n            while self.0.load(Ordering::Relaxed) != 0 {\n                hint::spin_loop();\n            }\n        }\n    }\n\n    fn unlock(&self) {\n        self.0.store(0, Ordering::Release);\n    }\n}\n\n// A big array of spinlocks which we use to guard atomic accesses. A spinlock is\n// chosen based on a hash of the address of the atomic object, which helps to\n// reduce contention compared to a single global lock.\nmacro_rules! array {\n    (@accum (0, $($_es:expr),*) -> ($($body:tt)*))\n        => {array!(@as_expr [$($body)*])};\n    (@accum (1, $($es:expr),*) -> ($($body:tt)*))\n        => {array!(@accum (0, $($es),*) -> ($($body)* $($es,)*))};\n    (@accum (2, $($es:expr),*) -> ($($body:tt)*))\n        => {array!(@accum (0, $($es),*) -> ($($body)* $($es,)* $($es,)*))};\n    (@accum (4, $($es:expr),*) -> ($($body:tt)*))\n        => {array!(@accum (2, $($es,)* $($es),*) -> ($($body)*))};\n    (@accum (8, $($es:expr),*) -> ($($body:tt)*))\n        => {array!(@accum (4, $($es,)* $($es),*) -> ($($body)*))};\n    (@accum (16, $($es:expr),*) -> ($($body:tt)*))\n        => {array!(@accum (8, $($es,)* $($es),*) -> ($($body)*))};\n    (@accum (32, $($es:expr),*) -> ($($body:tt)*))\n        => {array!(@accum (16, $($es,)* $($es),*) -> ($($body)*))};\n    (@accum (64, $($es:expr),*) -> ($($body:tt)*))\n        => {array!(@accum (32, $($es,)* $($es),*) -> ($($body)*))};\n\n    (@as_expr $e:expr) => {$e};\n\n    [$e:expr; $n:tt] => { array!(@accum ($n, $e) -> ()) };\n}\nstatic SPINLOCKS: [SpinLock; 64] = array![SpinLock(AtomicUsize::new(0)); 64];\n\n// Spinlock pointer hashing function from compiler-rt\n#[inline]\nfn lock_for_addr(addr: usize) -> &'static SpinLock {\n    // Disregard the lowest 4 bits.  
We want all values that may be part of the\n    // same memory operation to hash to the same value and therefore use the same\n    // lock.\n    let mut hash = addr >> 4;\n    // Use the next bits as the basis for the hash\n    let low = hash & (SPINLOCKS.len() - 1);\n    // Now use the high(er) set of bits to perturb the hash, so that we don't\n    // get collisions from atomic fields in a single object\n    hash >>= 16;\n    hash ^= low;\n    // Return a pointer to the lock to use\n    &SPINLOCKS[hash & (SPINLOCKS.len() - 1)]\n}\n\n#[inline]\nfn lock(addr: usize) -> LockGuard {\n    let lock = lock_for_addr(addr);\n    lock.lock();\n    LockGuard(lock)\n}\n\nstruct LockGuard(&'static SpinLock);\nimpl Drop for LockGuard {\n    #[inline]\n    fn drop(&mut self) {\n        self.0.unlock();\n    }\n}\n\n#[inline]\npub unsafe fn atomic_load<T>(dst: *mut T) -> T {\n    let _l = lock(dst as usize);\n    ptr::read(dst)\n}\n\n#[inline]\npub unsafe fn atomic_store<T>(dst: *mut T, val: T) {\n    let _l = lock(dst as usize);\n    ptr::write(dst, val);\n}\n\n#[inline]\npub unsafe fn atomic_swap<T>(dst: *mut T, val: T) -> T {\n    let _l = lock(dst as usize);\n    ptr::replace(dst, val)\n}\n\n#[inline]\npub unsafe fn atomic_compare_exchange<T: NoUninit>(\n    dst: *mut T,\n    current: T,\n    new: T,\n) -> Result<T, T> {\n    let _l = lock(dst as usize);\n    let result = ptr::read(dst);\n    // compare_exchange compares with memcmp instead of Eq\n    let a = bytemuck::bytes_of(&result);\n    let b = bytemuck::bytes_of(&current);\n    if a == b {\n        ptr::write(dst, new);\n        Ok(result)\n    } else {\n        Err(result)\n    }\n}\n\n#[inline]\npub unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T) -> T\nwhere\n    Wrapping<T>: ops::Add<Output = Wrapping<T>>,\n{\n    let _l = lock(dst as usize);\n    let result = ptr::read(dst);\n    ptr::write(dst, (Wrapping(result) + Wrapping(val)).0);\n    result\n}\n\n#[inline]\npub unsafe fn atomic_sub<T: Copy>(dst: *mut T, val: T) -> T\nwhere\n    Wrapping<T>: ops::Sub<Output = Wrapping<T>>,\n{\n    let _l = lock(dst as usize);\n    let result = ptr::read(dst);\n    ptr::write(dst, (Wrapping(result) - Wrapping(val)).0);\n    result\n}\n\n#[inline]\npub unsafe fn atomic_and<T: Copy + ops::BitAnd<Output = T>>(dst: *mut T, val: T) -> T {\n    let _l = lock(dst as usize);\n    let result = ptr::read(dst);\n    ptr::write(dst, result & val);\n    result\n}\n\n#[inline]\npub unsafe fn atomic_or<T: Copy + ops::BitOr<Output = T>>(dst: *mut T, val: T) -> T {\n    let _l = lock(dst as usize);\n    let result = ptr::read(dst);\n    ptr::write(dst, result | val);\n    result\n}\n\n#[inline]\npub unsafe fn atomic_xor<T: Copy + ops::BitXor<Output = T>>(dst: *mut T, val: T) -> T {\n    let _l = lock(dst as usize);\n    let result = ptr::read(dst);\n    ptr::write(dst, result ^ val);\n    result\n}\n\n#[inline]\npub unsafe fn atomic_min<T: Copy + cmp::Ord>(dst: *mut T, val: T) -> T {\n    let _l = lock(dst as usize);\n    let result = ptr::read(dst);\n    ptr::write(dst, cmp::min(result, val));\n    result\n}\n\n#[inline]\npub unsafe fn atomic_max<T: Copy + cmp::Ord>(dst: *mut T, val: T) -> T {\n    let _l = lock(dst as usize);\n    let result = ptr::read(dst);\n    ptr::write(dst, cmp::max(result, val));\n    result\n}\n"
  },
  {
    "path": "src/lib.rs",
    "content": "// Copyright 2016 Amanieu d'Antras\n//\n// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or\n// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or\n// http://opensource.org/licenses/MIT>, at your option. This file may not be\n// copied, modified, or distributed except according to those terms.\n\n//! Generic `Atomic<T>` wrapper type\n//!\n//! Atomic types provide primitive shared-memory communication between\n//! threads, and are the building blocks of other concurrent types.\n//!\n//! This library defines a generic atomic wrapper type `Atomic<T>` for all\n//! `T: NoUninit` types.\n//! Atomic types present operations that, when used correctly, synchronize\n//! updates between threads.\n//!\n//! The `NoUninit` bound is from the [bytemuck] crate, and indicates that a\n//! type has no internal padding bytes. You will need to derive or implement\n//! this trait for all types used with `Atomic<T>`.\n//!\n//! Each method takes an `Ordering` which represents the strength of\n//! the memory barrier for that operation. These orderings are the\n//! same as [LLVM atomic orderings][1].\n//!\n//! [1]: http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations\n//!\n//! Atomic variables are safe to share between threads (they implement `Sync`)\n//! but they do not themselves provide the mechanism for sharing. The most\n//! common way to share an atomic variable is to put it into an `Arc` (an\n//! atomically-reference-counted shared pointer).\n//!\n//! Most atomic types may be stored in static variables, initialized using\n//! the `const fn` constructors. Atomic statics are often used for lazy global\n//! initialization.\n//!\n//! [bytemuck]: https://docs.rs/bytemuck\n\n#![warn(missing_docs)]\n#![warn(rust_2018_idioms)]\n#![no_std]\n#![cfg_attr(feature = \"nightly\", feature(integer_atomics))]\n\n#[cfg(any(test, feature = \"std\"))]\n#[macro_use]\nextern crate std;\n\nuse core::mem::MaybeUninit;\n// Re-export some useful definitions from libcore\npub use core::sync::atomic::{fence, Ordering};\n\nuse core::cell::UnsafeCell;\nuse core::fmt;\n\n#[cfg(feature = \"std\")]\nuse std::panic::RefUnwindSafe;\n\nuse bytemuck::NoUninit;\n\n#[cfg(feature = \"fallback\")]\nmod fallback;\nmod ops;\n\n/// A generic atomic wrapper type which allows an object to be safely shared\n/// between threads.\n#[repr(transparent)]\npub struct Atomic<T> {\n    // The MaybeUninit is here to work around rust-lang/rust#87341.\n    v: UnsafeCell<MaybeUninit<T>>,\n}\n\n// Atomic<T> is only Sync if T is Send\nunsafe impl<T: Copy + Send> Sync for Atomic<T> {}\n\n// Given that atomicity is guaranteed, Atomic<T> is RefUnwindSafe if T is\n//\n// This is trivially correct for native lock-free atomic types. 
For those whose\n// atomicity is emulated using a spinlock, it is still correct because the\n// `Atomic` API does not allow doing any panic-inducing operation after writing\n// to the target object.\n#[cfg(feature = \"std\")]\nimpl<T: RefUnwindSafe> RefUnwindSafe for Atomic<T> {}\n\nimpl<T: Default> Default for Atomic<T> {\n    #[inline]\n    fn default() -> Self {\n        Self::new(Default::default())\n    }\n}\n\nimpl<T: NoUninit + fmt::Debug> fmt::Debug for Atomic<T> {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        f.debug_tuple(\"Atomic\")\n            .field(&self.load(Ordering::Relaxed))\n            .finish()\n    }\n}\n\nimpl<T> Atomic<T> {\n    /// Creates a new `Atomic`.\n    #[inline]\n    pub const fn new(v: T) -> Atomic<T> {\n        Atomic {\n            v: UnsafeCell::new(MaybeUninit::new(v)),\n        }\n    }\n\n    /// Checks if `Atomic` objects of this type are lock-free.\n    ///\n    /// If an `Atomic` is not lock-free then it may be implemented using locks\n    /// internally, which makes it unsuitable for some situations (such as\n    /// communicating with a signal handler).\n    #[inline]\n    pub const fn is_lock_free() -> bool {\n        ops::atomic_is_lock_free::<T>()\n    }\n}\n\nimpl<T: NoUninit> Atomic<T> {\n    #[inline]\n    fn inner_ptr(&self) -> *mut T {\n        self.v.get() as *mut T\n    }\n\n    /// Returns a mutable reference to the underlying type.\n    ///\n    /// This is safe because the mutable reference guarantees that no other threads are\n    /// concurrently accessing the atomic data.\n    #[inline]\n    pub fn get_mut(&mut self) -> &mut T {\n        unsafe { &mut *self.inner_ptr() }\n    }\n\n    /// Consumes the atomic and returns the contained value.\n    ///\n    /// This is safe because passing `self` by value guarantees that no other threads are\n    /// concurrently accessing the atomic data.\n    #[inline]\n    pub fn into_inner(self) -> T {\n        unsafe { self.v.into_inner().assume_init() }\n    }\n\n    /// Loads a value from the `Atomic`.\n    ///\n    /// `load` takes an `Ordering` argument which describes the memory ordering\n    /// of this operation.\n    ///\n    /// # Panics\n    ///\n    /// Panics if `order` is `Release` or `AcqRel`.\n    #[inline]\n    pub fn load(&self, order: Ordering) -> T {\n        unsafe { ops::atomic_load(self.inner_ptr(), order) }\n    }\n\n    /// Stores a value into the `Atomic`.\n    ///\n    /// `store` takes an `Ordering` argument which describes the memory ordering\n    /// of this operation.\n    ///\n    /// # Panics\n    ///\n    /// Panics if `order` is `Acquire` or `AcqRel`.\n    #[inline]\n    pub fn store(&self, val: T, order: Ordering) {\n        unsafe {\n            ops::atomic_store(self.inner_ptr(), val, order);\n        }\n    }\n\n    /// Stores a value into the `Atomic`, returning the old value.\n    ///\n    /// `swap` takes an `Ordering` argument which describes the memory ordering\n    /// of this operation.\n    #[inline]\n    pub fn swap(&self, val: T, order: Ordering) -> T {\n        unsafe { ops::atomic_swap(self.inner_ptr(), val, order) }\n    }\n\n    /// Stores a value into the `Atomic` if the current value is the same as the\n    /// `current` value.\n    ///\n    /// The return value is a result indicating whether the new value was\n    /// written and containing the previous value. 
On success this value is\n    /// guaranteed to be equal to `current`.\n    ///\n    /// `compare_exchange` takes two `Ordering` arguments to describe the memory\n    /// ordering of this operation. The first describes the required ordering if\n    /// the operation succeeds while the second describes the required ordering\n    /// when the operation fails. The failure ordering can't be `Release` or\n    /// `AcqRel` and must be equivalent or weaker than the success ordering.\n    #[inline]\n    pub fn compare_exchange(\n        &self,\n        current: T,\n        new: T,\n        success: Ordering,\n        failure: Ordering,\n    ) -> Result<T, T> {\n        unsafe { ops::atomic_compare_exchange(self.inner_ptr(), current, new, success, failure) }\n    }\n\n    /// Stores a value into the `Atomic` if the current value is the same as the\n    /// `current` value.\n    ///\n    /// Unlike `compare_exchange`, this function is allowed to spuriously fail\n    /// even when the comparison succeeds, which can result in more efficient\n    /// code on some platforms. The return value is a result indicating whether\n    /// the new value was written and containing the previous value.\n    ///\n    /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory\n    /// ordering of this operation. The first describes the required ordering if\n    /// the operation succeeds while the second describes the required ordering\n    /// when the operation fails. The failure ordering can't be `Release` or\n    /// `AcqRel` and must be equivalent or weaker than the success ordering.\n    #[inline]\n    pub fn compare_exchange_weak(\n        &self,\n        current: T,\n        new: T,\n        success: Ordering,\n        failure: Ordering,\n    ) -> Result<T, T> {\n        unsafe {\n            ops::atomic_compare_exchange_weak(self.inner_ptr(), current, new, success, failure)\n        }\n    }\n\n    /// Fetches the value, and applies a function to it that returns an optional\n    /// new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else\n    /// `Err(previous_value)`.\n    ///\n    /// Note: This may call the function multiple times if the value has been changed from other threads in\n    /// the meantime, as long as the function returns `Some(_)`, but the function will have been applied\n    /// only once to the stored value.\n    ///\n    /// `fetch_update` takes two [`Ordering`] arguments to describe the memory ordering of this operation.\n    /// The first describes the required ordering for when the operation finally succeeds while the second\n    /// describes the required ordering for loads. These correspond to the success and failure orderings of\n    /// [`compare_exchange`] respectively.\n    ///\n    /// Using [`Acquire`] as success ordering makes the store part\n    /// of this operation [`Relaxed`], and using [`Release`] makes the final successful load\n    /// [`Relaxed`]. 
The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]\n    /// and must be equivalent to or weaker than the success ordering.\n    ///\n    /// [`compare_exchange`]: #method.compare_exchange\n    /// [`Ordering`]: enum.Ordering.html\n    /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed\n    /// [`Release`]: enum.Ordering.html#variant.Release\n    /// [`Acquire`]: enum.Ordering.html#variant.Acquire\n    /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst\n    ///\n    /// # Examples\n    ///\n    /// ```rust\n    /// use atomic::{Atomic, Ordering};\n    ///\n    /// let x = Atomic::new(7);\n    /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(7));\n    /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(7));\n    /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(8));\n    /// assert_eq!(x.load(Ordering::SeqCst), 9);\n    /// ```\n    #[inline]\n    pub fn fetch_update<F>(\n        &self,\n        set_order: Ordering,\n        fetch_order: Ordering,\n        mut f: F,\n    ) -> Result<T, T>\n    where\n        F: FnMut(T) -> Option<T>,\n    {\n        let mut prev = self.load(fetch_order);\n        while let Some(next) = f(prev) {\n            match self.compare_exchange_weak(prev, next, set_order, fetch_order) {\n                x @ Ok(_) => return x,\n                Err(next_prev) => prev = next_prev,\n            }\n        }\n        Err(prev)\n    }\n}\n\nimpl Atomic<bool> {\n    /// Logical \"and\" with a boolean value.\n    ///\n    /// Performs a logical \"and\" operation on the current value and the argument\n    /// `val`, and sets the new value to the result.\n    ///\n    /// Returns the previous value.\n    #[inline]\n    pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {\n        unsafe { ops::atomic_and(self.inner_ptr(), val, order) }\n    }\n\n    /// Logical \"or\" with a boolean value.\n    ///\n    /// Performs a logical \"or\" operation on the current value and the argument\n    /// `val`, and sets the new value to the result.\n    ///\n    /// Returns the previous value.\n    #[inline]\n    pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {\n        unsafe { ops::atomic_or(self.inner_ptr(), val, order) }\n    }\n\n    /// Logical \"xor\" with a boolean value.\n    ///\n    /// Performs a logical \"xor\" operation on the current value and the argument\n    /// `val`, and sets the new value to the result.\n    ///\n    /// Returns the previous value.\n    #[inline]\n    pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {\n        unsafe { ops::atomic_xor(self.inner_ptr(), val, order) }\n    }\n}\n\nmacro_rules! 
atomic_ops_common {\n    ($($t:ty)*) => ($(\n        impl Atomic<$t> {\n            /// Add to the current value, returning the previous value.\n            #[inline]\n            pub fn fetch_add(&self, val: $t, order: Ordering) -> $t {\n                unsafe { ops::atomic_add(self.inner_ptr(), val, order) }\n            }\n\n            /// Subtract from the current value, returning the previous value.\n            #[inline]\n            pub fn fetch_sub(&self, val: $t, order: Ordering) -> $t {\n                unsafe { ops::atomic_sub(self.inner_ptr(), val, order) }\n            }\n\n            /// Bitwise and with the current value, returning the previous value.\n            #[inline]\n            pub fn fetch_and(&self, val: $t, order: Ordering) -> $t {\n                unsafe { ops::atomic_and(self.inner_ptr(), val, order) }\n            }\n\n            /// Bitwise or with the current value, returning the previous value.\n            #[inline]\n            pub fn fetch_or(&self, val: $t, order: Ordering) -> $t {\n                unsafe { ops::atomic_or(self.inner_ptr(), val, order) }\n            }\n\n            /// Bitwise xor with the current value, returning the previous value.\n            #[inline]\n            pub fn fetch_xor(&self, val: $t, order: Ordering) -> $t {\n                unsafe { ops::atomic_xor(self.inner_ptr(), val, order) }\n            }\n        }\n    )*);\n}\nmacro_rules! atomic_ops_signed {\n    ($($t:ty)*) => (\n        atomic_ops_common!{ $($t)* }\n        $(\n            impl Atomic<$t> {\n                /// Minimum with the current value.\n                #[inline]\n                pub fn fetch_min(&self, val: $t, order: Ordering) -> $t {\n                    unsafe { ops::atomic_min(self.inner_ptr(), val, order) }\n                }\n\n                /// Maximum with the current value.\n                #[inline]\n                pub fn fetch_max(&self, val: $t, order: Ordering) -> $t {\n                    unsafe { ops::atomic_max(self.inner_ptr(), val, order) }\n                }\n            }\n        )*\n    );\n}\nmacro_rules! atomic_ops_unsigned {\n    ($($t:ty)*) => (\n        atomic_ops_common!{ $($t)* }\n        $(\n            impl Atomic<$t> {\n                /// Minimum with the current value.\n                #[inline]\n                pub fn fetch_min(&self, val: $t, order: Ordering) -> $t {\n                    unsafe { ops::atomic_umin(self.inner_ptr(), val, order) }\n                }\n\n                /// Maximum with the current value.\n                #[inline]\n                pub fn fetch_max(&self, val: $t, order: Ordering) -> $t {\n                    unsafe { ops::atomic_umax(self.inner_ptr(), val, order) }\n                }\n            }\n        )*\n    );\n}\natomic_ops_signed! { i8 i16 i32 i64 isize i128 }\natomic_ops_unsigned! 
{ u8 u16 u32 u64 usize u128 }\n\n#[cfg(feature = \"serde\")]\nmod serde_impl;\n\n#[cfg(test)]\nmod tests {\n    use super::{Atomic, Ordering::*};\n    use bytemuck::NoUninit;\n    use core::mem;\n\n    #[derive(Copy, Clone, Eq, PartialEq, Debug, Default, NoUninit)]\n    #[cfg_attr(feature = \"serde\", derive(serde::Serialize, serde::Deserialize))]\n    #[repr(C)]\n    struct Foo(u8, u8);\n\n    #[derive(Copy, Clone, Eq, PartialEq, Debug, Default, NoUninit)]\n    #[cfg_attr(feature = \"serde\", derive(serde::Serialize, serde::Deserialize))]\n    #[repr(C)]\n    struct Bar(u64, u64);\n\n    #[derive(Copy, Clone, Eq, PartialEq, Debug, Default, NoUninit)]\n    #[cfg_attr(feature = \"serde\", derive(serde::Serialize, serde::Deserialize))]\n    #[repr(C)]\n    struct Quux(u32);\n\n    #[cfg(feature = \"serde\")]\n    fn assert_serde<T>(atomic: &Atomic<T>, value: T)\n    where\n        T: NoUninit\n            + PartialEq\n            + std::fmt::Debug\n            + for<'a> serde::Deserialize<'a>\n            + serde::Serialize,\n    {\n        let s = serde_json::to_string(atomic).unwrap();\n        assert_eq!(s, serde_json::to_string(&value).unwrap());\n\n        let x: Atomic<T> = serde_json::from_str(&s).unwrap();\n        assert_eq!(x.load(SeqCst), value);\n    }\n\n    #[test]\n    fn atomic_bool() {\n        let a = Atomic::new(false);\n        assert_eq!(\n            Atomic::<bool>::is_lock_free(),\n            cfg!(target_has_atomic = \"8\"),\n        );\n        assert_eq!(format!(\"{:?}\", a), \"Atomic(false)\");\n        assert_eq!(a.load(SeqCst), false);\n        a.store(true, SeqCst);\n        assert_eq!(a.swap(false, SeqCst), true);\n        assert_eq!(a.compare_exchange(true, false, SeqCst, SeqCst), Err(false));\n        assert_eq!(a.compare_exchange(false, true, SeqCst, SeqCst), Ok(false));\n        assert_eq!(a.fetch_and(false, SeqCst), true);\n        assert_eq!(a.fetch_or(true, SeqCst), false);\n        assert_eq!(a.fetch_xor(false, SeqCst), true);\n        assert_eq!(a.load(SeqCst), true);\n\n        #[cfg(feature = \"serde\")]\n        assert_serde(&a, true);\n    }\n\n    #[test]\n    fn atomic_i8() {\n        let a = Atomic::new(0i8);\n        assert_eq!(Atomic::<i8>::is_lock_free(), cfg!(target_has_atomic = \"8\"));\n        assert_eq!(format!(\"{:?}\", a), \"Atomic(0)\");\n        assert_eq!(a.load(SeqCst), 0);\n        a.store(1, SeqCst);\n        assert_eq!(a.swap(2, SeqCst), 1);\n        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));\n        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));\n        assert_eq!(a.fetch_add(123, SeqCst), 3);\n        // Make sure overflows are handled correctly\n        assert_eq!(a.fetch_sub(-56, SeqCst), 126);\n        assert_eq!(a.fetch_and(7, SeqCst), -74);\n        assert_eq!(a.fetch_or(64, SeqCst), 6);\n        assert_eq!(a.fetch_xor(1, SeqCst), 70);\n        assert_eq!(a.fetch_min(30, SeqCst), 71);\n        assert_eq!(a.fetch_max(-25, SeqCst), 30);\n        assert_eq!(a.load(SeqCst), 30);\n\n        #[cfg(feature = \"serde\")]\n        assert_serde(&a, 30);\n    }\n\n    #[test]\n    fn atomic_i16() {\n        let a = Atomic::new(0i16);\n        assert_eq!(\n            Atomic::<i16>::is_lock_free(),\n            cfg!(target_has_atomic = \"16\")\n        );\n        assert_eq!(format!(\"{:?}\", a), \"Atomic(0)\");\n        assert_eq!(a.load(SeqCst), 0);\n        a.store(1, SeqCst);\n        assert_eq!(a.swap(2, SeqCst), 1);\n        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));\n     
   assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));\n        assert_eq!(a.fetch_add(123, SeqCst), 3);\n        assert_eq!(a.fetch_sub(-56, SeqCst), 126);\n        assert_eq!(a.fetch_and(7, SeqCst), 182);\n        assert_eq!(a.fetch_or(64, SeqCst), 6);\n        assert_eq!(a.fetch_xor(1, SeqCst), 70);\n        assert_eq!(a.fetch_min(30, SeqCst), 71);\n        assert_eq!(a.fetch_max(-25, SeqCst), 30);\n        assert_eq!(a.load(SeqCst), 30);\n\n        #[cfg(feature = \"serde\")]\n        assert_serde(&a, 30);\n    }\n\n    #[test]\n    fn atomic_i32() {\n        let a = Atomic::new(0i32);\n        assert_eq!(\n            Atomic::<i32>::is_lock_free(),\n            cfg!(target_has_atomic = \"32\")\n        );\n        assert_eq!(format!(\"{:?}\", a), \"Atomic(0)\");\n        assert_eq!(a.load(SeqCst), 0);\n        a.store(1, SeqCst);\n        assert_eq!(a.swap(2, SeqCst), 1);\n        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));\n        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));\n        assert_eq!(a.fetch_add(123, SeqCst), 3);\n        assert_eq!(a.fetch_sub(-56, SeqCst), 126);\n        assert_eq!(a.fetch_and(7, SeqCst), 182);\n        assert_eq!(a.fetch_or(64, SeqCst), 6);\n        assert_eq!(a.fetch_xor(1, SeqCst), 70);\n        assert_eq!(a.fetch_min(30, SeqCst), 71);\n        assert_eq!(a.fetch_max(-25, SeqCst), 30);\n        assert_eq!(a.load(SeqCst), 30);\n\n        #[cfg(feature = \"serde\")]\n        assert_serde(&a, 30);\n    }\n\n    // on 32-bit x86 64 bit atomics exist, but they can't be used to implement\n    // atomic<i64> because AtomicI64 has a greater alignment requirement than\n    // i64.\n    #[cfg(any(\n        feature = \"fallback\",\n        all(target_has_atomic = \"64\", not(target_arch = \"x86\"))\n    ))]\n    #[test]\n    fn atomic_i64() {\n        let a = Atomic::new(0i64);\n        assert_eq!(\n            Atomic::<i64>::is_lock_free(),\n            cfg!(target_has_atomic = \"64\") && mem::align_of::<i64>() == 8\n        );\n        assert_eq!(format!(\"{:?}\", a), \"Atomic(0)\");\n        assert_eq!(a.load(SeqCst), 0);\n        a.store(1, SeqCst);\n        assert_eq!(a.swap(2, SeqCst), 1);\n        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));\n        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));\n        assert_eq!(a.fetch_add(123, SeqCst), 3);\n        assert_eq!(a.fetch_sub(-56, SeqCst), 126);\n        assert_eq!(a.fetch_and(7, SeqCst), 182);\n        assert_eq!(a.fetch_or(64, SeqCst), 6);\n        assert_eq!(a.fetch_xor(1, SeqCst), 70);\n        assert_eq!(a.fetch_min(30, SeqCst), 71);\n        assert_eq!(a.fetch_max(-25, SeqCst), 30);\n        assert_eq!(a.load(SeqCst), 30);\n\n        #[cfg(feature = \"serde\")]\n        assert_serde(&a, 30);\n    }\n\n    #[cfg(any(feature = \"fallback\", target_has_atomic = \"128\"))]\n    #[test]\n    fn atomic_i128() {\n        let a = Atomic::new(0i128);\n        assert_eq!(\n            Atomic::<i128>::is_lock_free(),\n            cfg!(feature = \"nightly\") & cfg!(target_has_atomic = \"128\")\n        );\n        assert_eq!(format!(\"{:?}\", a), \"Atomic(0)\");\n        assert_eq!(a.load(SeqCst), 0);\n        a.store(1, SeqCst);\n        assert_eq!(a.swap(2, SeqCst), 1);\n        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));\n        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));\n        assert_eq!(a.fetch_add(123, SeqCst), 3);\n        assert_eq!(a.fetch_sub(-56, SeqCst), 126);\n        
assert_eq!(a.fetch_and(7, SeqCst), 182);\n        assert_eq!(a.fetch_or(64, SeqCst), 6);\n        assert_eq!(a.fetch_xor(1, SeqCst), 70);\n        assert_eq!(a.fetch_min(30, SeqCst), 71);\n        assert_eq!(a.fetch_max(-25, SeqCst), 30);\n        assert_eq!(a.load(SeqCst), 30);\n\n        #[cfg(feature = \"serde\")]\n        assert_serde(&a, 30);\n    }\n\n    #[test]\n    fn atomic_isize() {\n        let a = Atomic::new(0isize);\n        assert_eq!(format!(\"{:?}\", a), \"Atomic(0)\");\n        assert_eq!(a.load(SeqCst), 0);\n        a.store(1, SeqCst);\n        assert_eq!(a.swap(2, SeqCst), 1);\n        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));\n        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));\n        assert_eq!(a.fetch_add(123, SeqCst), 3);\n        assert_eq!(a.fetch_sub(-56, SeqCst), 126);\n        assert_eq!(a.fetch_and(7, SeqCst), 182);\n        assert_eq!(a.fetch_or(64, SeqCst), 6);\n        assert_eq!(a.fetch_xor(1, SeqCst), 70);\n        assert_eq!(a.fetch_min(30, SeqCst), 71);\n        assert_eq!(a.fetch_max(-25, SeqCst), 30);\n        assert_eq!(a.load(SeqCst), 30);\n\n        #[cfg(feature = \"serde\")]\n        assert_serde(&a, 30);\n    }\n\n    #[test]\n    fn atomic_u8() {\n        let a = Atomic::new(0u8);\n        assert_eq!(Atomic::<u8>::is_lock_free(), cfg!(target_has_atomic = \"8\"));\n        assert_eq!(format!(\"{:?}\", a), \"Atomic(0)\");\n        assert_eq!(a.load(SeqCst), 0);\n        a.store(1, SeqCst);\n        assert_eq!(a.swap(2, SeqCst), 1);\n        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));\n        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));\n        assert_eq!(a.fetch_add(123, SeqCst), 3);\n        assert_eq!(a.fetch_sub(56, SeqCst), 126);\n        assert_eq!(a.fetch_and(7, SeqCst), 70);\n        assert_eq!(a.fetch_or(64, SeqCst), 6);\n        assert_eq!(a.fetch_xor(1, SeqCst), 70);\n        assert_eq!(a.fetch_min(30, SeqCst), 71);\n        assert_eq!(a.fetch_max(25, SeqCst), 30);\n        assert_eq!(a.load(SeqCst), 30);\n\n        #[cfg(feature = \"serde\")]\n        assert_serde(&a, 30);\n    }\n\n    #[test]\n    fn atomic_u16() {\n        let a = Atomic::new(0u16);\n        assert_eq!(\n            Atomic::<u16>::is_lock_free(),\n            cfg!(target_has_atomic = \"16\")\n        );\n        assert_eq!(format!(\"{:?}\", a), \"Atomic(0)\");\n        assert_eq!(a.load(SeqCst), 0);\n        a.store(1, SeqCst);\n        assert_eq!(a.swap(2, SeqCst), 1);\n        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));\n        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));\n        assert_eq!(a.fetch_add(123, SeqCst), 3);\n        assert_eq!(a.fetch_sub(56, SeqCst), 126);\n        assert_eq!(a.fetch_and(7, SeqCst), 70);\n        assert_eq!(a.fetch_or(64, SeqCst), 6);\n        assert_eq!(a.fetch_xor(1, SeqCst), 70);\n        assert_eq!(a.fetch_min(30, SeqCst), 71);\n        assert_eq!(a.fetch_max(25, SeqCst), 30);\n        assert_eq!(a.load(SeqCst), 30);\n\n        #[cfg(feature = \"serde\")]\n        assert_serde(&a, 30);\n    }\n\n    #[test]\n    fn atomic_u32() {\n        let a = Atomic::new(0u32);\n        assert_eq!(\n            Atomic::<u32>::is_lock_free(),\n            cfg!(target_has_atomic = \"32\")\n        );\n        assert_eq!(format!(\"{:?}\", a), \"Atomic(0)\");\n        assert_eq!(a.load(SeqCst), 0);\n        a.store(1, SeqCst);\n        assert_eq!(a.swap(2, SeqCst), 1);\n        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), 
Err(2));\n        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));\n        assert_eq!(a.fetch_add(123, SeqCst), 3);\n        assert_eq!(a.fetch_sub(56, SeqCst), 126);\n        assert_eq!(a.fetch_and(7, SeqCst), 70);\n        assert_eq!(a.fetch_or(64, SeqCst), 6);\n        assert_eq!(a.fetch_xor(1, SeqCst), 70);\n        assert_eq!(a.fetch_min(30, SeqCst), 71);\n        assert_eq!(a.fetch_max(25, SeqCst), 30);\n        assert_eq!(a.load(SeqCst), 30);\n\n        #[cfg(feature = \"serde\")]\n        assert_serde(&a, 30);\n    }\n\n    // on 32-bit x86 64 bit atomics exist, but they can't be used to implement\n    // atomic<u64> because AtomicU64 has a greater alignment requirement than\n    // u64.\n    #[cfg(any(\n        feature = \"fallback\",\n        all(target_has_atomic = \"64\", not(target_arch = \"x86\"))\n    ))]\n    #[test]\n    fn atomic_u64() {\n        let a = Atomic::new(0u64);\n        assert_eq!(\n            Atomic::<u64>::is_lock_free(),\n            cfg!(target_has_atomic = \"64\") && mem::align_of::<u64>() == 8\n        );\n        assert_eq!(format!(\"{:?}\", a), \"Atomic(0)\");\n        assert_eq!(a.load(SeqCst), 0);\n        a.store(1, SeqCst);\n        assert_eq!(a.swap(2, SeqCst), 1);\n        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));\n        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));\n        assert_eq!(a.fetch_add(123, SeqCst), 3);\n        assert_eq!(a.fetch_sub(56, SeqCst), 126);\n        assert_eq!(a.fetch_and(7, SeqCst), 70);\n        assert_eq!(a.fetch_or(64, SeqCst), 6);\n        assert_eq!(a.fetch_xor(1, SeqCst), 70);\n        assert_eq!(a.fetch_min(30, SeqCst), 71);\n        assert_eq!(a.fetch_max(25, SeqCst), 30);\n        assert_eq!(a.load(SeqCst), 30);\n\n        #[cfg(feature = \"serde\")]\n        assert_serde(&a, 30);\n    }\n\n    #[cfg(any(feature = \"fallback\", target_has_atomic = \"128\"))]\n    #[test]\n    fn atomic_u128() {\n        let a = Atomic::new(0u128);\n        assert_eq!(\n            Atomic::<u128>::is_lock_free(),\n            cfg!(feature = \"nightly\") & cfg!(target_has_atomic = \"128\")\n        );\n        assert_eq!(format!(\"{:?}\", a), \"Atomic(0)\");\n        assert_eq!(a.load(SeqCst), 0);\n        a.store(1, SeqCst);\n        assert_eq!(a.swap(2, SeqCst), 1);\n        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));\n        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));\n        assert_eq!(a.fetch_add(123, SeqCst), 3);\n        assert_eq!(a.fetch_sub(56, SeqCst), 126);\n        assert_eq!(a.fetch_and(7, SeqCst), 70);\n        assert_eq!(a.fetch_or(64, SeqCst), 6);\n        assert_eq!(a.fetch_xor(1, SeqCst), 70);\n        assert_eq!(a.fetch_min(30, SeqCst), 71);\n        assert_eq!(a.fetch_max(25, SeqCst), 30);\n        assert_eq!(a.load(SeqCst), 30);\n\n        #[cfg(feature = \"serde\")]\n        assert_serde(&a, 30);\n    }\n\n    #[test]\n    fn atomic_usize() {\n        let a = Atomic::new(0usize);\n        assert_eq!(format!(\"{:?}\", a), \"Atomic(0)\");\n        assert_eq!(a.load(SeqCst), 0);\n        a.store(1, SeqCst);\n        assert_eq!(a.swap(2, SeqCst), 1);\n        assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));\n        assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));\n        assert_eq!(a.fetch_add(123, SeqCst), 3);\n        assert_eq!(a.fetch_sub(56, SeqCst), 126);\n        assert_eq!(a.fetch_and(7, SeqCst), 70);\n        assert_eq!(a.fetch_or(64, SeqCst), 6);\n        assert_eq!(a.fetch_xor(1, 
SeqCst), 70);\n        assert_eq!(a.fetch_min(30, SeqCst), 71);\n        assert_eq!(a.fetch_max(25, SeqCst), 30);\n        assert_eq!(a.load(SeqCst), 30);\n\n        #[cfg(feature = \"serde\")]\n        assert_serde(&a, 30);\n    }\n\n    #[cfg(feature = \"fallback\")]\n    #[test]\n    fn atomic_foo() {\n        let a = Atomic::default();\n        assert_eq!(Atomic::<Foo>::is_lock_free(), false);\n        assert_eq!(format!(\"{:?}\", a), \"Atomic(Foo(0, 0))\");\n        assert_eq!(a.load(SeqCst), Foo(0, 0));\n        a.store(Foo(1, 1), SeqCst);\n        assert_eq!(a.swap(Foo(2, 2), SeqCst), Foo(1, 1));\n        assert_eq!(\n            a.compare_exchange(Foo(5, 5), Foo(45, 45), SeqCst, SeqCst),\n            Err(Foo(2, 2))\n        );\n        assert_eq!(\n            a.compare_exchange(Foo(2, 2), Foo(3, 3), SeqCst, SeqCst),\n            Ok(Foo(2, 2))\n        );\n        assert_eq!(a.load(SeqCst), Foo(3, 3));\n\n        #[cfg(feature = \"serde\")]\n        assert_serde(&a, Foo(3, 3));\n    }\n\n    #[cfg(feature = \"fallback\")]\n    #[test]\n    fn atomic_bar() {\n        let a = Atomic::default();\n        assert_eq!(Atomic::<Bar>::is_lock_free(), false);\n        assert_eq!(format!(\"{:?}\", a), \"Atomic(Bar(0, 0))\");\n        assert_eq!(a.load(SeqCst), Bar(0, 0));\n        a.store(Bar(1, 1), SeqCst);\n        assert_eq!(a.swap(Bar(2, 2), SeqCst), Bar(1, 1));\n        assert_eq!(\n            a.compare_exchange(Bar(5, 5), Bar(45, 45), SeqCst, SeqCst),\n            Err(Bar(2, 2))\n        );\n        assert_eq!(\n            a.compare_exchange(Bar(2, 2), Bar(3, 3), SeqCst, SeqCst),\n            Ok(Bar(2, 2))\n        );\n        assert_eq!(a.load(SeqCst), Bar(3, 3));\n\n        #[cfg(feature = \"serde\")]\n        assert_serde(&a, Bar(3, 3));\n    }\n\n    #[test]\n    fn atomic_quxx() {\n        let a = Atomic::default();\n        assert_eq!(\n            Atomic::<Quux>::is_lock_free(),\n            cfg!(target_has_atomic = \"32\")\n        );\n        assert_eq!(format!(\"{:?}\", a), \"Atomic(Quux(0))\");\n        assert_eq!(a.load(SeqCst), Quux(0));\n        a.store(Quux(1), SeqCst);\n        assert_eq!(a.swap(Quux(2), SeqCst), Quux(1));\n        assert_eq!(\n            a.compare_exchange(Quux(5), Quux(45), SeqCst, SeqCst),\n            Err(Quux(2))\n        );\n        assert_eq!(\n            a.compare_exchange(Quux(2), Quux(3), SeqCst, SeqCst),\n            Ok(Quux(2))\n        );\n        assert_eq!(a.load(SeqCst), Quux(3));\n\n        #[cfg(feature = \"serde\")]\n        assert_serde(&a, Quux(3));\n    }\n}\n"
  },
  {
    "path": "src/ops.rs",
    "content": "// Copyright 2016 Amanieu d'Antras\n//\n// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or\n// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or\n// http://opensource.org/licenses/MIT>, at your option. This file may not be\n// copied, modified, or distributed except according to those terms.\n\nuse bytemuck::NoUninit;\n\n#[cfg(feature = \"fallback\")]\nuse crate::fallback;\nuse core::cmp;\nuse core::mem;\nuse core::num::Wrapping;\nuse core::ops;\nuse core::sync::atomic::Ordering;\n\nmacro_rules! match_atomic {\n    ($type:ident, $atomic:ident, $impl:expr, $fallback_impl:expr) => {\n        match mem::size_of::<$type>() {\n            #[cfg(target_has_atomic = \"8\")]\n            1 if mem::align_of::<$type>() >= 1 => {\n                type $atomic = core::sync::atomic::AtomicU8;\n\n                $impl\n            }\n            #[cfg(target_has_atomic = \"16\")]\n            2 if mem::align_of::<$type>() >= 2 => {\n                type $atomic = core::sync::atomic::AtomicU16;\n\n                $impl\n            }\n            #[cfg(target_has_atomic = \"32\")]\n            4 if mem::align_of::<$type>() >= 4 => {\n                type $atomic = core::sync::atomic::AtomicU32;\n\n                $impl\n            }\n            #[cfg(target_has_atomic = \"64\")]\n            8 if mem::align_of::<$type>() >= 8 => {\n                type $atomic = core::sync::atomic::AtomicU64;\n\n                $impl\n            }\n            #[cfg(all(feature = \"nightly\", target_has_atomic = \"128\"))]\n            16 if mem::align_of::<$type>() >= 16 => {\n                type $atomic = core::sync::atomic::AtomicU128;\n\n                $impl\n            }\n            #[cfg(feature = \"fallback\")]\n            _ => $fallback_impl,\n            #[cfg(not(feature = \"fallback\"))]\n            _ => panic!(\"Atomic operations for type `{}` are not available as the `fallback` feature of the `atomic` crate is disabled.\", core::any::type_name::<$type>()),\n        }\n    };\n}\n\nmacro_rules! 
match_signed_atomic {\n    ($type:ident, $atomic:ident, $impl:expr, $fallback_impl:expr) => {\n        match mem::size_of::<$type>() {\n            #[cfg(target_has_atomic = \"8\")]\n            1 if mem::align_of::<$type>() >= 1 => {\n                type $atomic = core::sync::atomic::AtomicI8;\n\n                $impl\n            }\n            #[cfg(target_has_atomic = \"16\")]\n            2 if mem::align_of::<$type>() >= 2 => {\n                type $atomic = core::sync::atomic::AtomicI16;\n\n                $impl\n            }\n            #[cfg(target_has_atomic = \"32\")]\n            4 if mem::align_of::<$type>() >= 4 => {\n                type $atomic = core::sync::atomic::AtomicI32;\n\n                $impl\n            }\n            #[cfg(target_has_atomic = \"64\")]\n            8 if mem::align_of::<$type>() >= 8 => {\n                type $atomic = core::sync::atomic::AtomicI64;\n\n                $impl\n            }\n            #[cfg(all(feature = \"nightly\", target_has_atomic = \"128\"))]\n            16 if mem::align_of::<$type>() >= 16 => {\n                type $atomic = core::sync::atomic::AtomicI128;\n\n                $impl\n            }\n            #[cfg(feature = \"fallback\")]\n            _ => $fallback_impl,\n            #[cfg(not(feature = \"fallback\"))]\n            _ => panic!(\"Atomic operations for type `{}` are not available as the `fallback` feature of the `atomic` crate is disabled.\", core::any::type_name::<$type>()),\n        }\n    };\n}\n\n#[inline]\npub const fn atomic_is_lock_free<T>() -> bool {\n    let size = mem::size_of::<T>();\n    let align = mem::align_of::<T>();\n\n    (cfg!(target_has_atomic = \"8\") & (size == 1) & (align >= 1))\n        | (cfg!(target_has_atomic = \"16\") & (size == 2) & (align >= 2))\n        | (cfg!(target_has_atomic = \"32\") & (size == 4) & (align >= 4))\n        | (cfg!(target_has_atomic = \"64\") & (size == 8) & (align >= 8))\n        | (cfg!(feature = \"nightly\")\n            & cfg!(target_has_atomic = \"128\")\n            & (size == 16)\n            & (align >= 16))\n}\n\n#[inline]\npub unsafe fn atomic_load<T: NoUninit>(dst: *mut T, order: Ordering) -> T {\n    match_atomic!(\n        T,\n        A,\n        mem::transmute_copy(&(*(dst as *const A)).load(order)),\n        fallback::atomic_load(dst)\n    )\n}\n\n#[inline]\npub unsafe fn atomic_store<T: NoUninit>(dst: *mut T, val: T, order: Ordering) {\n    match_atomic!(\n        T,\n        A,\n        (*(dst as *const A)).store(mem::transmute_copy(&val), order),\n        fallback::atomic_store(dst, val)\n    )\n}\n\n#[inline]\npub unsafe fn atomic_swap<T: NoUninit>(dst: *mut T, val: T, order: Ordering) -> T {\n    match_atomic!(\n        T,\n        A,\n        mem::transmute_copy(&(*(dst as *const A)).swap(mem::transmute_copy(&val), order)),\n        fallback::atomic_swap(dst, val)\n    )\n}\n\n#[inline]\nunsafe fn map_result<T, U>(r: Result<T, T>) -> Result<U, U> {\n    match r {\n        Ok(x) => Ok(mem::transmute_copy(&x)),\n        Err(x) => Err(mem::transmute_copy(&x)),\n    }\n}\n\n#[inline]\npub unsafe fn atomic_compare_exchange<T: NoUninit>(\n    dst: *mut T,\n    current: T,\n    new: T,\n    success: Ordering,\n    failure: Ordering,\n) -> Result<T, T> {\n    match_atomic!(\n        T,\n        A,\n        map_result((*(dst as *const A)).compare_exchange(\n            mem::transmute_copy(&current),\n            mem::transmute_copy(&new),\n            success,\n            failure,\n        )),\n        fallback::atomic_compare_exchange(dst, 
current, new)\n    )\n}\n\n#[inline]\npub unsafe fn atomic_compare_exchange_weak<T: NoUninit>(\n    dst: *mut T,\n    current: T,\n    new: T,\n    success: Ordering,\n    failure: Ordering,\n) -> Result<T, T> {\n    match_atomic!(\n        T,\n        A,\n        map_result((*(dst as *const A)).compare_exchange_weak(\n            mem::transmute_copy(&current),\n            mem::transmute_copy(&new),\n            success,\n            failure,\n        )),\n        fallback::atomic_compare_exchange(dst, current, new)\n    )\n}\n\n#[inline]\npub unsafe fn atomic_add<T: NoUninit>(dst: *mut T, val: T, order: Ordering) -> T\nwhere\n    Wrapping<T>: ops::Add<Output = Wrapping<T>>,\n{\n    match_atomic!(\n        T,\n        A,\n        mem::transmute_copy(&(*(dst as *const A)).fetch_add(mem::transmute_copy(&val), order),),\n        fallback::atomic_add(dst, val)\n    )\n}\n\n#[inline]\npub unsafe fn atomic_sub<T: NoUninit>(dst: *mut T, val: T, order: Ordering) -> T\nwhere\n    Wrapping<T>: ops::Sub<Output = Wrapping<T>>,\n{\n    match_atomic!(\n        T,\n        A,\n        mem::transmute_copy(&(*(dst as *const A)).fetch_sub(mem::transmute_copy(&val), order),),\n        fallback::atomic_sub(dst, val)\n    )\n}\n\n#[inline]\npub unsafe fn atomic_and<T: NoUninit + ops::BitAnd<Output = T>>(\n    dst: *mut T,\n    val: T,\n    order: Ordering,\n) -> T {\n    match_atomic!(\n        T,\n        A,\n        mem::transmute_copy(&(*(dst as *const A)).fetch_and(mem::transmute_copy(&val), order),),\n        fallback::atomic_and(dst, val)\n    )\n}\n\n#[inline]\npub unsafe fn atomic_or<T: NoUninit + ops::BitOr<Output = T>>(\n    dst: *mut T,\n    val: T,\n    order: Ordering,\n) -> T {\n    match_atomic!(\n        T,\n        A,\n        mem::transmute_copy(&(*(dst as *const A)).fetch_or(mem::transmute_copy(&val), order),),\n        fallback::atomic_or(dst, val)\n    )\n}\n\n#[inline]\npub unsafe fn atomic_xor<T: NoUninit + ops::BitXor<Output = T>>(\n    dst: *mut T,\n    val: T,\n    order: Ordering,\n) -> T {\n    match_atomic!(\n        T,\n        A,\n        mem::transmute_copy(&(*(dst as *const A)).fetch_xor(mem::transmute_copy(&val), order),),\n        fallback::atomic_xor(dst, val)\n    )\n}\n\n#[inline]\npub unsafe fn atomic_min<T: NoUninit + cmp::Ord>(dst: *mut T, val: T, order: Ordering) -> T {\n    match_signed_atomic!(\n        T,\n        A,\n        mem::transmute_copy(&(*(dst as *const A)).fetch_min(mem::transmute_copy(&val), order),),\n        fallback::atomic_min(dst, val)\n    )\n}\n\n#[inline]\npub unsafe fn atomic_max<T: NoUninit + cmp::Ord>(dst: *mut T, val: T, order: Ordering) -> T {\n    match_signed_atomic!(\n        T,\n        A,\n        mem::transmute_copy(&(*(dst as *const A)).fetch_max(mem::transmute_copy(&val), order),),\n        fallback::atomic_max(dst, val)\n    )\n}\n\n#[inline]\npub unsafe fn atomic_umin<T: NoUninit + cmp::Ord>(dst: *mut T, val: T, order: Ordering) -> T {\n    match_atomic!(\n        T,\n        A,\n        mem::transmute_copy(&(*(dst as *const A)).fetch_min(mem::transmute_copy(&val), order),),\n        fallback::atomic_min(dst, val)\n    )\n}\n\n#[inline]\npub unsafe fn atomic_umax<T: NoUninit + cmp::Ord>(dst: *mut T, val: T, order: Ordering) -> T {\n    match_atomic!(\n        T,\n        A,\n        mem::transmute_copy(&(*(dst as *const A)).fetch_max(mem::transmute_copy(&val), order),),\n        fallback::atomic_max(dst, val)\n    )\n}\n"
  },
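  {
    "path": "examples/serde_roundtrip.rs",
    "content": "//! Illustrative sketch, not part of the original crate sources: the file name\n//! `serde_roundtrip.rs` is hypothetical. It exercises the optional `serde`\n//! support, assuming the example is built with `--features serde` (serde_json\n//! is already available as a dev-dependency).\n\nuse atomic::Atomic;\nuse core::sync::atomic::Ordering::SeqCst;\n\nfn main() {\n    let a = Atomic::new(42u32);\n\n    // `Serialize` loads the current value and serializes it as a plain `u32`.\n    let json = serde_json::to_string(&a).expect(\"serialize\");\n    assert_eq!(json, \"42\");\n\n    // `Deserialize` reads a plain value and wraps it in a fresh `Atomic`.\n    let b: Atomic<u32> = serde_json::from_str(&json).expect(\"deserialize\");\n    assert_eq!(b.load(SeqCst), 42);\n}\n"
  },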
  {
    "path": "src/serde_impl.rs",
    "content": "use core::sync::atomic::Ordering;\n\nuse bytemuck::NoUninit;\nuse serde::{Deserialize, Deserializer, Serialize, Serializer};\n\nuse crate::Atomic;\n\nimpl<T> Serialize for Atomic<T>\nwhere\n    T: NoUninit + Serialize,\n{\n    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>\n    where\n        S: Serializer,\n    {\n        // Matches the atomic ordering used in `Debug` for `Atomic<T>`.\n        self.load(Ordering::Relaxed).serialize(serializer)\n    }\n}\n\nimpl<'de, T> Deserialize<'de> for Atomic<T>\nwhere\n    T: for<'a> Deserialize<'a>,\n{\n    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>\n    where\n        D: Deserializer<'de>,\n    {\n        Deserialize::deserialize(deserializer).map(Self::new)\n    }\n}\n"
  }
]