Repository: frostyplanet/crossfire-rs Branch: master Commit: b64cafe2a210 Files: 94 Total size: 741.0 KB Directory structure: gitextract_uux3elsf/ ├── .github/ │ └── workflows/ │ ├── cron_2.0_arm.yml │ ├── cron_2.0_x86.yml │ ├── cron_dev.yml │ ├── cron_dev_arm.yml │ ├── cron_dev_arm_trace.yml │ ├── cron_master_async_std_arm.yml │ ├── cron_master_async_std_x86.yml │ ├── cron_master_compio_arm.yml │ ├── cron_master_compio_x86.yml │ ├── cron_master_smol_arm.yml │ ├── cron_master_smol_x86.yml │ ├── cron_master_threaded_arm.yml │ ├── cron_master_threaded_x86.yml │ ├── cron_master_tokio_arm.yml │ ├── cron_master_tokio_x86.yml │ ├── fast.yml │ ├── leak.yml │ ├── miri_dev.yml │ ├── miri_dev_log.yml │ ├── miri_tokio.yml │ ├── miri_tokio_cur.yml │ ├── miri_tokio_cur_log.yml │ ├── miri_tokio_log.yml │ ├── pr.yml │ └── typos.yml ├── .gitignore ├── AGENTS.md ├── CHANGELOG.md ├── CONTRIBUTION ├── Cargo.toml ├── LICENSE ├── Makefile ├── README.md ├── benches/ │ └── inner.rs ├── git-hooks/ │ └── pre-commit ├── rustfmt.toml ├── src/ │ ├── async_rx.rs │ ├── async_tx.rs │ ├── backoff.rs │ ├── blocking_rx.rs │ ├── blocking_tx.rs │ ├── collections.rs │ ├── compat.rs │ ├── crossbeam/ │ │ ├── array_queue.rs │ │ ├── array_queue_mpsc.rs │ │ ├── array_queue_spsc.rs │ │ ├── err.rs │ │ ├── mod.rs │ │ └── seg_queue.rs │ ├── flavor/ │ │ ├── array.rs │ │ ├── array_mpsc.rs │ │ ├── array_spsc.rs │ │ ├── list.rs │ │ ├── mod.rs │ │ ├── one.rs │ │ ├── one_mpsc.rs │ │ └── one_spmc.rs │ ├── lib.rs │ ├── mpmc.rs │ ├── mpsc.rs │ ├── null.rs │ ├── oneshot.rs │ ├── select/ │ │ ├── mod.rs │ │ ├── multiplex.rs │ │ └── select.rs │ ├── shared.rs │ ├── sink.rs │ ├── spsc.rs │ ├── stream.rs │ ├── waitgroup.rs │ ├── waker.rs │ ├── waker_registry.rs │ └── weak.rs └── test-suite/ ├── Cargo.toml ├── benches/ │ ├── async_channel.rs │ ├── common.rs │ ├── crossbeam.rs │ ├── crossfire.rs │ ├── crossfire_select.rs │ ├── extra.rs │ ├── flume.rs │ ├── kanal.rs │ └── tokio.rs ├── scripts/ │ └── miri.sh └── src/ ├── lib.rs 
├── test_async.rs ├── test_async_blocking.rs ├── test_blocking_async.rs ├── test_blocking_context.rs ├── test_oneshot.rs ├── test_select_async.rs ├── test_select_blocking.rs ├── test_type_switch.rs └── test_waitgroup.rs ================================================ FILE CONTENTS ================================================ ================================================ FILE: .github/workflows/cron_2.0_arm.yml ================================================ name: cron-2.0-arm on: schedule: [cron: "30 */5 * * *"] workflow_dispatch: env: CARGO_TERM_COLOR: always jobs: build_and_test: runs-on: "macos-15" steps: - uses: actions/checkout@v2 with: ref: v2.0 - name: Run tests with tokio multi-thread run: env WORKFLOW=1 make test - name: Run tests with --release run: env WORKFLOW=1 make test_release - name: Run tests with --release tokio single-thread run: env WORKFLOW=1 SINGLE_THREAD_RUNTIME=1 make test_release ================================================ FILE: .github/workflows/cron_2.0_x86.yml ================================================ name: cron-2.0-x86 on: schedule: [cron: "30 */5 * * *"] workflow_dispatch: env: CARGO_TERM_COLOR: always jobs: build_and_test: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 with: ref: v2.0 - name: Run tests with tokio multi-thread run: env WORKFLOW=1 make test - name: Run tests with --release run: env WORKFLOW=1 make test_release - name: Run tests with --release tokio single-thread run: env WORKFLOW=1 SINGLE_THREAD_RUNTIME=1 make test_release ================================================ FILE: .github/workflows/cron_dev.yml ================================================ name: cron-dev on: schedule: [cron: "0 */6 * * *"] workflow_dispatch: env: CARGO_TERM_COLOR: always jobs: build_and_test: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 with: ref: dev - name: Build run: cargo build --verbose - name: Run tests with tokio multi-thread run: env WORKFLOW=1 make test - name: Run tests with tokio 
single-thread run: env WORKFLOW=1 SINGLE_THREAD_RUNTIME=1 make test - name: Run tests with --release run: env WORKFLOW=1 make test_release - name: Run tests with async_std run: env WORKFLOW=1 make test_async_std ================================================ FILE: .github/workflows/cron_dev_arm.yml ================================================ name: cron-dev-arm on: schedule: [cron: "0 */5 * * *"] workflow_dispatch: env: CARGO_TERM_COLOR: always jobs: build_and_test: runs-on: "macos-15" steps: - uses: actions/checkout@v2 with: ref: dev - name: Build run: cargo build --verbose # We use cargo-nextest because cargo test does not forward cancellation signal - uses: taiki-e/install-action@nextest - name: Run tests with tokio multi-thread run: cd test-suite; RUSTFLAGS="--cfg tokio_unstable" WORKFLOW=1 exec cargo nextest run -F="tokio" --hide-progress-bar -j 1 --no-capture - name: Run tests with --release run: cd test-suite; RUSTFLAGS="--cfg tokio_unstable" WORKFLOW=1 exec cargo nextest run -F="tokio" --hide-progress-bar -j 1 --no-capture -r - name: Run tests with --release tokio single-thread run: cd test-suite; RUSTFLAGS="--cfg tokio_unstable" WORKFLOW=1 SINGLE_THREAD_RUNTIME=1 exec cargo nextest run -F="tokio" --hide-progress-bar -j 1 --no-capture -r - name: Dump log on cancel if: ${{ cancelled() }} uses: actions/upload-artifact@v4 with: name: crossfire_ring path: /tmp/crossfire_ring.log ================================================ FILE: .github/workflows/cron_dev_arm_trace.yml ================================================ name: cron-dev-arm-trace on: schedule: [cron: "0 */5 * * *"] workflow_dispatch: env: CARGO_TERM_COLOR: always jobs: build_and_test: runs-on: "macos-15" steps: - uses: actions/checkout@v2 with: ref: dev - name: Rust version run: rustc -V - name: rustup run: rustup show - name: Build run: cargo build --verbose # We use cargo-nextest because cargo test does not forward cancellation signal - uses: taiki-e/install-action@nextest - name: Run 
tests with tokio multi-thread run: cd test-suite; RUSTFLAGS="--cfg tokio_unstable" WORKFLOW=1 exec cargo nextest run -F="tokio,trace_log" --hide-progress-bar -j 1 --no-capture - name: Run tests with tokio multi thread --release run: cd test-suite; RUSTFLAGS="--cfg tokio_unstable" WORKFLOW=1 exec cargo nextest run -F="tokio,trace_log" --hide-progress-bar -j 1 --no-capture -r - name: Run tests with tokio single-thread --release run: cd test-suite; RUSTFLAGS="--cfg tokio_unstable" WORKFLOW=1 SINGLE_THREAD_RUNTIME=1 exec cargo nextest run -F="tokio,trace_log" --hide-progress-bar -j 1 --no-capture -r - name: Dump log on cancel if: ${{ cancelled() || failure() }} uses: actions/upload-artifact@v4 with: name: crossfire_ring path: /tmp/crossfire_ring.log ================================================ FILE: .github/workflows/cron_master_async_std_arm.yml ================================================ name: cron-master-async_std-arm on: schedule: [cron: "0 */5 * * *"] workflow_dispatch: env: CARGO_TERM_COLOR: always jobs: build_and_test: runs-on: "macos-15" steps: - uses: actions/checkout@v2 # We use cargo-nextest because cargo test does not forward cancellation signal - uses: taiki-e/install-action@nextest - name: Run tests with async_std run: env WORKFLOW=1 make test_async_std - name: Run test with async_std release run: env WORKFLOW=1 make test_async_std_release - name: Run test with async_std release and trace_log run: cd test-suite; WORKFLOW=1 exec cargo nextest run -F="async_std,trace_log" --hide-progress-bar -j 1 --no-capture -r ================================================ FILE: .github/workflows/cron_master_async_std_x86.yml ================================================ name: cron-master-async_std-x86 on: schedule: [cron: "0 */5 * * *"] workflow_dispatch: env: CARGO_TERM_COLOR: always jobs: build_and_test: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 # We use cargo-nextest because cargo test does not forward cancellation signal - uses: 
taiki-e/install-action@nextest - name: Run tests with async_std run: env WORKFLOW=1 make test_async_std - name: Run test with async_std release run: env WORKFLOW=1 make test_async_std_release - name: Run test with async_std release and trace_log run: cd test-suite; WORKFLOW=1 exec cargo nextest run -F="async_std,trace_log" --hide-progress-bar -j 1 --no-capture -r ================================================ FILE: .github/workflows/cron_master_compio_arm.yml ================================================ name: cron-master-compio-arm on: schedule: [cron: "0 */5 * * *"] workflow_dispatch: env: CARGO_TERM_COLOR: always jobs: build_and_test: runs-on: "macos-15" steps: - uses: actions/checkout@v2 # We use cargo-nextest because cargo test does not forward cancellation signal - uses: taiki-e/install-action@nextest - name: Run tests with compio run: env WORKFLOW=1 make test_compio - name: Run test with compio release run: env WORKFLOW=1 make test_compio_release - name: Run test with compio release and trace_log run: cd test-suite; WORKFLOW=1 exec cargo nextest run -F="compio,trace_log" --hide-progress-bar -j 1 --no-capture -r - name: Run tests with compio_dispatcher run: env WORKFLOW=1 make test_compio_dispatcher - name: Run test with compio_dispatcher release run: env WORKFLOW=1 make test_compio_dispatcher_release - name: Run test with compio_dispatcher release and trace_log run: cd test-suite; WORKFLOW=1 exec cargo nextest run -F="compio_dispatcher,trace_log" --hide-progress-bar -j 1 --no-capture -r - name: Dump log on cancel if: ${{ cancelled() || failure() }} uses: actions/upload-artifact@v4 with: name: crossfire_ring path: /tmp/crossfire_ring.log ================================================ FILE: .github/workflows/cron_master_compio_x86.yml ================================================ name: cron-master-compio-x86 on: schedule: [cron: "0 */5 * * *"] workflow_dispatch: env: CARGO_TERM_COLOR: always jobs: build_and_test: runs-on: ubuntu-latest steps: - uses: 
actions/checkout@v2 # We use cargo-nextest because cargo test does not forward cancellation signal - uses: taiki-e/install-action@nextest - name: Run tests with compio run: env WORKFLOW=1 make test_compio - name: Run test with compio release run: env WORKFLOW=1 make test_compio_release - name: Run test with compio release and trace_log run: cd test-suite; WORKFLOW=1 exec cargo nextest run -F="compio,trace_log" --hide-progress-bar -j 1 --no-capture -r - name: Run tests with compio_dispatcher run: env WORKFLOW=1 make test_compio_dispatcher - name: Run test with compio_dispatcher release run: env WORKFLOW=1 make test_compio_dispatcher_release - name: Run test with compio_dispatcher release and trace_log run: cd test-suite; WORKFLOW=1 exec cargo nextest run -F="compio_dispatcher,trace_log" --hide-progress-bar -j 1 --no-capture -r - name: Dump log on cancel if: ${{ cancelled() || failure() }} uses: actions/upload-artifact@v4 with: name: crossfire_ring path: /tmp/crossfire_ring.log ================================================ FILE: .github/workflows/cron_master_smol_arm.yml ================================================ name: cron-master-smol-arm on: schedule: [cron: "0 */5 * * *"] workflow_dispatch: env: CARGO_TERM_COLOR: always jobs: build_and_test: runs-on: "macos-15" steps: - uses: actions/checkout@v2 # We use cargo-nextest because cargo test does not forward cancellation signal - uses: taiki-e/install-action@nextest - name: Run tests with smol run: env WORKFLOW=1 make test_smol - name: Run test with smol release run: env WORKFLOW=1 make test_smol_release - name: Run test with smol release and trace_log run: cd test-suite; WORKFLOW=1 exec cargo nextest run -F="smol,trace_log" --hide-progress-bar -j 1 --no-capture -r ================================================ FILE: .github/workflows/cron_master_smol_x86.yml ================================================ name: cron-master-smol-x86 on: schedule: [cron: "0 */5 * * *"] workflow_dispatch: env: 
CARGO_TERM_COLOR: always jobs: build_and_test: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 # We use cargo-nextest because cargo test does not forward cancellation signal - uses: taiki-e/install-action@nextest - name: Run tests with smol run: env WORKFLOW=1 make test_smol - name: Run test with smol release run: env WORKFLOW=1 make test_smol_release - name: Run test with smol release and trace_log run: cd test-suite; WORKFLOW=1 exec cargo nextest run -F="smol,trace_log" --hide-progress-bar -j 1 --no-capture -r ================================================ FILE: .github/workflows/cron_master_threaded_arm.yml ================================================ name: cron-master-threaded-arm on: schedule: [cron: "20 */5 * * *"] workflow_dispatch: env: CARGO_TERM_COLOR: always jobs: build_and_test: runs-on: "macos-15" steps: - uses: actions/checkout@v2 - name: Build run: cargo build --verbose - name: Run tests run: env WORKFLOW=1 make test test_blocking_context - name: Run tests with --release run: env WORKFLOW=1 make test_release test_blocking_context ================================================ FILE: .github/workflows/cron_master_threaded_x86.yml ================================================ name: cron-master-threaded-x86 on: schedule: [cron: "20 */5 * * *"] workflow_dispatch: env: CARGO_TERM_COLOR: always jobs: build_and_test: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - name: Build run: cargo build --verbose - name: Run tests run: env WORKFLOW=1 make test test_blocking_context - name: Run tests with --release run: env WORKFLOW=1 make test_release test_blocking_context ================================================ FILE: .github/workflows/cron_master_tokio_arm.yml ================================================ name: cron-master-tokio-arm on: schedule: [cron: "0 */5 * * *"] workflow_dispatch: env: CARGO_TERM_COLOR: always jobs: build_and_test: runs-on: "macos-15" steps: - uses: actions/checkout@v2 - name: Build run: cargo build 
--verbose # We use cargo-nextest because cargo test does not forward cancellation signal - uses: taiki-e/install-action@nextest - name: Run tests with tokio multi-thread run: cd test-suite; WORKFLOW=1 exec cargo nextest run -F="tokio,time" --hide-progress-bar -j 1 --no-capture - name: Run tests with tokio multi thread --release with trace log run: cd test-suite; WORKFLOW=1 exec cargo nextest run -F="tokio,trace_log" --hide-progress-bar -j 1 --no-capture -r - name: Run tests with tokio single-thread --release with trace_log run: cd test-suite; WORKFLOW=1 SINGLE_THREAD_RUNTIME=1 exec cargo nextest run -F="tokio,trace_log" --hide-progress-bar -j 1 --no-capture -r - name: Run tests with tokio multi thread --release run: cd test-suite; WORKFLOW=1 exec cargo nextest run -F="tokio,time" --hide-progress-bar -j 1 --no-capture -r - name: Run tests with tokio single-thread --release run: cd test-suite; WORKFLOW=1 SINGLE_THREAD_RUNTIME=1 exec cargo nextest run -F="tokio,time" --hide-progress-bar -j 1 --no-capture -r - name: Dump log on cancel if: ${{ cancelled() }} uses: actions/upload-artifact@v4 with: name: crossfire_ring path: /tmp/crossfire_ring.log ================================================ FILE: .github/workflows/cron_master_tokio_x86.yml ================================================ name: cron-master-tokio-x86 on: schedule: [cron: "0 */5 * * *"] workflow_dispatch: env: CARGO_TERM_COLOR: always jobs: build_and_test: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - name: Build run: cargo build --verbose - name: Run tests with tokio multi-thread run: env WORKFLOW=1 make test - name: Run tests with tokio current-thread run: env WORKFLOW=1 SINGLE_THREAD_RUNTIME=1 make test - name: Run tests with tokio multi-thread --release run: env WORKFLOW=1 make test_release - name: Run tests with tokio current-thread --release run: env WORKFLOW=1 SINGLE_THREAD_RUNTIME=1 make test_release ================================================ FILE: .github/workflows/fast.yml 
================================================ name: fast-validation on: push: branches: [ "master" ] env: CARGO_TERM_COLOR: always jobs: build_and_test: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - name: Build with default run: cargo build --verbose - name: Clippy with default run: cargo clippy -- -D warnings - name: Build with tokio run: cargo build -F tokio --verbose - name: Build with async_std run: cargo build -F async_std --verbose - name: doc test & internal test run: cargo test - name: doc build run: cargo doc --all-features - name: Run basic tests with tokio run: make test basic - name: Run timeout tests with async_std run: make test_async_std timeout ================================================ FILE: .github/workflows/leak.yml ================================================ name: leak on: schedule: [cron: "30 */10 * * *"] workflow_dispatch: env: CARGO_TERM_COLOR: always jobs: build_and_test: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - name: Build run: cargo build --verbose - name: Run test-suite with LSAN run: env NIGHTLY="+nightly" RUSTFLAGS="-Zsanitizer=leak" WORKFLOW=1 make test - name: Run internal test run: make test_internal ================================================ FILE: .github/workflows/miri_dev.yml ================================================ name: miri-dev on: workflow_dispatch: schedule: [cron: "20 */7 * * *"] env: CARGO_TERM_COLOR: always jobs: test_tokio: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 with: ref: dev - name: install specified nightly toolchain run: rustup toolchain install nightly-2025-12-01 - name: install miri component to specified nightly run: rustup component add miri --toolchain nightly-2025-12-01 - name: Run miri tests without log (tokio multi thread) run: cd test-suite; NIGHTLY_VERSION=nightly-2025-12-01 scripts/miri.sh -F tokio,time - name: Run miri tests without log (tokio current thread) run: cd test-suite; SINGLE_THREAD_RUNTIME=1 
NIGHTLY_VERSION=nightly-2025-12-01 scripts/miri.sh -F tokio ================================================ FILE: .github/workflows/miri_dev_log.yml ================================================ name: miri-dev-log on: workflow_dispatch: schedule: [cron: "30 */8 * * *"] env: CARGO_TERM_COLOR: always jobs: test_tokio: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 with: ref: dev - name: install specified nightly toolchain run: rustup toolchain install nightly-2025-12-01 - name: install miri run: rustup component add miri --toolchain nightly-2025-12-01 - name: Run miri tests with log (tokio multi thread) run: cd test-suite; NIGHTLY_VERSION=nightly-2025-12-01 scripts/miri.sh --features trace_log,tokio - name: collect log if: ${{ failure() }} uses: actions/upload-artifact@v4 with: name: crossfire_miri_tokio_multithread path: /tmp/crossfire_miri.log ================================================ FILE: .github/workflows/miri_tokio.yml ================================================ name: miri-tokio on: workflow_dispatch: schedule: [cron: "20 */6 * * *"] env: CARGO_TERM_COLOR: always jobs: build_and_test: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - name: install specified nightly toolchain run: rustup toolchain install nightly-2025-12-01 - name: install miri run: rustup component add miri --toolchain nightly-2025-12-01 #run: rustup component add --toolchain nightly-x86_64-unknown-linux-gnu miri - name: Run miri tests without log (tokio multi thread) run: cd test-suite; NIGHTLY_VERSION=nightly-2025-12-01 WORKFLOW=1 scripts/miri.sh -F tokio,time ================================================ FILE: .github/workflows/miri_tokio_cur.yml ================================================ name: miri-tokio-cur on: workflow_dispatch: schedule: [cron: "50 */7 * * *"] env: CARGO_TERM_COLOR: always jobs: build_and_test: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - name: install specified nightly toolchain run: rustup toolchain install 
nightly-2025-12-01 - name: install miri run: rustup component add miri --toolchain nightly-2025-12-01 #run: rustup component add --toolchain nightly-x86_64-unknown-linux-gnu miri - name: Run miri tests without log (tokio current thread) run: cd test-suite; NIGHTLY_VERSION=nightly-2025-12-01 WORKFLOW=1 SINGLE_THREAD_RUNTIME=1 scripts/miri.sh -F tokio,time ================================================ FILE: .github/workflows/miri_tokio_cur_log.yml ================================================ name: miri-tokio-cur-log on: workflow_dispatch: schedule: [cron: "20 */9 * * *"] env: CARGO_TERM_COLOR: always jobs: build_and_test: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - name: install specified nightly toolchain run: rustup toolchain install nightly-2025-12-01 - name: install miri run: rustup component add miri --toolchain nightly-2025-12-01 #run: rustup component add --toolchain nightly-x86_64-unknown-linux-gnu miri - name: Run miri tests with log (tokio current thread) run: cd test-suite; NIGHTLY_VERSION=nightly-2025-12-01 WORKFLOW=1 SINGLE_THREAD_RUNTIME=1 scripts/miri.sh --features trace_log,tokio - name: collect log if: ${{ failure() }} uses: actions/upload-artifact@v4 with: name: crossfire_miri path: /tmp/crossfire_miri.log ================================================ FILE: .github/workflows/miri_tokio_log.yml ================================================ name: miri-tokio-log on: workflow_dispatch: schedule: [cron: "10 */7 * * *"] env: CARGO_TERM_COLOR: always jobs: build_and_test: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - name: install specified nightly toolchain run: rustup toolchain install nightly-2025-12-01 - name: install miri run: rustup component add miri --toolchain nightly-2025-12-01 #run: rustup component add --toolchain nightly-x86_64-unknown-linux-gnu miri - name: Run miri tests with log (tokio multi thread) run: cd test-suite; NIGHTLY_VERSION=nightly-2025-12-01 WORKFLOW=1 scripts/miri.sh --features 
trace_log,tokio - name: collect log if: ${{ failure() }} uses: actions/upload-artifact@v4 with: name: crossfire_miri_tokio_multithread path: /tmp/crossfire_miri.log ================================================ FILE: .github/workflows/pr.yml ================================================ name: pr-validation on: pull_request: types: - opened - synchronize - ready_for_review env: CARGO_TERM_COLOR: always jobs: build_and_test: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - name: Build with default run: cargo build --verbose - name: Build with tokio run: cargo build -F tokio --verbose - name: Build with async_std run: cargo build -F async_std --verbose - name: Run tests with tokio run: make test - name: Run tests with async_std run: make test_async_std timeout ================================================ FILE: .github/workflows/typos.yml ================================================ name: Typo checker on: pull_request: types: - opened - synchronize - ready_for_review push: branches: [ "master" ] workflow_dispatch: jobs: run: name: Spell Check with Typos runs-on: ubuntu-22.04 steps: - name: Checkout Actions Repository uses: actions/checkout@v3 - name: Check spelling of the entire repository uses: crate-ci/typos@v1.33.1 ================================================ FILE: .gitignore ================================================ /target Cargo.lock tags *.sw* ================================================ FILE: AGENTS.md ================================================ # General - All comments and documents must be in English. - Omit unnecessary obvious comments during coding. - Documents must be concise, well organized into categories, with no duplicated topics or redundant information. Related topics should be organized in close proximity. - If you don't know a 3rd-party API, look it up on `https://docs.rs/`. - Do not run cargo clippy. - Always use shorter token paths by importing traits or structures.
- Avoid importing namespaces inside functions. # Test - Because crate::spsc, mpsc, mpmc module have the same type alias, in the test just use `crossfire::*`, and distinguish the types and functions with `spsc::`, `mpsc::`, `mpmc::` prefix. - Run test with `make test`. In order to prevent too long output truncated by AI tool, run test with `make test <test case name>` when you have a targeted test case. - Do not use cargo test to run the test, always use `make test`. Test case cannot be run concurrently with cargo test default param. - For statements that aren't expected to fail, use `expect()` rather than `unwrap()` ================================================ FILE: CHANGELOG.md ================================================ # Changelog All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). ## Unreleased ### Added ### Removed ### Changed ### Fixed ## [3.1.10] - 2026-05-05 ### Fix - waitgroup: Avoid miri report on stack borrow rule (issue #66) ## [3.1.9] - 2026-05-05 ### Fix - Reduce Send requirement in generic (issue #64), which makes the error prompt cleaner ## [3.1.8] - 2026-05-04 ### Added - Add WeakTx, which can downgrade from or upgrade to MTx / MAsyncTx ## [3.1.7] - 2026-03-19 ### Added - oneshot: Add TxOneshot::is_disconnected() ## [3.1.6] - 2026-03-18 ### Added - waitgroup: Add WaitGroupInline (which does not allocate) ## [3.1.4] - 2026-02-25 ### Changed - oneshot: Add Sync for TxOneshot ## [3.1.2] - 2026-02-16 ### Changed - waitgroup: Add inner T inside, just like Arc, this break previous 3.1.0 and 3.1.1 ## [3.1.1] - 2026-02-15 ### Changed - waitgroup: Add Sync for WaitGroupGuard ## [3.1.0] - 2026-02-14 ### Added - Add WaitGroup that support async & blocking, with custom threshold.
- oneshot: Add recv_async_timeout & recv_async_with_timer ### Changed - oneshot: Refactor oneshot and optimize out arc cost. try_recv() now require `&mut self`. - async_tx/async_rx: Refactor SendTimeoutFuture/RecvTimeoutFuture signature, to remove boxed future usage ## [3.0.6] - 2026-02-11 ### Fixed - Fix multiplex: Ensure all message received before disconnect ## [3.0.5] - 2026-02-11 ### Fixed - Fix msrv to 1.79 (NonZero usage) - Fix clippy warning and document ## [3.0.4] - 2026-02-03 ### Fixed - Avoid overflow evaluation in generic code Remove Send/'static/Unpin limit from Flavor/Queue trait and struct definition, add the limit to method. - Blocking method and struct don't need Unpin. - Async recv does not need Unpin. ## [3.0.3] - 2026-01-30 ### Fixed - Fix multiplex premature closing ## [3.0.2] - 2026-01-23 ### Added - Add missing into_async() method for blocking tx/rc ## [3.0.1] - 2026-01-22 ### Changed - Remove the mode setting from Multiplex (always use round-robin) - Add default custom weight for Multiplex select, optimize selection cost (throughput +20%) ## [3.0.0] - 2026-01-18 ### Changed - Disable direct_copy to make miri happy - Simplify waker cleaning logic ## [3.0.0.beta3] - 2026-01-16 ### Change - New implementation of ArraySpsc & ArrayMpsc, throughput +50% - New implementation of OneMpsc, minor speed up. - Change multiplex recv(), try_recv(), recv_timeout() to &self, and impl BlockingRxTrait. - Remove unused lifetime param in BlockingRxTrait. ### Fixed Problems from v3 beta - Add more backoff yielding for One flavor, to ensure 8x1, 16x1 cases stable, and minor optimize. - Fix commit_waiting state wrong condition, which lead to regression in cases like 1000 async tx. 
- Spsc should disable direct_copy (which only safe for MP) ## [3.0.0.beta2] - 2026-01-15 - Fix Array visibility in flavor module - Fix AsyncTxTrait for compio (The sleep does not have Send) ## [3.0.0.beta1] - 2026-01-14 ### Changed - Change interface to V3 generic flavor API - Optimize for SPSC ### Added - Add One flavor for bounded size 1 case - Add Null flavor for cancellation purpose channel - Add Select API - Add Multiplex API ## [2.1.10] - 2026-01-10 ### Added - Add `oneshot` module - Add test workflow for `compio` (by lisovskiy) ### Changed - Allow Blocking/Async Tx/Rx trait to be used as trait objects ## [2.1.9] - 2025-12-31 - Fix speed regression on ARM (fix backoff) ## [2.1.8] - 2025-11-08 ### Fixed - Add `#[must_use]` to hint missing await on Future (by MathisWellmann) ## [2.1.7] - 2025-11-08 ### Changed - Depend on `futures-core` crate instead of `futures` (issue #45) ## [2.1.6] - 2025-10-10 ### Changed - Delete the code probing tokio (to prevent an issue in cargo 1.87-1.90 triggering the code without tokio feature enable) ## [2.1.5] - 2025-10-06 ### Fixed - Remove doc_auto_cfg because removal by rust ## [2.1.4] - 2025-10-01 ### Changed - Adjust backoff for Arm (increase size 1 speed) - async: Use try_change_state() to reset init instead of get_state(), (Minor improvement on x86 bounded_100_async_n_n) ## [2.1.3] - 2025-09-26 ### Added - Add send_with_timer() and recv_with_timer() for other async runtime (eg. smol). ## [2.1.1-2.1.2] ### Changed - Minor changed to doc ## [2.1.0] - 2025-9-21 ### Changed - Refactor to drop dependency of crossbeam-channel, the underlayering is modified version of crossbeam-queue. - Bounded channel speed receive massive boost. - AsyncTx can convert back and forth with Tx, and AsyncRx can convert back and forth with Rx. - Optimise for VM machine that only have 1 cpu. - Use MaybeUninit to optimise the moving of large blob message for bounded channel, in nearly full scenario. 
- Rename ReceiveFuture to RecvFuture, ReceiveTimeoutFuture to RecvTimeoutFuture. ### Removed - Remove AsyncTx::send_blocking() and AsyncRx::recv_blocking(), instead, you can use type conversion into Tx/Rx. ## [2.0.26] - 2025-08-30 ### Fixed - waker_registry: Fix hang detect by miri in cancel_waker(), issue #34 ## [2.0.25] - 2025-08-29 ### Fixed - More strict with the waker status, issue #34 (use SeqCst in reset_init) ## [2.0.24] - 2025-08-26 ### Fixed - More strict with the waker status, issue #34 (spurious wake up, and waker commit) ## [2.0.23] - 2025-08-23 ### Fixed - Change is_disconnected() to SeqCst ## [2.0.22] - 2025-08-21 ### Fixed - RegistryMulti: Fix defend against infinite loop for sink/stream, code introduced from 2.0.20. ## [2.0.21] - 2025-08-21 ### Added - Add clone_to_vec() method in async / blocking tx/rx trait ### Fixed - AsyncSink: Fix typo in clear waker on drop (Does not affect stability) ## [2.0.20] - 2025-08-17 ### Added - AsyncTxTrait: Add Into> - AsyncRxTrait: Add Into> ### Fixed - Change the behavior of AsyncSink::poll_send() and AsyncStream::poll_item(), to make sure stream/sink wakers are notified, preventing deadlock from happening if user wants to cancel the operation. Add explanation to the document. - Defend against infinite loop when waking up all wakers, given the change of sink/stream. ## [2.0.19] - 2025-08-13 ### Added - Add capacity() ## [2.0.18] - 2025-08-11 ### Fixed - Change some atomic load ordering from Acquire to SeqCst to pass validation by Miri. ## [2.0.17] - 2025-08-08 ### Fixed - Reuse and cleanup waker as much as possible (for idle select scenario) - Change some atomic store ordering from Release to SeqCst to avoid further trouble. ## [2.0.16] - 2025-08-04 ### Added - Add into_blocking() - Add missing into_sink() for MAsyncTx. - Add From for AsyncSink and AsyncStream. 
## [2.0.15] - 2025-08-04 ### Added - Add missing conversion: MAsyncTx->AsyncTx and MTx->Tx ## [2.0.14] - 2025-08-03 ### Changed - Optimise bounded size 1 speed with backoff - Updated benchmark result vs kanal to wiki ## [2.0.13] - 2025-07-24 ### Fixed - Fix a deadlock https://github.com/frostyplanet/crossfire-rs/issues/22 ### Added - Allow type conversion from AsyncTx -> Tx, AsyncRx -> Rx ## [2.0.12] - 2025-07-18 ### Fixed - Fix a possible hang in LockedQueue introduced from v2.0.5 ## [2.0.11] - 2025-07-18 ### Added - Add Deref/AsRef for sender & receiver type to ChannelShared - Add is_full(), get_tx_count(), get_rx_count() - Revert the removal of send_blocking() and recv_blocking() (will maintain through 2.0.x) ### Removed - Remove DerefMut because it's no used. ### Fixed - Fix send_timeout() in blocking context ## [2.0.10] yanked published with the wrong branch, do not use. ## [2.0.9] - 2025-07-16 ### Added - Add is_disconnected() to sender and receiver type. - Add Deref for AsyncSink to AsyncTx, and AsyncStream to AsyncRx, remove duplicated code. ### Fixed - Fix a rare deadlock, when only one future in async runtime (for example channel async-blocking or blocking-async). Runtime will spuriously wake up with changed Waker. ### Removed - Remove send_blocking() & recv_blocking(), which is anti-pattern. (Calling function that blocks might lead to deadlock in async runtime) ## [2.0.8] - 2025-07-14 ### Added - AsyncStream: Add try_recv(), len() & is_empty() ## [2.0.7] - 2025-07-13 ### Added - AsyncStream: Add poll_item() for writing custom future, as a replacement to AsyncRx's poll_item(), but without the need of LockedWaker. - Add AsyncSink::poll_send() for writing custom future, as a replacement to AsyncTx's poll_send(), but without the need of LockedWaker. - Implement Debug & Display for all senders and receivers. ### Remove - Hide LockedWaker, since AsyncRx::poll_item() and AsyncTx::poll_send() is hidden. 
### Changed - Optimise speed for SPSC & MPSC up to 60% (with WeakCell) - Add execution time log to test cases. ### Fixed - Fix LockedQueue empty flag (not affecting usage, just not accurate to internal test cases) ## [2.0.6] - 2025-07-10 ### Added - Support timeout and tested on async-std ### Changed - mark make_recv_future() & make_send_future() deprecated. - Change poll_send() & poll_item() to private function. ## [2.0.5] - 2025-07-09 ### Added - Add send_timeout() & recv_timeout() for async context ### Fixed - AsyncRx: Fix rare case that message left on disconnect - Fixed document typo and improve description. ### Changed - Optimise RegistryMulti, with 20%+ speed improved on MPSC / MPMC ## [2.0.4] - 2025-07-08 ### Changed - Remove Sync marker in Tx, Rx, AsyncTx, AsyncRx to prevent misuse with Arc ## [2.0.3] - 2025-07-07 ### Changed - Remove duplicated code. ### Fixed - AsyncRx should not have Clone. - Protect against misuse of spsc/mpsc when user should use mpmc (avoiding deadlocks) ## [2.0.2] - 2025-07-05 ### Added - Add channels for blocking context (which equals to crossbeam) ### Changed - Remove unused Clone for LockedWaker ### Fixed - spsc: Add missing unsupported size=0 overwrites ## [2.0.1] - 2025-07-03 ### Added - Add timeout API for blocking context (by Zach Schoenberger) ### Changed - Set min Rust version and edition in alignment with crossbeam (by Zach Schoenberger) ## [2.0.0] - 2025-06-27 ### Added - spsc module - Benchmark suite written with criterion. ### Changed - Refactor the API design. Unify sender and receiver types. - Removal of macro rules and refactor SendWakers & RecvWakers into Enum, thus removal of generic type in Channelshared structure. - Removal of the spin lock in LockedWaker. Simplifying the logic without losing performance. - Rewrite the test cases with rstest. ### Removed - Drop SelectSame module, because of hard to maintain, can be replace with future-select. 
## [1.1.0] - 2025-06-19 ### Changed - Migrate repo From to - Change rust edition to 2024, re-format the code and fix warnings. ## [1.0.1] - 2023-08-29 ### Fixed - Fix atomic ordering for ARM (Have been tested on some ARM deployment) ## [1.0.0] - 2022-12-03 ### Changed - Format all code and announcing v1.0 - I decided that x86_64 stable after one year test. ## [0.1.7] - 2021-08-22 ### Fixed - tx: Remove redundant old_waker.is_waked() on abandon ## [0.1.6] - 2021-08-21 ### Fixed - mpsc: Fix RxFuture old_waker.abandon in poll_item ## [0.1.5] - 2021-06-28 ### Changed - Replace deprecated compare_and_swap ### Fixed - SelectSame: Fix close_handler last_index - Fix fetch_add/sub ordering for ARM (discovered on test hang) ================================================ FILE: CONTRIBUTION ================================================ Original Author: - Plan (frostyplanet at gmail.com) Thanks: - Zach Schoenberger - MathisWellmann - lisovskiy - Sherlock-Holo ================================================ FILE: Cargo.toml ================================================ [workspace] members = ["test-suite"] [package] name = "crossfire" version = "3.1.10" authors = ["plan "] edition = "2021" license = "Apache-2.0" homepage = "https://github.com/frostyplanet/crossfire-rs" readme = "README.md" repository = "https://github.com/frostyplanet/crossfire-rs" documentation = "https://docs.rs/crossfire" keywords = ["async", "non-blocking", "lock-free", "channel"] categories = ["concurrency", "data-structures"] exclude = ["/ci/*", "/bors.toml"] description = "channels for async and threads" rust-version = "1.79" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # Because cargo-show-asm cannot match local crossfire package #[patch.crates-io] #crossfire = { path = "." 
} [dependencies] crossbeam-utils = "0.8" futures-core = "0.3" parking_lot = "0" tokio = { version = "1", features = ["time", "rt"], optional=true } async-std = {version = "1", optional=true} log = { version="0", optional=true} smallvec = "1" [dev-dependencies] log = "0" tokio = { version = "1", features = ["time", "sync", "rt-multi-thread", "rt", "macros"] } smol = "2" captains-log = "0" ## For profiling symbol #[profile.release] #debug = true [features] default = [] # Enable compat model for v2.x API compat = [] # This will enable timeout function tokio = ["dep:tokio"] # This will enable timeout function async_std = ["dep:async-std"] # for test workflow debugging trace_log = ["dep:log"] [package.metadata.docs.rs] all-features = true # enable features in the documentation rustdoc-args = ["--cfg", "docsrs"] [lints.clippy] new_ret_no_self = "allow" needless_range_loop = "allow" type_complexity = "allow" needless_return = "allow" mut_from_ref = "allow" transmute_ptr_to_ref = "allow" len_without_is_empty = "allow" new_without_default = "allow" result_unit_err = "allow" ================================================ FILE: LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright (C) 2023-2025 The Crossfire Project Developers Copyright (C) 2016-2023 Yunify Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: Makefile ================================================ PRIMARY_TARGET := $(firstword $(MAKECMDGOALS)) ARGS := $(filter-out $(PRIMARY_TARGET), $(MAKECMDGOALS)) RUN_TEST_CASE = _run_test_case() { \ case="$(filter-out $ARGS,$(MAKECMDGOALS))"; \ if [ -n "$${WORKFLOW}" ]; then \ export TEST_FLAG=" -- -q --test-threads=1"; \ else \ export TEST_FLAG=" -- --nocapture --test-threads=1"; \ export LOG_FILE="/tmp/test_crossfire.log"; \ fi; \ RUST_BACKTRACE=full cargo ${NIGHTLY} test -p crossfire-test ${ARGS} $${FEATURE_FLAG} $${TEST_FLAG}; \ } RUN_RELEASE_CASE = _run_test_release_case() { \ case="$(filter-out $@,$(MAKECMDGOALS))"; \ if [ -n "$${WORKFLOW}" ]; then \ export TEST_FLAG=" --release -- -q --test-threads=1"; \ else \ export LOG_FILE="/tmp/test_crossfire.log"; \ export TEST_FLAG=" --release -- --nocapture --test-threads=1"; \ fi; \ RUST_BACKTRACE=full cargo ${NIGHTLY} test -p crossfire-test ${ARGS} $${FEATURE_FLAG} $${TEST_FLAG}; \ } RUN_BENCH = _run_bench() { \ cd test-suite; \ cargo bench --bench ${ARGS}; \ } INSTALL_GITHOOKS = _install_githooks() { \ git config core.hooksPath ./git-hooks; \ } .PHONY: git-hooks git-hooks: @$(INSTALL_GITHOOKS); _install_githooks .PHONY: init init: git-hooks .PHONY: fmt fmt: init cargo fmt .PHONY: doc doc: RUSTDOCFLAGS="--cfg docsrs" cargo +nightly doc --all-features # usage: # make test # make test test_async .PHONY: test test: init @echo "Run test" @${RUN_TEST_CASE}; 
FEATURE_FLAG="-F tokio,time"; _run_test_case @echo "Done" # test with ringfile for deadlog .PHONY: test_log test_log: init @echo "Run test" @${RUN_TEST_CASE}; FEATURE_FLAG="-F tokio,time,trace_log"; _run_test_case @echo "Done" .PHONY: test_async_std test_async_std: init @echo "Run test" @${RUN_TEST_CASE}; FEATURE_FLAG="-F async_std,time,"; _run_test_case @echo "Done" .PHONY: test_log_async_std test_log_async_std: init @echo "Run test" @${RUN_TEST_CASE}; FEATURE_FLAG="-F async_std,time,trace_log"; _run_test_case @echo "Done" .PHONY: test_release test_release: @${RUN_RELEASE_CASE}; FEATURE_FLAG="-F tokio,time"; _run_test_release_case # test with ringfile for deadlog .PHONY: test_log_release test_log_release: @${RUN_RELEASE_CASE}; FEATURE_FLAG="-F tokio,time,trace_log"; _run_test_release_case .PHONY: test_async_std_release test_async_std_release: @${RUN_RELEASE_CASE}; FEATURE_FLAG="-F async_std,time"; _run_test_release_case # test with ringfile for deadlog .PHONY: test_log_async_std_release test_log_async_std_release: @${RUN_RELEASE_CASE}; FEATURE_FLAG="-F async_std,time,trace_log"; _run_test_release_case .PHONY: test_smol test_smol: @${RUN_TEST_CASE}; FEATURE_FLAG="-F smol,time"; _run_test_case # test with ringfile for deadlog .PHONY: test_log_smol test_log_smol: @${RUN_TEST_CASE}; FEATURE_FLAG="-F smol,time,trace_log"; _run_test_case .PHONY: test_smol_release test_smol_release: @${RUN_RELEASE_CASE}; FEATURE_FLAG="-F smol,time"; _run_test_release_case # test with ringfile for deadlog .PHONY: test_log_smol_release test_log_smol_release: @${RUN_RELEASE_CASE}; FEATURE_FLAG="-F smol,trace_log,time"; _run_test_release_case .PHONY: test_compio test_compio: @${RUN_TEST_CASE}; FEATURE_FLAG="-F compio"; _run_test_case # test with ringfile for deadlog .PHONY: test_log_compio test_log_compio: @${RUN_TEST_CASE}; FEATURE_FLAG="-F compio,trace_log"; _run_test_case .PHONY: test_compio_release test_compio_release: @${RUN_RELEASE_CASE}; FEATURE_FLAG="-F compio"; 
_run_test_release_case # test with ringfile for deadlog .PHONY: test_log_compio_release test_log_compio_release: @${RUN_RELEASE_CASE}; FEATURE_FLAG="-F compio,trace_log"; _run_test_release_case .PHONY: test_compio_dispatcher test_compio_dispatcher: @${RUN_TEST_CASE}; FEATURE_FLAG="-F compio_dispatcher"; _run_test_case # test with ringfile for deadlog .PHONY: test_log_compio_dispatcher test_log_compio_dispatcher: @${RUN_TEST_CASE}; FEATURE_FLAG="-F compio_dispatcher,trace_log"; _run_test_case .PHONY: test_compio_dispatcher_release test_compio_dispatcher_release: @${RUN_RELEASE_CASE}; FEATURE_FLAG="-F compio_dispatcher"; _run_test_release_case # test with ringfile for deadlog .PHONY: test_log_compio_dispatcher_release test_log_compio_dispatcher_release: @${RUN_RELEASE_CASE}; FEATURE_FLAG="-F compio_dispatcher,trace_log"; _run_test_release_case # Usage: make bench crossfire bounded_100_async_1_1 .PHONY: bench bench: @${RUN_BENCH}; _run_bench .PHONY: test_leak test_leak: test_internal @${RUN_TEST_CASE}; NIGHTLY="+nightly" RUSTFLAGS="-Zsanitizer=leak"; _run_test_case .PHONY: test_internal test_internal: RUSTFLAGS="-Zsanitizer=leak" cargo +nightly test -F trace_log --lib -- --nocapture ${ARGS} .PHONY: build build: init cargo build .DEFAULT_GOAL = build # Target name % means that it is a rule that matches anything, @: is a recipe; # the : means do nothing %: @: ================================================ FILE: README.md ================================================ # Crossfire [![Build Status](https://github.com/frostyplanet/crossfire-rs/workflows/Rust/badge.svg)]( https://github.com/frostyplanet/crossfire-rs/actions) [![License](https://img.shields.io/badge/license-MIT%20OR%20Apache--2.0-blue.svg)]( https://github.com/qignstor/crossfire-rs#license) [![Cargo](https://img.shields.io/crates/v/crossfire.svg)]( https://crates.io/crates/crossfire) [![Documentation](https://docs.rs/crossfire/badge.svg)]( https://docs.rs/crossfire) [![Rust 
1.79+](https://img.shields.io/badge/rust-1.79+-lightgray.svg)]
So we provide a function `detect_backoff_cfg()` to detect the running platform. Calling it within the initialization section of your code, will get a 2x performance boost on VPS. The benchmark is written in the criterion framework. You can run the benchmark by: ``` shell make bench crossfire make bench crossfire_select ``` ## APIs ### Concurrency Modules - [spsc](https://docs.rs/crossfire/latest/crossfire/spsc/index.html), [mpsc](https://docs.rs/crossfire/latest/crossfire/mpsc/index.html), [mpmc](https://docs.rs/crossfire/latest/crossfire/mpmc/index.html). Each has different underlying implementation optimized to its concurrent model. The SP or SC interface is only for non-concurrent operation. It's more memory-efficient in waker registration, and has atomic ops cost reduced in the lockless algorithm. - [oneshot](https://docs.rs/crossfire/latest/crossfire/oneshot/index.html) has its special sender/receiver type because using `Tx` / `Rx` will be too heavy. - [select](https://docs.rs/crossfire/latest/crossfire/select/index.html): - [Select<'a>](https://docs.rs/crossfire/latest/crossfire/select/struct.Select.html): crossbeam-channel style type erased API, borrows receiver address and select with "token" - [Multiplex](https://docs.rs/crossfire/latest/crossfire/select/struct.Multiplex.html): Multiplex stream that owns multiple receiver, select from the same type of channel flavors, for the same type of message. - [waitgroup](https://docs.rs/crossfire/latest/crossfire/waitgroup/index.html) High performance WaitGroup that allows custom threshold. ### Flavors The following lockless queues are expose in [flavor](https://docs.rs/crossfire/latest/crossfire/flavor/index.html) module, and each one have type alias in spsc/mpsc/mpmc: - `List` (which use crossbeam `SegQueue`) - `Array` (which is an enum that wraps crossbeam `ArrayQueue`, and a `One` if init with size<=1) - For a bounded channel, a 0 size case is not supported yet. (rewrite as 1 size). 
- The implementation for spsc & mpsc is simplified from mpmc version. - `One` (which derives from `ArrayQueue` algorithm, but have better performance in size=1 scenario, because it have two slots to allow reader and writer works concurrently) - `Null` (See the doc [null](https://docs.rs/crossfire/latest/crossfire/null/index.html)), for cancellation purpose channel, that only wakeup on closing. **NOTE** : Although the name `Array`, `List` are the same between spsc/mpsc/mpmc module, they are different type alias local to its parent module. We suggest distinguish by namespace when import for use. ### Channel builder function Aside from function `bounded_*`, `unbounded_*` which specify the sender / receiver type, each module has [build()](https://docs.rs/crossfire/latest/crossfire/mpmc/fn.build.html) and [new()](https://docs.rs/crossfire/latest/crossfire/mpmc/fn.new.html) function, which can apply to any channel flavors, and any async/blocking combinations. ### Types
Context Sender (Producer) Receiver (Consumer)
Single Multiple Single Multiple
Blocking BlockingTxTrait BlockingRxTrait
Tx MTx Rx MRx
Weak referenceWeakTx
Async AsyncTxTrait AsyncRxTrait
AsyncTx MAsyncTx AsyncRx MAsyncRx
*Safety*: For the SP / SC version, `AsyncTx`, `AsyncRx`, `Tx`, and `Rx` are not `Clone` and without `Sync`. Although can be moved to other threads, but not allowed to use send/recv while in an Arc. (Refer to the compile_fail examples in the type document). The benefit of using the SP / SC API is completely lockless waker registration, in exchange for a performance boost. The sender/receiver can use the **`From`** trait to convert between blocking and async context counterparts (refer to the [example](#example) below) ### Error types Error types are the same as crossbeam-channel: `TrySendError`, `SendError`, `SendTimeoutError`, `TryRecvError`, `RecvError`, `RecvTimeoutError` ### Async compatibility Tested on tokio-1.x and async-std-1.x, crossfire is runtime-agnostic. The following scenarios are considered: * The `AsyncTx::send()` and `AsyncRx::recv()` operations are **cancellation-safe** in an async context. You can safely use the select! macro and timeout() function in tokio/futures in combination with recv(). On cancellation, `SendFuture` and `RecvFuture` will trigger drop(), which will clean up the state of the waker, making sure there is no memory-leak and deadlock. But you cannot know the true result from SendFuture, since it's dropped upon cancellation. Thus, we suggest using `AsyncTx::send_timeout()` instead. * When the "tokio" or "async_std" feature is enabled, we also provide two additional functions: - `AsyncTx::send_timeout()`, which will return the message that failed to be sent in `SendTimeoutError`. We guarantee the result is atomic. Alternatively, you can use `AsyncTx::send_with_timer()`. - `AsyncRx::recv_timeout()`, we guarantee the result is atomic. Alternatively, you can use `AsyncRx::recv_with_timer()`. * The waker footprint: When using a multi-producer and multi-consumer scenario, there's a small memory overhead to pass along a `Weak` reference of wakers. 
Because we aim to be lockless, when the sending/receiving futures are canceled (like tokio::time::timeout()), it might trigger an immediate cleanup if the try-lock is successful, otherwise will rely on lazy cleanup. (This won't be an issue because weak wakers will be consumed by actual message send and recv). On an idle-select scenario, like a notification for close, the waker will be reused as much as possible if poll() returns pending. * Handle written future: The future object created by `AsyncTx::send()`, `AsyncTx::send_timeout()`, `AsyncRx::recv()`, `AsyncRx::recv_timeout()` is `Sized`. You don't need to put them in `Box`. If you like to use poll function directly for complex behavior, you can call `AsyncSink::poll_send()` or `AsyncStream::poll_item()` with Context. ## Usage Cargo.toml: ```toml [dependencies] crossfire = "3.1" ``` ### Feature flags * `compat`: Enable the compat model, which has the same API namespace struct as V2.x * `tokio`: Enable `send_timeout()`, `recv_timeout()` with tokio sleep function. (conflict with `async_std` feature) * `async_std`: Enable send_timeout, recv_timeout with async-std sleep function. (conflict with `tokio` feature) * `trace_log`: Development mode, to enable internal log while testing or benchmark, to debug deadlock issues. 
### Example blocking / async sender receiver mixed together ```rust extern crate crossfire; use crossfire::*; #[macro_use] extern crate tokio; use tokio::time::{sleep, interval, Duration}; #[tokio::main] async fn main() { let (tx, rx) = mpmc::bounded_async::(100); let mut recv_counter = 0; let mut co_tx = Vec::new(); let mut co_rx = Vec::new(); const ROUND: usize = 1000; let _tx: MTx> = tx.clone().into_blocking(); co_tx.push(tokio::task::spawn_blocking(move || { for i in 0..ROUND { _tx.send(i).expect("send ok"); } })); co_tx.push(tokio::spawn(async move { for i in 0..ROUND { tx.send(i).await.expect("send ok"); } })); let _rx: MRx> = rx.clone().into_blocking(); co_rx.push(tokio::task::spawn_blocking(move || { let mut count: usize = 0; 'A: loop { match _rx.recv() { Ok(_i) => { count += 1; } Err(_) => break 'A, } } count })); co_rx.push(tokio::spawn(async move { let mut count: usize = 0; 'A: loop { match rx.recv().await { Ok(_i) => { count += 1; } Err(_) => break 'A, } } count })); for th in co_tx { let _ = th.await.unwrap(); } for th in co_rx { recv_counter += th.await.unwrap(); } assert_eq!(recv_counter, ROUND * 2); } ``` ## Test status **NOTE**: Because we has push the speed to a level no one has gone before, it can put a pure pressure to the async runtime. Some hidden bug (especially atomic ops on weaker ordering platform) might occur: The test is placed in test-suite directory, run with: ``` make test ```
archruntimeworkflowstatus
x86_64 threaded cron_master_threaded_x86 STABLE
tokio 1.47.1 cron_master_tokio_x86 STABLE
async-std cron_master_async_std_x86 STABLE
smol cron_master_smol-x86 STABLE
compio cron_master_compio-x86 verifying
arm threaded cron_master_threaded_arm
STABLE
tokio >= 1.48 (tokio PR #7622) cron_master_tokio_arm
SHOULD UPGRADE tokio to 1.48
STABLE
async-std cron_master_async_std_arm STABLE
smol cron_master_smol_arm STABLE
compio cron_master_compio_arm verifying
miri (emulation) threaded miri_tokio
miri_tokio_cur
STABLE
tokioSTABLE
async-std- (timerfd_create) not supported by miri
smol- (timerfd_create) not supported by miri
### Debugging deadlock issue **Debug locally**: Use `--features trace_log` to run the bench or test until it hangs, then press `ctrl+c` or send `SIGINT`, there will be latest log dump to /tmp/crossfire_ring.log (refer to tests/common.rs `_setup_log()`) **Debug with github workflow**: https://github.com/frostyplanet/crossfire-rs/issues/37 ================================================ FILE: benches/inner.rs ================================================ use criterion::*; use crossbeam_queue::{ArrayQueue, SegQueue}; use crossbeam_utils::Backoff; use crossfire::collections::*; use parking_lot::Mutex; use std::cell::UnsafeCell; use std::collections::VecDeque; use std::sync::{ atomic::{AtomicBool, AtomicUsize, Ordering}, Arc, Weak, }; use std::thread; use std::time::Duration; const ONE_MILLION: usize = 1000000; struct Foo { _inner: usize, } pub struct LockedQueue { empty: AtomicBool, queue: Mutex>, } impl LockedQueue { #[inline] pub fn new(cap: usize) -> Self { Self { empty: AtomicBool::new(true), queue: Mutex::new(VecDeque::with_capacity(cap)) } } #[inline(always)] pub fn push(&self, msg: T) { let mut guard = self.queue.lock(); if guard.is_empty() { self.empty.store(false, Ordering::Release); } guard.push_back(msg); } #[inline(always)] pub fn pop(&self) -> Option { if self.empty.load(Ordering::Acquire) { return None; } let mut guard = self.queue.lock(); if let Some(item) = guard.pop_front() { if guard.len() == 0 { self.empty.store(true, Ordering::Release); } Some(item) } else { None } } #[inline(always)] pub fn len(&self) -> usize { let guard = self.queue.lock(); guard.len() } #[allow(dead_code)] #[inline(always)] pub fn exists(&self) -> bool { !self.empty.load(Ordering::Acquire) } } pub struct SpinQueue { lock: AtomicBool, queue: UnsafeCell>, } unsafe impl Send for SpinQueue {} unsafe impl Sync for SpinQueue {} impl SpinQueue { fn new(cap: usize) -> Self { Self { lock: AtomicBool::new(false), queue: UnsafeCell::new(VecDeque::with_capacity(cap)) } } 
#[inline(always)] fn get_queue(&self) -> &mut VecDeque { unsafe { std::mem::transmute(self.queue.get()) } } #[inline] fn push(&self, msg: T) { let backoff = Backoff::new(); while self.lock.swap(true, Ordering::SeqCst) { backoff.spin(); } self.get_queue().push_back(msg); self.lock.store(false, Ordering::Release); } #[inline] fn pop(&self) -> Option { let backoff = Backoff::new(); while self.lock.swap(true, Ordering::SeqCst) { backoff.spin(); } let r = self.get_queue().pop_front(); self.lock.store(false, Ordering::Release); r } } fn _bench_spin_queue(count: usize) { let queue = Arc::new(SpinQueue::>::new(10)); let mut th_s = Vec::new(); let counter = Arc::new(AtomicUsize::new(0)); for _ in 0..count { let _queue = queue.clone(); let _counter = counter.clone(); th_s.push(thread::spawn(move || loop { let i = _counter.fetch_add(1, Ordering::SeqCst); if i < ONE_MILLION { if let Some(weak) = _queue.pop() { let _ = weak.upgrade(); } } else { break; } })); } th_s.push(thread::spawn(move || { for _ in 0..ONE_MILLION { let foo = Arc::new(Foo { _inner: 1 }); queue.push(Arc::downgrade(&foo)); } })); for th in th_s { let _ = th.join(); } } fn _bench_locked_queue(count: usize) { let queue = Arc::new(LockedQueue::>::new(10)); let mut th_s = Vec::new(); let counter = Arc::new(AtomicUsize::new(0)); for _ in 0..count { let _queue = queue.clone(); let _counter = counter.clone(); th_s.push(thread::spawn(move || loop { let i = _counter.fetch_add(1, Ordering::SeqCst); if i < ONE_MILLION { if let Some(weak) = _queue.pop() { let _ = weak.upgrade(); } } else { break; } })); } th_s.push(thread::spawn(move || { for _ in 0..ONE_MILLION { let foo = Arc::new(Foo { _inner: 1 }); queue.push(Arc::downgrade(&foo)); } })); for th in th_s { let _ = th.join(); } } fn _bench_array_queue(count: usize) { let queue = Arc::new(ArrayQueue::>::new(1)); let mut th_s = Vec::new(); let counter = Arc::new(AtomicUsize::new(0)); for _ in 0..count { let _queue = queue.clone(); let _counter = counter.clone(); 
th_s.push(thread::spawn(move || loop { let i = _counter.fetch_add(1, Ordering::SeqCst); if i < ONE_MILLION { if let Some(weak) = _queue.pop() { let _ = weak.upgrade(); } } else { break; } })); } th_s.push(thread::spawn(move || { for _ in 0..ONE_MILLION { let foo = Arc::new(Foo { _inner: 1 }); queue.force_push(Arc::downgrade(&foo)); } })); for th in th_s { let _ = th.join(); } } fn _bench_seg_queue(count: usize) { let queue = Arc::new(SegQueue::>::new()); let mut th_s = Vec::new(); let counter = Arc::new(AtomicUsize::new(0)); for _ in 0..count { let _queue = queue.clone(); let _counter = counter.clone(); th_s.push(thread::spawn(move || loop { let i = _counter.fetch_add(1, Ordering::SeqCst); if i < ONE_MILLION { if let Some(weak) = _queue.pop() { let _ = weak.upgrade(); } } else { break; } })); } th_s.push(thread::spawn(move || { for _ in 0..ONE_MILLION { let foo = Arc::new(Foo { _inner: 1 }); queue.push(Arc::downgrade(&foo)); } })); for th in th_s { let _ = th.join(); } } fn _bench_weak_cell(count: usize) { let cell = Arc::new(WeakCell::::new()); let mut th_s = Vec::new(); let counter = Arc::new(AtomicUsize::new(0)); for _ in 0..count { let _cell = cell.clone(); let _counter = counter.clone(); th_s.push(thread::spawn(move || loop { let i = _counter.fetch_add(1, Ordering::SeqCst); if i < ONE_MILLION { let _ = _cell.pop(); } else { break; } })); } th_s.push(thread::spawn(move || { for _ in 0..ONE_MILLION { let foo = Arc::new(Foo { _inner: 1 }); cell.put(Arc::downgrade(&foo)); } })); for th in th_s { let _ = th.join(); } } fn _bench_empty(c: &mut Criterion) { let mut group = c.benchmark_group("empty"); group.significance_level(0.1).sample_size(50); group.measurement_time(Duration::from_secs(10)); group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_function("weak_cell", |b| { b.iter(|| { let cell = WeakCell::::new(); for _ in 0..ONE_MILLION { let _ = cell.pop(); } }) }); group.measurement_time(Duration::from_secs(10)); 
group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_function("spin VecDeque", |b| { b.iter(|| { let queue = SpinQueue::::new(10); for _ in 0..ONE_MILLION { let _ = queue.pop(); } }) }); group.measurement_time(Duration::from_secs(10)); group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_function("locked VecDeque", |b| { b.iter(|| { let queue = LockedQueue::::new(10); for _ in 0..ONE_MILLION { let _ = queue.pop(); } }) }); group.measurement_time(Duration::from_secs(10)); group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_function("array_queue", |b| { b.iter(|| { let queue = ArrayQueue::::new(1); for _ in 0..ONE_MILLION { let _ = queue.pop(); } }) }); group.measurement_time(Duration::from_secs(10)); group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_function("seg_queue", |b| { b.iter(|| { let queue = SegQueue::::new(); for _ in 0..ONE_MILLION { let _ = queue.pop(); } }) }); } fn _bench_sequence(c: &mut Criterion) { let mut group = c.benchmark_group("sequence"); group.significance_level(0.1).sample_size(50); group.measurement_time(Duration::from_secs(10)); group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_function("weak_cell", |b| { b.iter(|| { let cell = WeakCell::::new(); for _ in 0..ONE_MILLION { let foo = Arc::new(Foo { _inner: 1 }); cell.put(Arc::downgrade(&foo)); let _ = cell.pop(); } }) }); group.measurement_time(Duration::from_secs(10)); group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_function("spin VecDeque", |b| { b.iter(|| { let queue = SpinQueue::new(10); for _ in 0..ONE_MILLION { let foo = Arc::new(Foo { _inner: 1 }); let _ = queue.push(Arc::downgrade(&foo)); if let Some(w) = queue.pop() { let _ = w.upgrade(); } } }) }); group.measurement_time(Duration::from_secs(10)); group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_function("locked VecDeque", |b| { b.iter(|| { let queue = LockedQueue::new(10); for _ in 
0..ONE_MILLION { let foo = Arc::new(Foo { _inner: 1 }); let _ = queue.push(Arc::downgrade(&foo)); if let Some(w) = queue.pop() { let _ = w.upgrade(); } } }) }); group.measurement_time(Duration::from_secs(10)); group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_function("array_queue", |b| { b.iter(|| { let queue = ArrayQueue::>::new(1); for _ in 0..ONE_MILLION { let foo = Arc::new(Foo { _inner: 1 }); let _ = queue.push(Arc::downgrade(&foo)); if let Some(w) = queue.pop() { let _ = w.upgrade(); } } }) }); group.measurement_time(Duration::from_secs(10)); group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_function("seg_queue", |b| { b.iter(|| { let queue = SegQueue::>::new(); for _ in 0..ONE_MILLION { let foo = Arc::new(Foo { _inner: 1 }); let _ = queue.push(Arc::downgrade(&foo)); if let Some(w) = queue.pop() { let _ = w.upgrade(); } } }) }); } fn _bench_threads(c: &mut Criterion) { let mut group = c.benchmark_group("threads"); group.significance_level(0.1).sample_size(50); group.measurement_time(Duration::from_secs(10)); for input in n_1() { group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_with_input(BenchmarkId::new("weak_cell", input), &input, |b, i| { b.iter(|| _bench_weak_cell(*i)) }); group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_with_input(BenchmarkId::new("spin VecDeque", input), &input, |b, i| { b.iter(|| _bench_spin_queue(*i)) }); group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_with_input(BenchmarkId::new("locked VecDeque", input), &input, |b, i| { b.iter(|| _bench_locked_queue(*i)) }); group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_with_input(BenchmarkId::new("array_queue", input), &input, |b, i| { b.iter(|| _bench_array_queue(*i)) }); group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_with_input(BenchmarkId::new("seg_queue", input), &input, |b, i| { b.iter(|| _bench_seg_queue(*i)) }); } } 
criterion_group!(benches, _bench_empty, _bench_sequence, _bench_threads); criterion_main!(benches); ================================================ FILE: git-hooks/pre-commit ================================================ #!/bin/bash make fmt || exit 1 # re add the files since changed by fmt files=$(git diff --cached --name-only --diff-filter=ACM | grep '.rs$') for f in $files; do echo "git add $f" git add $f done exit 0 ================================================ FILE: rustfmt.toml ================================================ edition = "2021" fn_params_layout = "Compressed" newline_style = "Unix" use_small_heuristics = "Max" max_width = 100 use_field_init_shorthand = true ================================================ FILE: src/async_rx.rs ================================================ use crate::flavor::{FlavorMC, FlavorSelect}; use crate::select::SelectResult; use crate::stream::AsyncStream; #[cfg(feature = "trace_log")] use crate::tokio_task_id; use crate::{shared::*, trace_log, MRx, NotCloneable, ReceiverType, Rx}; use std::cell::Cell; use std::fmt; use std::future::Future; use std::marker::PhantomData; use std::ops::Deref; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; /// A single consumer (receiver) that works in an async context. /// /// Additional methods in [ChannelShared] can be accessed through `Deref`. /// /// `AsyncRx` can be converted into `Rx` via the `From` trait, /// which means you can have two types of receivers, both within async and /// blocking contexts, for the same channel. /// /// **NOTE**: `AsyncRx` is not `Clone` or `Sync`. /// If you need concurrent access, use [MAsyncRx] instead. /// /// `AsyncRx` has a `Send` marker and can be moved to other coroutines. 
/// The following code is OK: /// /// ``` rust /// use crossfire::*; /// async fn foo() { /// let (tx, rx) = mpsc::bounded_async::(100); /// tokio::spawn(async move { /// let _ = rx.recv().await; /// }); /// drop(tx); /// } /// ``` /// /// Because `AsyncRx` does not have a `Sync` marker, using `Arc` will lose the `Send` marker. /// /// For your safety, the following code **should not compile**: /// /// ``` compile_fail /// use crossfire::*; /// use std::sync::Arc; /// async fn foo() { /// let (tx, rx) = mpsc::bounded_async::(100); /// let rx = Arc::new(rx); /// tokio::spawn(async move { /// let _ = rx.recv().await; /// }); /// drop(tx); /// } /// ``` pub struct AsyncRx { pub(crate) shared: Arc>, // Remove the Sync marker to prevent being put in Arc _phan: PhantomData>, } unsafe impl Send for AsyncRx {} impl fmt::Debug for AsyncRx { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "AsyncRx{:p}", self) } } impl fmt::Display for AsyncRx { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "AsyncRx{:p}", self) } } impl Drop for AsyncRx { #[inline(always)] fn drop(&mut self) { self.shared.close_rx(); } } impl From> for AsyncRx { fn from(value: Rx) -> Self { value.add_rx(); Self::new(value.shared.clone()) } } impl AsyncRx { #[inline] pub(crate) fn new(shared: Arc>) -> Self { Self { shared, _phan: Default::default() } } /// Return true if the other side has closed #[inline(always)] pub fn is_disconnected(&self) -> bool { self.shared.is_tx_closed() } #[inline] pub fn into_stream(self) -> AsyncStream { AsyncStream::new(self) } #[inline] pub fn into_blocking(self) -> Rx { self.into() } } impl AsyncRx { /// Receives a message from the channel. This method will await until a message is received or the channel is closed. /// /// This function is cancellation-safe, so it's safe to use with `timeout()` and the `select!` macro. /// When a [RecvFuture] is dropped, no message will be received from the channel. 
/// /// For timeout scenarios, there's an alternative: [AsyncRx::recv_timeout()]. /// /// Returns `Ok(T)` on success. /// /// Returns Err([RecvError]) if the sender has been dropped. #[inline(always)] pub fn recv<'a>(&'a self) -> RecvFuture<'a, F> { RecvFuture { rx: self, waker: None } } // NOTE: we cannot use async fn recv_timeout signature because &self is not Send /// Receives a message from the channel with a timeout. /// Will await when channel is empty. /// /// The behavior is atomic: the message is either received successfully or the operation is canceled due to a timeout. /// /// Returns `Ok(T)` when successful. /// /// Returns Err([RecvTimeoutError::Timeout]) when a message could not be received because the channel is empty and the operation timed out. /// /// Returns Err([RecvTimeoutError::Disconnected]) if the sender has been dropped and the channel is empty. #[cfg(feature = "tokio")] #[cfg_attr(docsrs, doc(cfg(feature = "tokio")))] #[inline] pub fn recv_timeout( &self, duration: std::time::Duration, ) -> RecvTimeoutFuture<'_, F, tokio::time::Sleep, ()> { let sleep = tokio::time::sleep(duration); self.recv_with_timer(sleep) } #[cfg(feature = "async_std")] #[cfg_attr(docsrs, doc(cfg(feature = "async_std")))] #[inline] pub fn recv_timeout( &self, duration: std::time::Duration, ) -> RecvTimeoutFuture<'_, F, impl Future, ()> { let sleep = async_std::task::sleep(duration); self.recv_with_timer(sleep) } /// Receives a message from the channel with a custom timer function (from other async runtime). /// /// The behavior is atomic: the message is either received successfully or the operation is canceled due to a timeout. /// /// Returns `Ok(T)` when successful. /// /// Returns Err([RecvTimeoutError::Timeout]) when a message could not be received because the channel is empty and the operation timed out. /// /// Returns Err([RecvTimeoutError::Disconnected]) if the sender has been dropped and the channel is empty. 
/// /// # Argument: /// /// * `sleep`: The sleep function. /// The return value of `sleep` is ignore. We add generic `R` just in order to support smol::Timer /// /// # Example: /// /// with smol timer /// /// ```rust /// extern crate smol; /// use std::time::Duration; /// use crossfire::*; /// async fn foo() { /// let (tx, rx) = mpmc::bounded_async::(10); /// match rx.recv_with_timer(smol::Timer::after(Duration::from_secs(1))).await { /// Ok(_item)=>{ /// println!("message recv"); /// } /// Err(RecvTimeoutError::Timeout)=>{ /// println!("timeout"); /// } /// Err(RecvTimeoutError::Disconnected)=>{ /// println!("sender-side closed"); /// } /// } /// } /// ``` #[inline] pub fn recv_with_timer<'a, FR, R>(&'a self, sleep: FR) -> RecvTimeoutFuture<'a, F, FR, R> where FR: Future, { RecvTimeoutFuture { rx: self, waker: None, sleep } } /// Attempts to receive a message from the channel without blocking. /// /// Returns `Ok(T)` on successful. /// /// Returns Err([TryRecvError::Empty]) if the channel is empty. /// /// Returns Err([TryRecvError::Disconnected]) if the sender has been dropped and the channel is empty. #[inline(always)] pub fn try_recv(&self) -> Result { self.shared.try_recv() } /// This method use with [select](crate::select::Select), guarantee non-blocking /// /// # Panics /// /// Panics if SelectResult from other receiver is passed. #[inline(always)] pub fn read_select(&self, result: SelectResult) -> Result where F: FlavorSelect, { assert_eq!( self as *const Self as *const u8, result.channel, "invalid use select with another channel" ); self.as_ref().read_with_token(result.token) } /// Internal function might change in the future. For public version, use AsyncStream::poll_item() instead /// /// Returns `Ok(T)` on successful. /// /// Return Err([TryRecvError::Empty]) for Poll::Pending case. /// /// Return Err([TryRecvError::Disconnected]) when all Tx dropped and channel is empty. 
#[inline(always)] pub(crate) fn poll_item( &self, ctx: &mut Context, o_waker: &mut Option<::Waker>, ) -> Result { let shared = &self.shared; // When the result is not TryRecvError::Empty, // make sure always take the o_waker out and abandon, // to skip the timeout cleaning logic in Drop. macro_rules! on_recv_no_waker { () => {{ trace_log!("rx{:?}: recv", tokio_task_id!()); }}; } macro_rules! on_recv_waker { ($state: expr) => {{ trace_log!("rx{:?}: recv {:?} {:?}", tokio_task_id!(), o_waker, $state); shared.recvs.cancel_waker(o_waker); }}; } macro_rules! try_recv { ($recv_func: ident => $waker_handle: block) => { if let Some(item) = shared.inner.$recv_func() { shared.on_recv(); $waker_handle return Ok(item); } }; } loop { if o_waker.is_none() { try_recv!(try_recv=>{ on_recv_no_waker!()}); // First call if let Some(mut backoff) = shared.get_async_backoff() { loop { let complete = backoff.spin(); try_recv!(try_recv=>{ on_recv_no_waker!()}); if complete { break; } } } } else { try_recv!(try_recv => {on_recv_waker!(WakerState::Woken)}); } if shared.recvs.reg_waker_async(ctx, o_waker).is_some() { break; } // NOTE: The other side put something while reg_send and did not see the waker, // should check the channel again, otherwise might incur a dead lock. 
// NOTE: special API before we park // because Miri is not happy about ArrayQueue pop ordering, which is not SeqCst try_recv!(try_recv_final =>{ on_recv_waker!(WakerState::Init)}); if !STREAM { let state = shared.recvs.commit_waiting(o_waker); trace_log!("rx{:?}: commit_waiting {:?} {}", tokio_task_id!(), o_waker, state); if state == WakerState::Woken as u8 { continue; } } break; } if shared.is_tx_closed() { try_recv!(try_recv =>{ on_recv_waker!(WakerState::Closed)}); trace_log!("rx{:?}: disconnected {:?}", tokio_task_id!(), o_waker); Err(TryRecvError::Disconnected) } else { Err(TryRecvError::Empty) } } } /// A fixed-sized future object constructed by [AsyncRx::recv()] #[must_use] pub struct RecvFuture<'a, F: Flavor> { rx: &'a AsyncRx, waker: Option<::Waker>, } unsafe impl Send for RecvFuture<'_, F> {} impl Drop for RecvFuture<'_, F> { #[inline] fn drop(&mut self) { if let Some(waker) = self.waker.as_ref() { self.rx.shared.abandon_recv_waker(waker); } } } impl Future for RecvFuture<'_, F> { type Output = Result; #[inline] fn poll(self: Pin<&mut Self>, ctx: &mut Context) -> Poll { let mut _self = self.get_mut(); match _self.rx.poll_item::(ctx, &mut _self.waker) { Err(e) => { if !e.is_empty() { let _ = _self.waker.take(); Poll::Ready(Err(RecvError {})) } else { Poll::Pending } } Ok(item) => { debug_assert!(_self.waker.is_none()); Poll::Ready(Ok(item)) } } } } /// A fixed-sized future object constructed by [AsyncRx::recv_timeout()] #[must_use] pub struct RecvTimeoutFuture<'a, F, FR, R> where F: Flavor, FR: Future, { rx: &'a AsyncRx, waker: Option<::Waker>, sleep: FR, } unsafe impl Send for RecvTimeoutFuture<'_, F, FR, R> where F: Flavor, FR: Future, { } impl Drop for RecvTimeoutFuture<'_, F, FR, R> where F: Flavor, FR: Future, { #[inline] fn drop(&mut self) { if let Some(waker) = self.waker.as_ref() { self.rx.shared.abandon_recv_waker(waker); } } } impl Future for RecvTimeoutFuture<'_, F, FR, R> where F: Flavor, FR: Future, { type Output = Result; #[inline] fn 
poll(self: Pin<&mut Self>, ctx: &mut Context) -> Poll { // NOTE: we can use unchecked to bypass pin because we are not movig "sleep", // neither it's exposed outside let mut _self = unsafe { self.get_unchecked_mut() }; match _self.rx.poll_item::(ctx, &mut _self.waker) { Err(TryRecvError::Empty) => { if unsafe { Pin::new_unchecked(&mut _self.sleep) }.poll(ctx).is_ready() { return Poll::Ready(Err(RecvTimeoutError::Timeout)); } Poll::Pending } Err(TryRecvError::Disconnected) => Poll::Ready(Err(RecvTimeoutError::Disconnected)), Ok(item) => Poll::Ready(Ok(item)), } } } /// For writing generic code with MAsyncRx & AsyncRx pub trait AsyncRxTrait: Send + 'static + fmt::Debug + fmt::Display { /// Receive message, will await when channel is empty. /// /// Returns `Ok(T)` when successful. /// /// returns Err([RecvError]) when all Tx dropped. fn recv(&self) -> impl Future> + Send; /// Waits for a message to be received from the channel, but only for a limited time. /// Will await when channel is empty. /// /// The behavior is atomic, either successfully polls a message, /// or operation cancelled due to timeout. /// /// Returns Ok(T) when successful. /// /// Returns Err([RecvTimeoutError::Timeout]) when a message could not be received because the channel is empty and the operation timed out. /// /// returns Err([RecvTimeoutError::Disconnected]) when all Tx dropped and channel is empty. #[cfg(any(feature = "tokio", feature = "async_std"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "tokio", feature = "async_std"))))] fn recv_timeout( &self, timeout: std::time::Duration, ) -> impl Future> + Send; /// Receives a message from the channel with a custom timer function (from other async runtime). /// /// The behavior is atomic: the message is either received successfully or the operation is canceled due to a timeout. /// /// Returns `Ok(T)` when successful. 
/// /// Returns Err([RecvTimeoutError::Timeout]) when a message could not be received because the channel is empty and the operation timed out. /// /// Returns Err([RecvTimeoutError::Disconnected]) if the sender has been dropped and the channel is empty. /// /// # Argument: /// /// * `fut`: The sleep function. It's possible to wrap this function with cancelable handle, /// you can control when to stop polling. the return value of `fut` is ignore. /// We add generic `R` just in order to support smol::Timer. fn recv_with_timer( &self, fut: FR, ) -> impl Future> + Send where FR: Future; /// Try to receive message, non-blocking. /// /// Returns Ok(T) when successful. /// /// Returns Err([TryRecvError::Empty]) when channel is empty. /// /// Returns Err([TryRecvError::Disconnected]) when all Tx dropped and channel is empty. fn try_recv(&self) -> Result; /// The number of messages in the channel at the moment fn len(&self) -> usize; /// The capacity of the channel, return None for unbounded channel. 
fn capacity(&self) -> Option; /// Whether channel is empty at the moment fn is_empty(&self) -> bool; /// Whether the channel is full at the moment fn is_full(&self) -> bool; /// Return true if the other side has closed fn is_disconnected(&self) -> bool; /// Return the number of senders fn get_tx_count(&self) -> usize; /// Return the number of receivers fn get_rx_count(&self) -> usize; fn clone_to_vec(self, count: usize) -> Vec where Self: Sized; fn to_stream(self) -> Pin>>; fn get_wakers_count(&self) -> (usize, usize); } impl AsyncRxTrait for AsyncRx { #[inline(always)] fn clone_to_vec(self, _count: usize) -> Vec { assert_eq!(_count, 1); vec![self] } #[inline(always)] fn recv(&self) -> impl Future> + Send { AsyncRx::recv(self) } #[cfg(any(feature = "tokio", feature = "async_std"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "tokio", feature = "async_std"))))] #[inline(always)] fn recv_timeout( &self, duration: std::time::Duration, ) -> impl Future> + Send { AsyncRx::recv_timeout(self, duration) } #[inline(always)] fn recv_with_timer( &self, sleep: FR, ) -> impl Future> + Send where FR: Future, { AsyncRx::recv_with_timer(self, sleep) } #[inline(always)] fn try_recv(&self) -> Result { AsyncRx::::try_recv(self) } /// The number of messages in the channel at the moment #[inline(always)] fn len(&self) -> usize { self.as_ref().len() } /// The capacity of the channel, return None for unbounded channel. 
#[inline(always)] fn capacity(&self) -> Option { self.as_ref().capacity() } /// Whether channel is empty at the moment #[inline(always)] fn is_empty(&self) -> bool { self.as_ref().is_empty() } /// Whether the channel is full at the moment #[inline(always)] fn is_full(&self) -> bool { self.as_ref().is_full() } /// Return true if the other side has closed #[inline(always)] fn is_disconnected(&self) -> bool { self.as_ref().get_tx_count() == 0 } #[inline(always)] fn get_tx_count(&self) -> usize { self.as_ref().get_tx_count() } #[inline(always)] fn get_rx_count(&self) -> usize { self.as_ref().get_rx_count() } #[inline(always)] fn to_stream(self) -> Pin>> { Box::pin(self.into_stream()) } fn get_wakers_count(&self) -> (usize, usize) { self.as_ref().get_wakers_count() } } /// A multi-consumer (receiver) that works in an async context. /// /// Inherits from [`AsyncRx`] and implements `Clone`. /// Additional methods in [ChannelShared] can be accessed through `Deref`. /// /// You can use `into()` to convert it to `AsyncRx`. /// /// `MAsyncRx` can be converted into `MRx` via the `From` trait, /// which means you can have two types of receivers, both within async and /// blocking contexts, for the same channel. 
pub struct MAsyncRx(pub(crate) AsyncRx); impl fmt::Debug for MAsyncRx { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "MAsyncRx{:p}", self) } } impl fmt::Display for MAsyncRx { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "MAsyncRx{:p}", self) } } unsafe impl Sync for MAsyncRx {} impl Clone for MAsyncRx { #[inline] fn clone(&self) -> Self { let inner = &self.0; inner.shared.add_rx(); Self(AsyncRx::new(inner.shared.clone())) } } impl From> for AsyncRx { fn from(rx: MAsyncRx) -> Self { rx.0 } } impl MAsyncRx { #[inline] pub(crate) fn new(shared: Arc>) -> Self { Self(AsyncRx::new(shared)) } } impl MAsyncRx { #[inline] pub fn into_stream(self) -> AsyncStream { AsyncStream::new(self.0) } #[inline] pub fn into_blocking(self) -> MRx { self.into() } } impl Deref for MAsyncRx { type Target = AsyncRx; /// inherit all the functions of [AsyncRx] #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From> for MAsyncRx { fn from(value: MRx) -> Self { value.add_rx(); Self(AsyncRx::new(value.shared.clone())) } } impl AsyncRxTrait for MAsyncRx { #[inline(always)] fn clone_to_vec(self, count: usize) -> Vec { let mut v = Vec::with_capacity(count); for _ in 0..count - 1 { v.push(self.clone()); } v.push(self); v } #[inline(always)] fn try_recv(&self) -> Result { self.0.try_recv() } #[inline(always)] fn recv(&self) -> impl Future> + Send { self.0.recv() } #[cfg(any(feature = "tokio", feature = "async_std"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "tokio", feature = "async_std"))))] #[inline(always)] fn recv_timeout( &self, duration: std::time::Duration, ) -> impl Future> + Send { self.0.recv_timeout(duration) } #[inline(always)] fn recv_with_timer( &self, fut: FR, ) -> impl Future> where FR: Future, { self.0.recv_with_timer(fut) } /// The number of messages in the channel at the moment #[inline(always)] fn len(&self) -> usize { self.as_ref().len() } /// The capacity of the channel, return None for unbounded channel. 
#[inline(always)] fn capacity(&self) -> Option { self.as_ref().capacity() } /// Whether channel is empty at the moment #[inline(always)] fn is_empty(&self) -> bool { self.as_ref().is_empty() } /// Whether the channel is full at the moment #[inline(always)] fn is_full(&self) -> bool { self.as_ref().is_full() } /// Return true if the other side has closed #[inline(always)] fn is_disconnected(&self) -> bool { self.as_ref().get_tx_count() == 0 } #[inline(always)] fn get_tx_count(&self) -> usize { self.as_ref().get_tx_count() } #[inline(always)] fn get_rx_count(&self) -> usize { self.as_ref().get_rx_count() } #[inline(always)] fn to_stream(self) -> Pin>> { Box::pin(self.into_stream()) } fn get_wakers_count(&self) -> (usize, usize) { self.as_ref().get_wakers_count() } } impl Deref for AsyncRx { type Target = ChannelShared; #[inline(always)] fn deref(&self) -> &ChannelShared { &self.shared } } impl AsRef> for AsyncRx { #[inline(always)] fn as_ref(&self) -> &ChannelShared { &self.shared } } impl AsRef> for MAsyncRx { #[inline(always)] fn as_ref(&self) -> &ChannelShared { &self.0.shared } } impl> ReceiverType for AsyncRx { type Flavor = F; #[inline(always)] fn new(shared: Arc>) -> Self { AsyncRx::new(shared) } } impl NotCloneable for AsyncRx {} impl + FlavorMC> ReceiverType for MAsyncRx { type Flavor = F; #[inline(always)] fn new(shared: Arc>) -> Self { MAsyncRx::new(shared) } } ================================================ FILE: src/async_tx.rs ================================================ use crate::flavor::FlavorMP; use crate::sink::AsyncSink; #[cfg(feature = "trace_log")] use crate::tokio_task_id; use crate::weak::WeakTx; use crate::{shared::*, trace_log, MTx, NotCloneable, SenderType, Tx}; use std::cell::Cell; use std::fmt; use std::future::Future; use std::marker::PhantomData; use std::mem::{needs_drop, MaybeUninit}; use std::ops::Deref; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; /// A single producer (sender) that works in an async 
context. /// /// Additional methods in [ChannelShared] can be accessed through `Deref`. /// /// `AsyncTx` can be converted into `Tx` via the `From` trait. /// This means you can have two types of senders, both within async and blocking contexts, for the same channel. /// /// **NOTE**: `AsyncTx` is not `Clone` or `Sync`. /// If you need concurrent access, use [MAsyncTx] instead. /// /// `AsyncTx` has a `Send` marker and can be moved to other coroutines. /// The following code is OK: /// /// ``` rust /// use crossfire::*; /// async fn foo() { /// let (tx, rx) = spsc::bounded_async::(100); /// tokio::spawn(async move { /// let _ = tx.send(2).await; /// }); /// drop(rx); /// } /// ``` /// /// Because `AsyncTx` does not have a `Sync` marker, using `Arc` will lose the `Send` marker. /// /// For your safety, the following code **should not compile**: /// /// ``` compile_fail /// use crossfire::*; /// use std::sync::Arc; /// async fn foo() { /// let (tx, rx) = spsc::bounded_async::(100); /// let tx = Arc::new(tx); /// tokio::spawn(async move { /// let _ = tx.send(2).await; /// }); /// drop(rx); /// } /// ``` pub struct AsyncTx { pub(crate) shared: Arc>, // Remove the Sync marker to prevent being put in Arc _phan: PhantomData>, } impl fmt::Debug for AsyncTx { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "AsyncTx{:p}", self) } } impl fmt::Display for AsyncTx { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "AsyncTx{:p}", self) } } unsafe impl Send for AsyncTx {} impl Drop for AsyncTx { #[inline(always)] fn drop(&mut self) { self.shared.close_tx(); } } impl From> for AsyncTx { fn from(value: Tx) -> Self { value.add_tx(); Self::new(value.shared.clone()) } } impl AsyncTx { #[inline] pub(crate) fn new(shared: Arc>) -> Self { Self { shared, _phan: Default::default() } } #[inline] pub fn into_sink(self) -> AsyncSink { AsyncSink::new(self) } #[inline] pub fn into_blocking(self) -> Tx { self.into() } /// Return true if the other side has closed 
#[inline(always)] pub fn is_disconnected(&self) -> bool { self.shared.is_rx_closed() } } impl AsyncTx { /// Sends a message. This method will await until the message is sent or the channel is closed. /// /// This function is cancellation-safe, so it's safe to use with `timeout()` and the `select!` macro. /// When a [SendFuture] is dropped, no message will be sent. However, the original message /// cannot be returned due to API limitations. For timeout scenarios, we recommend using /// [AsyncTx::send_timeout()], which returns the message in a [SendTimeoutError]. /// /// Returns `Ok(())` on success. /// /// Returns Err([SendError]) if the receiver has been dropped. #[inline(always)] pub fn send<'a>(&'a self, item: F::Item) -> SendFuture<'a, F> { SendFuture { tx: self, item: MaybeUninit::new(item), waker: None } } /// Attempts to send a message without blocking. /// /// Returns `Ok(())` when successful. /// /// Returns Err([TrySendError::Full]) if the channel is full. /// /// Returns Err([TrySendError::Disconnected]) if the receiver has been dropped. #[inline] pub fn try_send(&self, item: F::Item) -> Result<(), TrySendError> { if self.shared.is_rx_closed() { return Err(TrySendError::Disconnected(item)); } let _item = MaybeUninit::new(item); if self.shared.inner.try_send(&_item) { self.shared.on_send(); Ok(()) } else { unsafe { Err(TrySendError::Full(_item.assume_init())) } } } /// Sends a message with a timeout. /// Will await when channel is full. /// /// The behavior is atomic: the message is either sent successfully or returned with error. /// /// Returns `Ok(())` when successful. /// /// Returns Err([SendTimeoutError::Timeout]) if the operation timed out. The error contains the message that failed to be sent. /// /// Returns Err([SendTimeoutError::Disconnected]) if the receiver has been dropped. The error contains the message that failed to be sent. 
#[cfg(feature = "tokio")] #[cfg_attr(docsrs, doc(cfg(feature = "tokio")))] #[inline] pub fn send_timeout( &self, item: F::Item, duration: std::time::Duration, ) -> SendTimeoutFuture<'_, F, tokio::time::Sleep, ()> { let sleep = tokio::time::sleep(duration); self.send_with_timer(item, sleep) } #[cfg(feature = "async_std")] #[cfg_attr(docsrs, doc(cfg(feature = "async_std")))] #[inline] pub fn send_timeout( &self, item: F::Item, duration: std::time::Duration, ) -> SendTimeoutFuture<'_, F, impl Future, ()> { let sleep = async_std::task::sleep(duration); self.send_with_timer(item, sleep) } /// Sends a message with a custom timer function (from other async runtime). /// /// The behavior is atomic: the message is either sent successfully or returned with error. /// /// Returns `Ok(())` when successful. /// /// Returns Err([SendTimeoutError::Timeout]) if the operation timed out. The error contains the message that failed to be sent. /// /// Returns Err([SendTimeoutError::Disconnected]) if the receiver has been dropped. The error contains the message that failed to be sent. /// /// # Argument: /// /// * `fut`: The sleep function. It's possible to wrap this function with cancelable handle, /// you can control when to stop polling. the return value of `fut` is ignore. /// We add generic `R` just in order to support smol::Timer. 
/// /// # Example: /// /// ```rust /// extern crate smol; /// use std::time::Duration; /// use crossfire::*; /// async fn foo() { /// let (tx, rx) = mpmc::bounded_async::(10); /// match tx.send_with_timer(1, smol::Timer::after(Duration::from_secs(1))).await { /// Ok(_)=>{ /// println!("message sent"); /// } /// Err(SendTimeoutError::Timeout(_item))=>{ /// println!("send timeout"); /// } /// Err(SendTimeoutError::Disconnected(_item))=>{ /// println!("receiver-side closed"); /// } /// } /// } /// ``` #[inline] pub fn send_with_timer(&self, item: F::Item, fut: FR) -> SendTimeoutFuture<'_, F, FR, R> where FR: Future, { SendTimeoutFuture { tx: self, item: MaybeUninit::new(item), waker: None, sleep: fut } } /// Internal function might change in the future. For public version, use AsyncSink::poll_send() instead. /// /// Returns `Poll::Ready(Ok(()))` on message sent. /// /// Returns `Poll::Pending` for Poll::Pending case. /// /// Returns `Poll::Ready(Err(())` when all Rx dropped. #[inline(always)] pub(crate) fn poll_send<'a, const SINK: bool>( &self, ctx: &'a mut Context, item: &MaybeUninit, o_waker: &'a mut Option<::Waker>, ) -> Poll> { let shared = &self.shared; if shared.is_rx_closed() { trace_log!("tx{:?}: closed {:?}", tokio_task_id!(), o_waker); return Poll::Ready(Err(())); } // When the result is not TrySendError::Full, // make sure always take the o_waker out and abandon, // to skip the timeout cleaning logic in Drop. 
loop {
            // Fast path: try to push the item without touching the waker registry.
            if shared.inner.try_send(item) {
                shared.on_send();
                if let Some(_waker) = o_waker.take() {
                    trace_log!("tx{:?}: send {:?}", tokio_task_id!(), _waker);
                } else {
                    trace_log!("tx{:?}: send", tokio_task_id!());
                }
                return Poll::Ready(Ok(()));
            }
            // First pass only (no waker registered yet): optionally spin a bounded
            // backoff before paying the cost of waker registration.
            if o_waker.is_none() {
                if let Some(mut backoff) = shared.get_async_backoff() {
                    loop {
                        backoff.spin();
                        if shared.inner.try_send(item) {
                            shared.on_send();
                            trace_log!("tx{:?}: send", tokio_task_id!());
                            return Poll::Ready(Ok(()));
                        }
                        if backoff.is_completed() {
                            break;
                        }
                    }
                }
            }
            // Register (or refresh) our waker with the sender-side registry.
            match shared.senders.reg_waker_async(ctx, o_waker) {
                Some(Poll::Pending) => return Poll::Pending,
                Some(Poll::Ready(())) => return Poll::Ready(Err(())),
                _ => {}
            }
            // Double-check after registration to close the race between
            // "queue drained" and "waker parked".
            let state = shared.sender_double_check::(item, o_waker);
            trace_log!("tx{:?}: sender_double_check {:?} {}", tokio_task_id!(), o_waker, state);
            if state < WakerState::Woken as u8 {
                // Still waiting; the registered waker will be notified later.
                return Poll::Pending;
            } else if state > WakerState::Woken as u8 {
                if state == WakerState::Done as u8 {
                    // The message was already taken on the receiver side.
                    trace_log!("tx{:?}: send {:?} done", o_waker, tokio_task_id!());
                    let _ = o_waker.take();
                    return Poll::Ready(Ok(()));
                } else {
                    debug_assert_eq!(state, WakerState::Closed as u8);
                    trace_log!("tx{:?}: closed {:?}", o_waker, tokio_task_id!());
                    let _ = o_waker.take();
                    return Poll::Ready(Err(()));
                }
            }
            // Woken but the item was not taken for us: retry the fast path.
            debug_assert_eq!(state, WakerState::Woken as u8);
            continue;
        }
    }
}

/// A fixed-sized future object constructed by [AsyncTx::send()]
#[must_use]
pub struct SendFuture<'a, F: Flavor> {
    tx: &'a AsyncTx,
    // The message to send; read out (assume_init_read) on completion, or
    // dropped in Drop if the future is cancelled while still pending.
    item: MaybeUninit,
    // Registered waker; present only while this future is parked as pending.
    waker: Option<::Waker>,
}

unsafe impl Send for SendFuture<'_, F> where F::Item: Send {}

impl Drop for SendFuture<'_, F> {
    #[inline]
    fn drop(&mut self) {
        // Cancelling the future, poll is not ready
        if let Some(waker) = self.waker.as_ref() {
            // Only drop the item when abandoning the waker succeeded; otherwise
            // the message may already have been handed over in the background.
            if self.tx.shared.abandon_send_waker(waker) && needs_drop::() {
                unsafe { self.item.assume_init_drop() };
            }
        }
    }
}

impl Future for SendFuture<'_, F>
where
    F::Item: Unpin,
{
    type Output = Result<(), SendError>;

    #[inline]
    fn poll(self: Pin<&mut Self>, ctx: &mut Context) -> Poll {
        let mut
_self = self.get_mut(); match _self.tx.poll_send::(ctx, &_self.item, &mut _self.waker) { Poll::Ready(Ok(())) => { debug_assert!(_self.waker.is_none()); Poll::Ready(Ok(())) } Poll::Ready(Err(())) => { let _ = _self.waker.take(); Poll::Ready(Err(SendError(unsafe { _self.item.assume_init_read() }))) } Poll::Pending => Poll::Pending, } } } /// A fixed-sized future object constructed by [AsyncTx::send_timeout()] #[must_use] pub struct SendTimeoutFuture<'a, F, FR, R> where F: Flavor, FR: Future, { tx: &'a AsyncTx, sleep: FR, item: MaybeUninit, waker: Option<::Waker>, } unsafe impl Send for SendTimeoutFuture<'_, F, FR, R> where F: Flavor, FR: Future, { } impl Drop for SendTimeoutFuture<'_, F, FR, R> where F: Flavor, FR: Future, { #[inline] fn drop(&mut self) { if let Some(waker) = self.waker.as_ref() { // Cancelling the future, poll is not ready if self.tx.shared.abandon_send_waker(waker) && needs_drop::() { unsafe { self.item.assume_init_drop() }; } } } } impl Future for SendTimeoutFuture<'_, F, FR, R> where F: Flavor, FR: Future, F::Item: Send + 'static + Unpin, { type Output = Result<(), SendTimeoutError>; #[inline] fn poll(self: Pin<&mut Self>, ctx: &mut Context) -> Poll { // NOTE: we can use unchecked to bypass pin because we are not movig "sleep", // neither it's exposed outside let mut _self = unsafe { self.get_unchecked_mut() }; match _self.tx.poll_send::(ctx, &_self.item, &mut _self.waker) { Poll::Ready(Ok(())) => { debug_assert!(_self.waker.is_none()); Poll::Ready(Ok(())) } Poll::Ready(Err(())) => { let _ = _self.waker.take(); Poll::Ready(Err(SendTimeoutError::Disconnected(unsafe { _self.item.assume_init_read() }))) } Poll::Pending => { let sleep = unsafe { Pin::new_unchecked(&mut _self.sleep) }; if sleep.poll(ctx).is_ready() { if _self.tx.shared.abandon_send_waker(&_self.waker.take().unwrap()) { return Poll::Ready(Err(SendTimeoutError::Timeout(unsafe { _self.item.assume_init_read() }))); } else { // Message already sent in background (on_recv). 
return Poll::Ready(Ok(())); } } Poll::Pending } } } } /// For writing generic code with MAsyncTx & AsyncTx pub trait AsyncTxTrait: Send + 'static + fmt::Debug + fmt::Display { /// Try to send message, non-blocking /// /// Returns `Ok(())` when successful. /// /// Returns Err([TrySendError::Full]) on channel full for bounded channel. /// /// Returns Err([TrySendError::Disconnected]) when all Rx dropped. fn try_send(&self, item: T) -> Result<(), TrySendError>; /// The number of messages in the channel at the moment fn len(&self) -> usize; /// The capacity of the channel, return None for unbounded channel. fn capacity(&self) -> Option; /// Whether channel is empty at the moment fn is_empty(&self) -> bool; /// Whether the channel is full at the moment fn is_full(&self) -> bool; /// Return true if the other side has closed fn is_disconnected(&self) -> bool; /// Return the number of senders fn get_tx_count(&self) -> usize; /// Return the number of receivers fn get_rx_count(&self) -> usize; fn clone_to_vec(self, count: usize) -> Vec where Self: Sized; fn get_wakers_count(&self) -> (usize, usize); /// Send message. Will await when channel is full. /// /// Returns `Ok(())` on successful. /// /// Returns Err([SendError]) when all Rx is dropped. fn send(&self, item: T) -> impl Future>> + Send where T: Send + 'static + Unpin; /// Waits for a message to be sent into the channel, but only for a limited time. /// Will await when channel is full. /// /// The behavior is atomic, either message sent successfully or returned on error. /// /// Returns `Ok(())` when successful. /// /// Returns Err([SendTimeoutError::Timeout]) when the operation timed out. /// /// Returns Err([SendTimeoutError::Disconnected]) when all Rx dropped. 
#[cfg(any(feature = "tokio", feature = "async_std"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "tokio", feature = "async_std"))))] fn send_timeout<'a>( &'a self, item: T, duration: std::time::Duration, ) -> impl Future>> + Send where T: Send + 'static + Unpin; /// Sends a message with a custom timer function. /// Will await when channel is full. /// /// The behavior is atomic: the message is either sent successfully or returned with error. /// /// Returns `Ok(())` when successful. /// /// Returns Err([SendTimeoutError::Timeout]) if the operation timed out. The error contains the message that failed to be sent. /// /// Returns Err([SendTimeoutError::Disconnected]) if the receiver has been dropped. The error contains the message that failed to be sent. /// /// # Argument: /// /// * `fut`: The sleep function. It's possible to wrap this function with cancelable handle, /// you can control when to stop polling. the return value of `fut` is ignore. /// We add generic `R` just in order to support smol::Timer fn send_with_timer( &self, item: T, fut: FR, ) -> impl Future>> + Send where FR: Future, T: Send + 'static + Unpin; } impl AsyncTxTrait for AsyncTx { #[inline(always)] fn clone_to_vec(self, count: usize) -> Vec { assert_eq!(count, 1); vec![self] } #[inline(always)] fn try_send(&self, item: F::Item) -> Result<(), TrySendError> { AsyncTx::try_send(self, item) } #[inline(always)] fn send(&self, item: F::Item) -> impl Future>> + Send where F::Item: Send + 'static + Unpin, { AsyncTx::send(self, item) } #[cfg(any(feature = "tokio", feature = "async_std"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "tokio", feature = "async_std"))))] #[inline(always)] fn send_timeout<'a>( &'a self, item: F::Item, duration: std::time::Duration, ) -> impl Future>> + Send where F::Item: Send + 'static + Unpin, { AsyncTx::send_timeout(self, item, duration) } #[inline(always)] fn send_with_timer( &self, item: F::Item, fut: FR, ) -> impl Future>> + Send where FR: Future, F::Item: Send + 'static + 
Unpin, { AsyncTx::send_with_timer(self, item, fut) } /// The number of messages in the channel at the moment #[inline(always)] fn len(&self) -> usize { self.as_ref().len() } /// The capacity of the channel, return None for unbounded channel. #[inline(always)] fn capacity(&self) -> Option { self.as_ref().capacity() } /// Whether channel is empty at the moment #[inline(always)] fn is_empty(&self) -> bool { self.as_ref().is_empty() } /// Whether the channel is full at the moment #[inline(always)] fn is_full(&self) -> bool { self.as_ref().is_full() } /// Return true if the other side has closed #[inline(always)] fn is_disconnected(&self) -> bool { self.as_ref().get_rx_count() == 0 } #[inline(always)] fn get_tx_count(&self) -> usize { self.as_ref().get_tx_count() } #[inline(always)] fn get_rx_count(&self) -> usize { self.as_ref().get_rx_count() } fn get_wakers_count(&self) -> (usize, usize) { self.as_ref().get_wakers_count() } } /// A multi-producer (sender) that works in an async context. /// /// Inherits from [`AsyncTx`] and implements `Clone`. /// Additional methods in [ChannelShared] can be accessed through `Deref`. /// /// You can use `into()` to convert it to `AsyncTx`. /// /// `MAsyncTx` can be converted into `MTx` via the `From` trait, /// which means you can have two types of senders, both within async and /// blocking contexts, for the same channel. 
pub struct MAsyncTx(pub(crate) AsyncTx); impl fmt::Debug for MAsyncTx { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "MAsyncTx{:p}", self) } } impl fmt::Display for MAsyncTx { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "MAsyncTx{:p}", self) } } unsafe impl Sync for MAsyncTx {} impl Clone for MAsyncTx { #[inline] fn clone(&self) -> Self { let inner = &self.0; inner.shared.add_tx(); Self(AsyncTx::new(inner.shared.clone())) } } impl From> for AsyncTx { fn from(tx: MAsyncTx) -> Self { tx.0 } } impl MAsyncTx { #[inline] pub(crate) fn new(shared: Arc>) -> Self { Self(AsyncTx::new(shared)) } #[inline] pub fn into_sink(self) -> AsyncSink { AsyncSink::new(self.0) } #[inline] pub fn into_blocking(self) -> MTx { self.into() } /// Get a weak reference of sender. /// /// # Example /// ``` /// use crossfire::*; /// let (tx, rx) = mpsc::bounded_async::(100); /// assert_eq!(tx.get_tx_count(), 1); /// let weak_tx = tx.downgrade(); /// let tx_clone = weak_tx.upgrade::>().unwrap(); /// assert_eq!(tx.get_tx_count(), 2); /// drop(tx); /// drop(tx_clone); /// assert!(weak_tx.upgrade::>().is_none()); /// assert_eq!(weak_tx.get_tx_count(), 0); /// drop(rx); /// ``` #[inline] pub fn downgrade(&self) -> WeakTx where F: FlavorMP, { WeakTx(self.shared.clone()) } } impl Deref for MAsyncTx { type Target = AsyncTx; /// inherit all the functions of [AsyncTx] #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From> for MAsyncTx { fn from(value: MTx) -> Self { value.add_tx(); Self(AsyncTx::new(value.shared.clone())) } } impl AsyncTxTrait for MAsyncTx { #[inline(always)] fn clone_to_vec(self, count: usize) -> Vec { let mut v = Vec::with_capacity(count); for _ in 0..count - 1 { v.push(self.clone()); } v.push(self); v } #[inline(always)] fn try_send(&self, item: F::Item) -> Result<(), TrySendError> { self.0.try_send(item) } #[inline(always)] fn send(&self, item: F::Item) -> impl Future>> + Send where F::Item: Send + 'static + Unpin, { 
self.0.send(item) } #[cfg(any(feature = "tokio", feature = "async_std"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "tokio", feature = "async_std"))))] #[inline(always)] fn send_timeout<'a>( &'a self, item: F::Item, duration: std::time::Duration, ) -> impl Future>> + Send where F::Item: Send + 'static + Unpin, { self.0.send_timeout(item, duration) } #[inline(always)] fn send_with_timer( &self, item: F::Item, fut: FR, ) -> impl Future>> + Send where FR: Future, F::Item: Send + 'static + Unpin, { self.0.send_with_timer::(item, fut) } /// The number of messages in the channel at the moment #[inline(always)] fn len(&self) -> usize { self.as_ref().len() } /// The capacity of the channel, return None for unbounded channel. #[inline(always)] fn capacity(&self) -> Option { self.as_ref().capacity() } /// Whether channel is empty at the moment #[inline(always)] fn is_empty(&self) -> bool { self.as_ref().is_empty() } /// Whether the channel is full at the moment #[inline(always)] fn is_full(&self) -> bool { self.as_ref().is_full() } /// Return true if the other side has closed #[inline(always)] fn is_disconnected(&self) -> bool { self.as_ref().get_rx_count() == 0 } #[inline(always)] fn get_tx_count(&self) -> usize { self.as_ref().get_tx_count() } #[inline(always)] fn get_rx_count(&self) -> usize { self.as_ref().get_rx_count() } fn get_wakers_count(&self) -> (usize, usize) { self.as_ref().get_wakers_count() } } impl Deref for AsyncTx { type Target = ChannelShared; #[inline(always)] fn deref(&self) -> &ChannelShared { &self.shared } } impl AsRef> for AsyncTx { #[inline(always)] fn as_ref(&self) -> &ChannelShared { &self.shared } } impl AsRef> for MAsyncTx { #[inline(always)] fn as_ref(&self) -> &ChannelShared { &self.0.shared } } impl> SenderType for AsyncTx { type Flavor = F; #[inline(always)] fn new(shared: Arc>) -> Self { AsyncTx::new(shared) } } impl NotCloneable for AsyncTx {} impl + FlavorMP> SenderType for MAsyncTx { type Flavor = F; #[inline(always)] fn new(shared: Arc>) 
-> Self {
        MAsyncTx::new(shared)
    }
}

================================================ FILE: src/backoff.rs ================================================

use core::num::NonZero;
use std::mem::transmute;
use std::sync::atomic::{AtomicBool, AtomicU32, Ordering};
use std::thread;

// Cap on the busy-spin exponent: spin phases run at most 1 << SPIN_LIMIT
// spin_loop() hints per step (see Backoff::spin below).
pub const SPIN_LIMIT: u16 = 6;
// Default total step budget; x86_64 uses a shorter budget than other
// architectures.
#[cfg(target_arch = "x86_64")]
pub const DEFAULT_LIMIT: u16 = 6;
#[cfg(not(target_arch = "x86_64"))]
pub const DEFAULT_LIMIT: u16 = 10;
// Hard upper bound on the step counter regardless of configuration.
pub const MAX_LIMIT: u16 = 10;
// (spin_limit, limit) packed into a u32, used to seed the global atomic.
pub const DEFAULT_CONFIG: u32 =
    BackoffConfig { spin_limit: SPIN_LIMIT, limit: DEFAULT_LIMIT }.to_u32();
// Global backoff configuration; may be rewritten once by detect_backoff_cfg().
static DETECT_CONFIG: AtomicU32 = AtomicU32::new(DEFAULT_CONFIG);
// Ensures the detection below runs at most once per process.
static _INIT: AtomicBool = AtomicBool::new(false);

/// Detects the CPU count and automatically adjusts the backoff config.
///
/// On a one-core system, it is more effective (as much as 2x faster) to yield
/// than to spin.
///
/// This function needs to be invoked manually in your initialization code; it
/// does not interrupt channel operations on other threads. Because the result
/// is saved to a global atomic, the effect applies right after execution.
///
/// We chose not to include this in the default channel initialization code,
/// because CPU detection is somewhat slow by benchmark standards, and
/// `thread::available_parallelism()` might require I/O on system files, which
/// you may not want in a sandboxed scenario.
pub fn detect_backoff_cfg() {
    // First caller wins; every later call returns immediately.
    if _INIT.compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed).is_err() {
        return;
    }
    // NOTE(review): a failed available_parallelism() is treated as one core —
    // confirm that conservative fallback is intended.
    if thread::available_parallelism().unwrap_or(NonZero::new(1).unwrap())
        == NonZero::new(1).unwrap()
    {
        // For one core (like VM machine), better use yield_now instead of spin_loop.
DETECT_CONFIG.store(
            // spin_limit = 0 disables the spin phase entirely: every snooze()
            // step yields the thread instead.
            BackoffConfig { spin_limit: 0, limit: DEFAULT_LIMIT }.to_u32(),
            Ordering::Release,
        );
    }
}

/// Tunable backoff parameters, packable into a `u32` for atomic storage.
#[derive(Debug, Clone, Copy)]
#[repr(C)]
pub struct BackoffConfig {
    // Steps below this threshold busy-spin; steps at or above it yield.
    pub spin_limit: u16,
    // Total step budget; once reached, the backoff reports completion.
    pub limit: u16,
}

impl Default for BackoffConfig {
    #[inline(always)]
    fn default() -> Self {
        // Reads the globally detected configuration (see detect_backoff_cfg).
        Self::from_u32(DETECT_CONFIG.load(Ordering::Relaxed))
    }
}

impl BackoffConfig {
    /// Returns the current globally detected configuration.
    #[inline(always)]
    pub fn detect() -> Self {
        Self::from_u32(DETECT_CONFIG.load(Ordering::Relaxed))
    }

    /// Packs this config into a `u32` (inverse of [BackoffConfig::from_u32]).
    #[inline(always)]
    pub const fn to_u32(self) -> u32 {
        // SAFETY: BackoffConfig is #[repr(C)] { u16, u16 } — exactly 4 bytes,
        // no padding, no invalid bit patterns; size equality is asserted in
        // the tests below.
        let i: u32 = unsafe { transmute(self) };
        i
    }

    /// Unpacks a config previously produced by [BackoffConfig::to_u32].
    #[inline(always)]
    pub const fn from_u32(config: u32) -> Self {
        // SAFETY: every 4-byte value is a valid (u16, u16) pair.
        unsafe { transmute(config) }
    }

    // Builder: lowers `limit` to `limit` if smaller, and sets `spin_limit`
    // to `limit` unconditionally.
    #[allow(dead_code)]
    #[inline(always)]
    pub const fn async_limit(mut self, limit: u16) -> Self {
        if limit < self.limit {
            self.limit = limit;
        }
        self.spin_limit = limit;
        self
    }

    // Builder: overrides the total step budget.
    #[allow(dead_code)]
    #[inline(always)]
    pub const fn limit(mut self, limit: u16) -> Self {
        self.limit = limit;
        self
    }

    // Builder: lowers the spin threshold (never raises it).
    #[allow(dead_code)]
    #[inline(always)]
    pub const fn spin(mut self, spin_limit: u16) -> Self {
        if spin_limit < self.spin_limit {
            self.spin_limit = spin_limit;
        }
        self
    }
}

/// Exponential backoff helper driven by a [BackoffConfig].
pub struct Backoff {
    // Current step; advanced by spin()/snooze()/yield_now().
    step: u16,
    pub config: BackoffConfig,
}

impl Backoff {
    #[inline(always)]
    pub fn new() -> Self {
        Self { step: 0, config: BackoffConfig::default() }
    }

    #[inline(always)]
    pub fn from(config: BackoffConfig) -> Self {
        Self { step: 0, config }
    }

    // Busy-spins 1 << min(step, SPIN_LIMIT) iterations, then advances the
    // step (capped at MAX_LIMIT). Returns true once the budget is exceeded.
    #[allow(dead_code)]
    #[inline(always)]
    pub fn spin(&mut self) -> bool {
        for _ in 0..1 << self.step.min(SPIN_LIMIT) {
            std::hint::spin_loop();
        }
        if self.step < MAX_LIMIT {
            self.step += 1;
            self.step > self.config.limit
        } else {
            true
        }
    }

    #[inline(always)]
    pub fn set_step(&mut self, step: u16) {
        self.step = step;
    }

    // Spins (below spin_limit) or yields (at/above it), then advances the
    // step. Returns true when the budget was already exhausted on entry.
    #[inline(always)]
    pub fn snooze(&mut self) -> bool {
        if self.step >= self.config.limit {
            return true;
        }
        if self.step < self.config.spin_limit {
            for _ in 0..1 << self.step {
                std::hint::spin_loop();
            }
        } else {
            std::thread::yield_now();
        }
        self.step += 1;
        false
    }

    #[allow(dead_code)]
    #[inline(always)]
    pub fn
yield_now(&mut self) -> bool { if self.step >= self.config.limit { return true; } std::thread::yield_now(); self.step += 1; false } #[inline(always)] pub fn is_completed(&self) -> bool { self.step >= self.config.limit } #[allow(dead_code)] #[inline(always)] pub fn step(&self) -> usize { self.step as usize } #[inline(always)] pub fn reset(&mut self) { self.step = 0; } } #[cfg(test)] mod tests { use super::*; #[test] fn test_backoff() { let backoff = Backoff::from(BackoffConfig { spin_limit: 1, limit: 0 }); assert!(backoff.is_completed()); println!("Option size {}", size_of::>()); println!("backoff size {}", size_of::()); println!("BackoffConfig size {}", size_of::()); assert_eq!(size_of::(), size_of::()); let config = BackoffConfig { spin_limit: 6, limit: 7 }; let config_i = config.to_u32(); let _config = BackoffConfig::from_u32(config_i); assert_eq!(config.spin_limit, _config.spin_limit); assert_eq!(config.limit, _config.limit); let mut backoff = Backoff::from(BackoffConfig { spin_limit: 2, limit: 4 }); assert_eq!(backoff.step, 0); backoff.spin(); assert_eq!(backoff.step, 1); backoff.snooze(); assert_eq!(backoff.step, 2); backoff.snooze(); backoff.snooze(); backoff.snooze(); backoff.snooze(); assert_eq!(backoff.step, 4); backoff.spin(); assert_eq!(backoff.step, 5); } } ================================================ FILE: src/blocking_rx.rs ================================================ use crate::backoff::*; use crate::flavor::{FlavorMC, FlavorSelect}; use crate::select::SelectResult; use crate::{shared::*, trace_log, AsyncRx, MAsyncRx, NotCloneable, ReceiverType}; use std::cell::Cell; use std::fmt; use std::marker::PhantomData; use std::ops::Deref; use std::sync::{atomic::Ordering, Arc}; use std::time::{Duration, Instant}; /// A single consumer (receiver) that works in a blocking context. /// /// Additional methods in [ChannelShared] can be accessed through `Deref`. /// /// **NOTE**: `Rx` is not `Clone` or `Sync`. 
/// If you need concurrent access, use [MRx] instead. /// /// `Rx` has a `Send` marker and can be moved to other threads. /// The following code is OK: /// /// ``` rust /// use crossfire::*; /// let (tx, rx) = mpsc::bounded_blocking::(100); /// std::thread::spawn(move || { /// let _ = rx.recv(); /// }); /// drop(tx); /// ``` /// /// Because `Rx` does not have a `Sync` marker, using `Arc` will lose the `Send` marker. /// /// For your safety, the following code **should not compile**: /// /// ``` compile_fail /// use crossfire::*; /// use std::sync::Arc; /// let (tx, rx) = mpsc::bounded_blocking(100); /// let rx = Arc::new(rx); /// std::thread::spawn(move || { /// let _ = rx.recv(); /// }); /// drop(tx); /// ``` pub struct Rx { pub(crate) shared: Arc>, // Remove the Sync marker to prevent being put in Arc _phan: PhantomData>, waker_cache: WakerCache<()>, } unsafe impl Send for Rx {} impl fmt::Debug for Rx { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Rx{:p}", self) } } impl fmt::Display for Rx { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Rx{:p}", self) } } impl Drop for Rx { #[inline(always)] fn drop(&mut self) { self.shared.close_rx(); } } impl From> for Rx { fn from(value: AsyncRx) -> Self { value.add_rx(); Self::new(value.shared.clone()) } } impl Rx { #[inline(always)] pub(crate) fn new(shared: Arc>) -> Self { Self { shared, waker_cache: WakerCache::new(), _phan: Default::default() } } #[inline(always)] pub(crate) fn _recv_blocking( &self, deadline: Option, ) -> Result { let shared = &self.shared; let mut o_waker: Option<::Waker> = None; macro_rules! on_recv_no_waker { () => {{ trace_log!("rx: recv"); }}; } macro_rules! on_recv_waker { () => {{ trace_log!("rx: recv {:?}", o_waker); self.recvs.cache_waker(o_waker, &self.waker_cache); }}; } macro_rules! 
try_recv {
            // Fast-path receive: on success run the caller-supplied waker
            // handling block, then return the item to the caller.
            ($handle_waker: block) => {
                if let Some(item) = shared.inner.try_recv() {
                    shared.on_recv();
                    $handle_waker
                    return Ok(item);
                }
            };
        }
        try_recv!({ on_recv_no_waker!() });
        // Phase 1: bounded backoff (spin then yield) before parking the thread.
        let mut cfg = BackoffConfig::detect().limit(shared.backoff_limit);
        if shared.large {
            cfg = cfg.spin(2);
        }
        let mut backoff = Backoff::from(cfg);
        loop {
            let r = backoff.snooze();
            try_recv!({ on_recv_no_waker!() });
            if r {
                break;
            }
        }
        // Phase 2: register a waker and park until woken, closed, or timed out.
        let mut state;
        'MAIN: loop {
            shared.recvs.reg_waker_blocking(&mut o_waker, &self.waker_cache);
            // NOTE: special API before we park
            // because Miri is not happy about ArrayQueue pop ordering, which is not SeqCst
            if let Some(item) = shared.inner.try_recv_final() {
                shared.on_recv();
                trace_log!("rx: recv cancel {:?} Init", o_waker);
                // Got an item before committing to wait; withdraw the waker.
                self.recvs.cancel_waker(&mut o_waker);
                return Ok(item);
            }
            state = shared.recvs.commit_waiting(&o_waker);
            trace_log!("rx: {:?} commit_waiting state={}", o_waker, state);
            if shared.is_tx_closed() {
                break 'MAIN;
            }
            // Park until the waker state advances to at least Woken.
            while state < WakerState::Woken as u8 {
                match check_timeout(deadline) {
                    Ok(None) => {
                        // No deadline configured: park until unparked.
                        std::thread::park();
                    }
                    Ok(Some(dur)) => {
                        std::thread::park_timeout(dur);
                    }
                    Err(_) => {
                        // Deadline already passed: withdraw the waker and bail.
                        shared.abandon_recv_waker(o_waker.as_ref().unwrap());
                        return Err(RecvTimeoutError::Timeout);
                    }
                }
                state = self.recvs.get_waker_state(&o_waker, Ordering::SeqCst);
                trace_log!("rx: after park state={}", state);
            }
            if state == WakerState::Closed as u8 {
                break 'MAIN;
            }
            // Woken: retry receiving with a fresh backoff before re-registering.
            backoff.reset();
            loop {
                try_recv!({ on_recv_waker!() });
                if backoff.snooze() {
                    break;
                }
            }
        }
        try_recv!({ on_recv_waker!() }); // make sure all msgs received, since we have snoozed
        Err(RecvTimeoutError::Disconnected)
    }

    /// Receives a message from the channel. This method will block until a message is received or the channel is closed.
    ///
    /// Returns `Ok(T)` on success.
    ///
    /// Returns Err([RecvError]) if the sender has been dropped.
#[inline] pub fn recv(&self) -> Result { self._recv_blocking(None).map_err(|err| match err { RecvTimeoutError::Disconnected => RecvError, RecvTimeoutError::Timeout => unreachable!(), }) } /// Attempts to receive a message from the channel without blocking. /// /// Returns `Ok(T)` when successful. /// /// Returns Err([TryRecvError::Empty]) if the channel is empty. /// /// Returns Err([TryRecvError::Disconnected]) if the sender has been dropped and the channel is empty. #[inline] pub fn try_recv(&self) -> Result { self.shared.try_recv() } /// Receives a message from the channel with a timeout. /// Will block when channel is empty. /// /// The behavior is atomic: the message is either received successfully or the operation is canceled due to a timeout. /// /// Returns `Ok(T)` when successful. /// /// Returns Err([RecvTimeoutError::Timeout]) when a message could not be received because the channel is empty and the operation timed out. /// /// Returns Err([RecvTimeoutError::Disconnected]) if the sender has been dropped and the channel is empty. #[inline] pub fn recv_timeout(&self, timeout: Duration) -> Result { match Instant::now().checked_add(timeout) { Some(deadline) => self._recv_blocking(Some(deadline)), None => self.try_recv().map_err(|e| match e { TryRecvError::Disconnected => RecvTimeoutError::Disconnected, TryRecvError::Empty => RecvTimeoutError::Timeout, }), } } /// Return true if the other side has closed #[inline(always)] pub fn is_disconnected(&self) -> bool { self.shared.is_tx_closed() } /// This method use with [select](crate::select::Select::select), guarantee non-blocking /// # Panics /// /// Panics if SelectResult from other receiver is passed. 
#[inline(always)] pub fn read_select(&self, result: SelectResult) -> Result where F: FlavorSelect, { assert_eq!( self as *const Self as *const u8, result.channel, "invalid use select with another channel" ); self.as_ref().read_with_token(result.token) } #[inline(always)] pub fn into_async(self) -> AsyncRx { self.into() } } /// A multi-consumer (receiver) that works in a blocking context. /// /// Inherits from [`Rx`] and implements `Clone`. /// Additional methods can be accessed through `Deref`. /// /// You can use `into()` to convert it to `Rx`. pub struct MRx(pub(crate) Rx); impl fmt::Debug for MRx { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "MRx{:p}", self) } } impl fmt::Display for MRx { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "MRx{:p}", self) } } unsafe impl Sync for MRx {} impl MRx where F: FlavorMC, { #[inline(always)] pub(crate) fn new(shared: Arc>) -> Self { Self(Rx::new(shared)) } #[inline(always)] pub fn into_async(self) -> MAsyncRx { self.into() } } impl Clone for MRx { #[inline(always)] fn clone(&self) -> Self { let inner = &self.0; inner.shared.add_rx(); Self(Rx::new(inner.shared.clone())) } } impl Deref for MRx { type Target = Rx; /// Inherits all the functions of [Rx]. #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From> for Rx { fn from(rx: MRx) -> Self { rx.0 } } impl From> for MRx { fn from(value: MAsyncRx) -> Self { value.add_rx(); Self(Rx::new(value.shared.clone())) } } /// For writing generic code with MRx & Rx pub trait BlockingRxTrait: Send + 'static + fmt::Debug + fmt::Display { /// Receives a message from the channel. This method will block until a message is received or the channel is closed. /// /// Returns `Ok(T)` on success. /// /// Returns Err([RecvError]) if the sender has been dropped. fn recv(&self) -> Result; /// Attempts to receive a message from the channel without blocking. /// /// Returns `Ok(T)` when successful. 
/// /// Returns Err([TryRecvError::Empty]) if the channel is empty. /// /// Returns Err([TryRecvError::Disconnected]) if the sender has been dropped and the channel is empty. fn try_recv(&self) -> Result; /// Receives a message from the channel with a timeout. /// Will block when channel is empty. /// /// Returns `Ok(T)` when successful. /// /// Returns Err([RecvTimeoutError::Timeout]) when a message could not be received because the channel is empty and the operation timed out. /// /// Returns Err([RecvTimeoutError::Disconnected]) if the sender has been dropped and the channel is empty. fn recv_timeout(&self, timeout: Duration) -> Result; /// The number of messages in the channel at the moment fn len(&self) -> usize; /// The capacity of the channel, return None for unbounded channel. fn capacity(&self) -> Option; /// Whether channel is empty at the moment fn is_empty(&self) -> bool; /// Whether the channel is full at the moment fn is_full(&self) -> bool; /// Return true if the other side has closed fn is_disconnected(&self) -> bool; /// Return the number of senders fn get_tx_count(&self) -> usize; /// Return the number of receivers fn get_rx_count(&self) -> usize; fn clone_to_vec(self, count: usize) -> Vec where Self: Sized; fn get_wakers_count(&self) -> (usize, usize); } impl BlockingRxTrait for Rx { #[inline(always)] fn clone_to_vec(self, _count: usize) -> Vec { assert_eq!(_count, 1); vec![self] } #[inline(always)] fn recv(&self) -> Result { Rx::recv(self) } #[inline(always)] fn try_recv(&self) -> Result { Rx::try_recv(self) } #[inline(always)] fn recv_timeout(&self, timeout: Duration) -> Result { Rx::recv_timeout(self, timeout) } /// The number of messages in the channel at the moment #[inline(always)] fn len(&self) -> usize { self.as_ref().len() } /// The capacity of the channel, return None for unbounded channel. 
#[inline(always)] fn capacity(&self) -> Option { self.as_ref().capacity() } /// Whether channel is empty at the moment #[inline(always)] fn is_empty(&self) -> bool { self.as_ref().is_empty() } /// Whether the channel is full at the moment #[inline(always)] fn is_full(&self) -> bool { self.as_ref().is_full() } /// Return true if the other side has closed #[inline(always)] fn is_disconnected(&self) -> bool { self.as_ref().is_tx_closed() } #[inline(always)] fn get_tx_count(&self) -> usize { self.as_ref().get_tx_count() } #[inline(always)] fn get_rx_count(&self) -> usize { self.as_ref().get_rx_count() } fn get_wakers_count(&self) -> (usize, usize) { self.as_ref().get_wakers_count() } } impl BlockingRxTrait for MRx where F: Flavor + FlavorMC, { #[inline(always)] fn clone_to_vec(self, count: usize) -> Vec { let mut v = Vec::with_capacity(count); for _ in 0..count - 1 { v.push(self.clone()); } v.push(self); v } #[inline(always)] fn recv(&self) -> Result { self.0.recv() } #[inline(always)] fn try_recv(&self) -> Result { self.0.try_recv() } #[inline(always)] fn recv_timeout(&self, timeout: Duration) -> Result { self.0.recv_timeout(timeout) } /// The number of messages in the channel at the moment #[inline(always)] fn len(&self) -> usize { self.as_ref().len() } /// The capacity of the channel, return None for unbounded channel. 
#[inline(always)] fn capacity(&self) -> Option { self.as_ref().capacity() } /// Whether channel is empty at the moment #[inline(always)] fn is_empty(&self) -> bool { self.as_ref().is_empty() } /// Whether the channel is full at the moment #[inline(always)] fn is_full(&self) -> bool { self.as_ref().is_full() } /// Return true if the other side has closed #[inline(always)] fn is_disconnected(&self) -> bool { self.as_ref().is_tx_closed() } #[inline(always)] fn get_tx_count(&self) -> usize { self.as_ref().get_tx_count() } #[inline(always)] fn get_rx_count(&self) -> usize { self.as_ref().get_rx_count() } fn get_wakers_count(&self) -> (usize, usize) { self.as_ref().get_wakers_count() } } impl Deref for Rx { type Target = ChannelShared; #[inline(always)] fn deref(&self) -> &ChannelShared { &self.shared } } impl AsRef> for Rx { #[inline(always)] fn as_ref(&self) -> &ChannelShared { &self.shared } } impl AsRef> for MRx { #[inline(always)] fn as_ref(&self) -> &ChannelShared { &self.0.shared } } impl> ReceiverType for Rx { type Flavor = F; #[inline(always)] fn new(shared: Arc>) -> Self { Rx::new(shared) } } impl NotCloneable for Rx {} impl ReceiverType for MRx where F: Flavor + FlavorMC, { type Flavor = F; #[inline(always)] fn new(shared: Arc>) -> Self { MRx::new(shared) } } ================================================ FILE: src/blocking_tx.rs ================================================ use crate::backoff::*; use crate::flavor::FlavorMP; use crate::weak::WeakTx; use crate::{shared::*, trace_log, AsyncTx, MAsyncTx, NotCloneable, SenderType}; use std::cell::Cell; use std::fmt; use std::marker::PhantomData; use std::mem::MaybeUninit; use std::ops::Deref; use std::sync::atomic::Ordering; use std::sync::Arc; use std::time::{Duration, Instant}; /// A single producer (sender) that works in a blocking context. /// /// Additional methods in [ChannelShared] can be accessed through `Deref`. /// /// **NOTE**: `Tx` is not `Clone` or `Sync`. 
/// If you need concurrent access, use [MTx] instead. /// /// `Tx` has a `Send` marker and can be moved to other threads. /// The following code is OK: /// /// ``` rust /// use crossfire::*; /// let (tx, rx) = spsc::bounded_blocking::(100); /// std::thread::spawn(move || { /// let _ = tx.send(1); /// }); /// drop(rx); /// ``` /// /// Because `Tx` does not have a `Sync` marker, using `Arc` will lose the `Send` marker. /// /// For your safety, the following code **should not compile**: /// /// ``` compile_fail /// use crossfire::*; /// use std::sync::Arc; /// let (tx, rx) = spsc::bounded_blocking::(100); /// let tx = Arc::new(tx); /// std::thread::spawn(move || { /// let _ = tx.send(1); /// }); /// drop(rx); /// ``` pub struct Tx { pub(crate) shared: Arc>, // Remove the Sync marker to prevent being put in Arc _phan: PhantomData>, waker_cache: WakerCache<*const F::Item>, } unsafe impl Send for Tx {} impl fmt::Debug for Tx { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Tx{:p}", self) } } impl fmt::Display for Tx { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Tx{:p}", self) } } impl Drop for Tx { #[inline(always)] fn drop(&mut self) { self.shared.close_tx(); } } impl From> for Tx { fn from(value: AsyncTx) -> Self { value.add_tx(); Self::new(value.shared.clone()) } } impl Tx { #[inline] pub(crate) fn new(shared: Arc>) -> Self { Self { shared, waker_cache: WakerCache::new(), _phan: Default::default() } } /// Return true if the other side has closed #[inline(always)] pub fn is_disconnected(&self) -> bool { self.shared.is_rx_closed() } #[inline] pub fn into_async(self) -> AsyncTx { self.into() } } impl Tx { #[inline(always)] pub(crate) fn _send_bounded( &self, item: &MaybeUninit, deadline: Option, ) -> Result<(), SendTimeoutError> { let shared = &self.shared; let large = shared.large; let backoff_cfg = BackoffConfig::detect().spin(2).limit(shared.backoff_limit); let mut backoff = Backoff::from(backoff_cfg); let congest = 
shared.sender_direct_copy(); // disable because of issue #54 let direct_copy = false; // let direct_copy = deadline.is_none() && shared.sender_direct_copy(); if large { backoff.set_step(2); } loop { let r = if large { backoff.yield_now() } else { backoff.spin() }; if direct_copy && large { match shared.inner.try_send_oneshot(item.as_ptr()) { Some(false) => break, None => { if r { break; } continue; } _ => { shared.on_send(); trace_log!("tx: send"); std::thread::yield_now(); return Ok(()); } } } else { if !shared.inner.try_send(item) { if r { break; } continue; } shared.on_send(); trace_log!("tx: send"); return Ok(()); } } let direct_copy_ptr: *const F::Item = std::ptr::null(); // if direct_copy { item.as_ptr() } else { std::ptr::null() }; let mut state: u8; let mut o_waker: Option<::Waker> = None; macro_rules! return_ok { () => { trace_log!("tx: send {:?}", o_waker); if shared.is_full() { // It's for 8x1, 16x1. std::thread::yield_now(); self.senders.cache_waker(o_waker, &self.waker_cache); } return Ok(()) }; } loop { self.senders.reg_waker_blocking(&mut o_waker, &self.waker_cache, direct_copy_ptr); // For nx1 (more likely congest), need to reset backoff // to allow more yield to receivers. // For nxn (the backoff is already complete), wait a little bit. 
state = shared.sender_double_check::(item, &mut o_waker); trace_log!("tx: sender_double_check {:?} state={}", o_waker, state); while state < WakerState::Woken as u8 { if congest { state = shared.sender_snooze(&o_waker, &mut backoff); } if state <= WakerState::Waiting as u8 { match check_timeout(deadline) { Ok(None) => { std::thread::park(); } Ok(Some(dur)) => { std::thread::park_timeout(dur); } Err(_) => { if shared.abandon_send_waker(o_waker.as_ref().unwrap()) { return Err(SendTimeoutError::Timeout(unsafe { item.assume_init_read() })); } else { // NOTE: Unlikely since we disable direct copy with deadline // state is WakerState::Done return Ok(()); } } } state = self.senders.get_waker_state(&o_waker, Ordering::SeqCst); trace_log!("tx: after park state={}", state); } } if state == WakerState::Woken as u8 { backoff.reset(); loop { if shared.inner.try_send(item) { shared.on_send(); return_ok!(); } if backoff.is_completed() { break; } backoff.snooze(); } } else if state == WakerState::Done as u8 { return_ok!(); } else { debug_assert_eq!(state, WakerState::Closed as u8); return Err(SendTimeoutError::Disconnected(unsafe { item.assume_init_read() })); } } } /// Sends a message. This method will block until the message is sent or the channel is closed. /// /// Returns `Ok(())` on success. /// /// Returns `Err(SendError)` if the receiver has been dropped. /// #[inline] pub fn send(&self, item: F::Item) -> Result<(), SendError> { let shared = &self.shared; if shared.is_rx_closed() { return Err(SendError(item)); } let _item = MaybeUninit::new(item); if shared.inner.try_send(&_item) { shared.on_send(); return Ok(()); } match self._send_bounded(&_item, None) { Ok(_) => Ok(()), Err(SendTimeoutError::Disconnected(e)) => Err(SendError(e)), Err(SendTimeoutError::Timeout(_)) => unreachable!(), } } /// Attempts to send a message without blocking. /// /// Returns `Ok(())` when successful. /// /// Returns Err([TrySendError::Full]) if the channel is full. 
/// /// Returns Err([TrySendError::Disconnected]) if the receiver has been dropped. #[inline] pub fn try_send(&self, item: F::Item) -> Result<(), TrySendError> { let shared = &self.shared; if shared.is_rx_closed() { return Err(TrySendError::Disconnected(item)); } let _item = MaybeUninit::new(item); if shared.inner.try_send(&_item) { shared.on_send(); Ok(()) } else { Err(TrySendError::Full(unsafe { _item.assume_init_read() })) } } /// Sends a message with a timeout. /// Will block when channel is full. /// /// The behavior is atomic: the message is either sent successfully or returned on error. /// /// Returns `Ok(())` when successful. /// /// Returns Err([SendTimeoutError::Timeout]) if the operation timed out. /// /// Returns Err([SendTimeoutError::Disconnected]) if the receiver has been dropped. #[inline] pub fn send_timeout( &self, item: F::Item, timeout: Duration, ) -> Result<(), SendTimeoutError> { let shared = &self.shared; if shared.is_rx_closed() { return Err(SendTimeoutError::Disconnected(item)); } match Instant::now().checked_add(timeout) { None => self.try_send(item).map_err(|e| match e { TrySendError::Disconnected(t) => SendTimeoutError::Disconnected(t), TrySendError::Full(t) => SendTimeoutError::Timeout(t), }), Some(deadline) => { let _item = MaybeUninit::new(item); if shared.inner.try_send(&_item) { shared.on_send(); return Ok(()); } match self._send_bounded(&_item, Some(deadline)) { Ok(_) => Ok(()), Err(e) => Err(e), } } } } } /// A multi-producer (sender) that works in a blocking context. /// /// Inherits from [`Tx`] and implements `Clone`. /// Additional methods can be accessed through `Deref`. /// /// You can use `into()` to convert it to `Tx`. 
pub struct MTx<F: Flavor>(pub(crate) Tx<F>);

impl<F: Flavor> fmt::Debug for MTx<F> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "MTx{:p}", self)
    }
}

impl<F: Flavor> fmt::Display for MTx<F> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "MTx{:p}", self)
    }
}

impl<F: Flavor> From<MTx<F>> for Tx<F> {
    fn from(tx: MTx<F>) -> Self {
        tx.0
    }
}

impl<F: Flavor + FlavorMP> From<MAsyncTx<F>> for MTx<F> {
    fn from(value: MAsyncTx<F>) -> Self {
        // The new MTx is an additional sender handle; bump the sender count
        // before cloning the shared state so the channel never looks closed.
        value.add_tx();
        Self(Tx::new(value.shared.clone()))
    }
}

// SAFETY: Tx deliberately omits Sync to stay single-producer. MTx wraps it for
// multi-producer flavors whose internal queue supports concurrent senders
// (NOTE(review): soundness relies on the FlavorMP queue implementation — confirm).
unsafe impl<F: Flavor + FlavorMP> Sync for MTx<F> {}

impl<F: Flavor + FlavorMP> MTx<F> {
    #[inline]
    pub(crate) fn new(shared: Arc<ChannelShared<F>>) -> Self {
        Self(Tx::new(shared))
    }

    /// Convert this blocking sender into its async counterpart.
    #[inline]
    pub fn into_async(self) -> MAsyncTx<F> {
        self.into()
    }

    /// Get a weak reference of sender.
    ///
    /// # Example
    /// ```
    /// use crossfire::*;
    /// let (tx, rx) = mpsc::bounded_blocking::<usize>(100);
    /// let weak_tx = tx.downgrade();
    /// assert_eq!(tx.get_tx_count(), 1);
    /// let tx_clone = weak_tx.upgrade::<MTx<_>>().unwrap();
    /// assert_eq!(tx.get_tx_count(), 2);
    /// drop(tx);
    /// drop(tx_clone);
    /// assert!(weak_tx.upgrade::<MTx<_>>().is_none());
    /// assert_eq!(weak_tx.get_tx_count(), 0);
    /// ```
    #[inline]
    pub fn downgrade(&self) -> WeakTx<F> {
        WeakTx(self.shared.clone())
    }
}

impl<F: Flavor + FlavorMP> Clone for MTx<F> {
    #[inline]
    fn clone(&self) -> Self {
        let inner = &self.0;
        // Register the clone as a live sender before handing it out.
        inner.shared.add_tx();
        Self(Tx::new(inner.shared.clone()))
    }
}

impl<F: Flavor> Deref for MTx<F> {
    type Target = Tx<F>;

    /// Inherits all the functions of [Tx].
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

/// For writing generic code with MTx & Tx
pub trait BlockingTxTrait<T: Send + 'static>: Send + 'static + fmt::Debug + fmt::Display {
    /// Sends a message. This method will block until the message is sent or the channel is closed.
    ///
    /// Returns `Ok(())` on success.
    ///
    /// Returns Err([SendError]) if the receiver has been dropped.
    fn send(&self, _item: T) -> Result<(), SendError<T>>;

    /// Attempts to send a message without blocking.
    ///
    /// Returns `Ok(())` when successful.
    ///
    /// Returns `Err([TrySendError::Full])` if the channel is full.
    ///
    /// Returns Err([TrySendError::Disconnected]) if the receiver has been dropped.
fn try_send(&self, _item: T) -> Result<(), TrySendError>; /// Sends a message with a timeout. /// Will block when channel is empty. /// /// Returns `Ok(())` when successful. /// /// Returns Err([SendTimeoutError::Timeout]) if the message could not be sent because the channel is full and the operation timed out. /// /// Returns Err([SendTimeoutError::Disconnected]) if the receiver has been dropped. fn send_timeout(&self, item: T, timeout: Duration) -> Result<(), SendTimeoutError>; /// The number of messages in the channel at the moment fn len(&self) -> usize; /// The capacity of the channel, return None for unbounded channel. fn capacity(&self) -> Option; /// Whether channel is empty at the moment fn is_empty(&self) -> bool; /// Whether the channel is full at the moment fn is_full(&self) -> bool; /// Return true if the other side has closed fn is_disconnected(&self) -> bool; /// Return the number of senders fn get_tx_count(&self) -> usize; /// Return the number of receivers fn get_rx_count(&self) -> usize; fn clone_to_vec(self, count: usize) -> Vec where Self: Sized; fn get_wakers_count(&self) -> (usize, usize); } impl BlockingTxTrait for Tx { #[inline(always)] fn clone_to_vec(self, _count: usize) -> Vec { assert_eq!(_count, 1); vec![self] } #[inline(always)] fn send(&self, item: F::Item) -> Result<(), SendError> { Tx::send(self, item) } #[inline(always)] fn try_send(&self, item: F::Item) -> Result<(), TrySendError> { Tx::try_send(self, item) } #[inline(always)] fn send_timeout( &self, item: F::Item, timeout: Duration, ) -> Result<(), SendTimeoutError> { Tx::send_timeout(self, item, timeout) } /// The number of messages in the channel at the moment #[inline(always)] fn len(&self) -> usize { self.as_ref().len() } /// The capacity of the channel, return None for unbounded channel. 
#[inline(always)] fn capacity(&self) -> Option { self.as_ref().capacity() } /// Whether channel is empty at the moment #[inline(always)] fn is_empty(&self) -> bool { self.as_ref().is_empty() } /// Whether the channel is full at the moment #[inline(always)] fn is_full(&self) -> bool { self.as_ref().is_full() } /// Return true if the other side has closed #[inline(always)] fn is_disconnected(&self) -> bool { self.as_ref().get_rx_count() == 0 } #[inline(always)] fn get_tx_count(&self) -> usize { self.as_ref().get_tx_count() } #[inline(always)] fn get_rx_count(&self) -> usize { self.as_ref().get_rx_count() } fn get_wakers_count(&self) -> (usize, usize) { self.as_ref().get_wakers_count() } } impl BlockingTxTrait for MTx { #[inline(always)] fn clone_to_vec(self, count: usize) -> Vec { let mut v = Vec::with_capacity(count); for _ in 0..count - 1 { v.push(self.clone()); } v.push(self); v } #[inline(always)] fn send(&self, item: F::Item) -> Result<(), SendError> { self.0.send(item) } #[inline(always)] fn try_send(&self, item: F::Item) -> Result<(), TrySendError> { self.0.try_send(item) } #[inline(always)] fn send_timeout( &self, item: F::Item, timeout: Duration, ) -> Result<(), SendTimeoutError> { self.0.send_timeout(item, timeout) } /// The number of messages in the channel at the moment #[inline(always)] fn len(&self) -> usize { self.as_ref().len() } /// The capacity of the channel, return None for unbounded channel. 
#[inline(always)] fn capacity(&self) -> Option { self.as_ref().capacity() } /// Whether channel is empty at the moment #[inline(always)] fn is_empty(&self) -> bool { self.as_ref().is_empty() } /// Whether the channel is full at the moment #[inline(always)] fn is_full(&self) -> bool { self.as_ref().is_full() } /// Return true if the other side has closed #[inline(always)] fn is_disconnected(&self) -> bool { self.as_ref().get_rx_count() == 0 } #[inline(always)] fn get_tx_count(&self) -> usize { self.as_ref().get_tx_count() } #[inline(always)] fn get_rx_count(&self) -> usize { self.as_ref().get_rx_count() } fn get_wakers_count(&self) -> (usize, usize) { self.as_ref().get_wakers_count() } } impl Deref for Tx { type Target = ChannelShared; #[inline(always)] fn deref(&self) -> &ChannelShared { &self.shared } } impl AsRef> for Tx { #[inline(always)] fn as_ref(&self) -> &ChannelShared { &self.shared } } impl AsRef> for MTx { #[inline(always)] fn as_ref(&self) -> &ChannelShared { &self.0.shared } } impl> SenderType for Tx { type Flavor = F; #[inline(always)] fn new(shared: Arc>) -> Self { Self::new(shared) } } impl NotCloneable for Tx {} impl + FlavorMP> SenderType for MTx { type Flavor = F; #[inline(always)] fn new(shared: Arc>) -> Self { MTx::new(shared) } } ================================================ FILE: src/collections.rs ================================================ use std::ptr; use std::sync::{ atomic::{AtomicPtr, Ordering}, Arc, Weak, }; pub struct ArcCell { ptr: AtomicPtr, } impl Drop for ArcCell { #[inline] fn drop(&mut self) { self.clear(); } } unsafe impl Send for ArcCell {} unsafe impl Sync for ArcCell {} impl ArcCell { #[inline(always)] pub fn new() -> Self { Self { ptr: AtomicPtr::new(ptr::null_mut()) } } #[inline(always)] pub fn exists(&self) -> bool { !self.ptr.load(Ordering::Acquire).is_null() } #[inline(always)] pub fn pop(&self) -> Option> { let ptr = self.ptr.swap(ptr::null_mut(), Ordering::SeqCst); if !ptr.is_null() { Some(unsafe { 
Arc::from_raw(ptr) })
        } else {
            None
        }
    }

    /// Drop the stored value (if any), leaving the cell empty.
    #[allow(dead_code)]
    #[inline(always)]
    pub fn clear(&self) {
        let ptr = self.ptr.swap(ptr::null_mut(), Ordering::SeqCst);
        if !ptr.is_null() {
            // Reclaim the Arc so its refcount is decremented and it can drop.
            let _ = unsafe { Arc::from_raw(ptr) };
        }
    }

    /// Store `item` only if the cell is currently empty; otherwise `item` is
    /// dropped (its Arc refcount released) and the existing value is kept.
    #[inline(always)]
    pub fn try_put(&self, item: Arc<T>) {
        let item_ptr = Arc::into_raw(item) as *mut T;
        match self.ptr.compare_exchange(
            ptr::null_mut(),
            item_ptr,
            Ordering::SeqCst,
            Ordering::Relaxed,
        ) {
            Ok(_) => {}
            Err(_) => {
                // Cell was occupied: take back ownership so the Arc is released.
                let _ = unsafe { Arc::from_raw(item_ptr) };
            }
        }
    }
}

/// A lock-free single-slot cell holding a `Weak<T>` behind an `AtomicPtr`.
#[allow(dead_code)]
pub struct WeakCell<T> {
    ptr: AtomicPtr<T>,
}

// SAFETY: only a raw pointer to a Weak<T> is shared; Weak<T> is Send/Sync
// when T: Send + Sync, and all slot transfers go through atomic swaps/CAS.
unsafe impl<T: Send + Sync> Send for WeakCell<T> {}
unsafe impl<T: Send + Sync> Sync for WeakCell<T> {}

impl<T> Drop for WeakCell<T> {
    #[inline]
    fn drop(&mut self) {
        // Release the stored Weak so its allocation can be freed.
        self.clear();
    }
}

impl<T> WeakCell<T> {
    #[inline(always)]
    pub fn new() -> Self {
        Self { ptr: AtomicPtr::new(ptr::null_mut()) }
    }

    /// Whether the cell currently holds no Weak reference.
    #[inline(always)]
    pub fn is_empty(&self) -> bool {
        self.ptr.load(Ordering::SeqCst).is_null()
    }

    /// Take the stored Weak (if any) and try to upgrade it to an Arc.
    /// Returns None when the cell is empty or the referent is already gone.
    #[inline(always)]
    pub fn pop(&self) -> Option<Arc<T>> {
        let mut v = self.ptr.load(Ordering::SeqCst);
        if v.is_null() {
            return None;
        }
        loop {
            match self.ptr.compare_exchange(v, ptr::null_mut(), Ordering::SeqCst, Ordering::Acquire)
            {
                Ok(_) => return unsafe { Weak::from_raw(v) }.upgrade(),
                Err(_v) => {
                    if _v.is_null() {
                        // Someone else emptied the cell concurrently.
                        return None;
                    }
                    // The slot changed under us; retry with the new value.
                    v = _v;
                }
            }
        }
    }

    // Allowed to fail spuriously; intended for one-shot, best-effort cleanup
    // of the weak reference.
    #[inline(always)]
    pub fn clear(&self) -> bool {
        // Accuracy is not required here; clearing is optional.
        let v = self.ptr.load(Ordering::Acquire);
        if v.is_null() {
            return false;
        }
        match self.ptr.compare_exchange(v, ptr::null_mut(), Ordering::Release, Ordering::Relaxed) {
            Ok(_) => {
                let _ = unsafe { Weak::from_raw(v) };
                true
            }
            Err(_v) => {
                // We don't really have to clear this on spurious failure
                false
            }
        }
    }

    /// Store `item`, dropping whatever Weak was previously in the cell.
    #[inline(always)]
    pub fn replace(&self, item: Weak<T>) {
        let old_ptr = self.ptr.swap(item.into_raw() as *mut T, Ordering::SeqCst);
        if !old_ptr.is_null() {
            // Release the displaced Weak so its weak count is decremented.
            let _ = unsafe { Weak::from_raw(old_ptr) };
        }
    }
}

#[cfg(test)]
mod tests {
    #[test]
    fn test_weak_cell() {
        use super::*;
        use
std::sync::Arc; let cell = WeakCell::new(); assert!(cell.is_empty()); let item = Arc::new(1); cell.replace(Arc::downgrade(&item)); assert!(!cell.is_empty()); let _item = cell.pop().unwrap(); assert!(cell.is_empty()); assert!(Arc::ptr_eq(&item, &_item)); cell.replace(Arc::downgrade(&item)); assert!(!cell.is_empty()); // it is allow to fail under miri println!("clear"); while !cell.clear() { assert!(!cell.is_empty()); println!("try clear again"); } assert!(cell.is_empty()); drop(_item); assert_eq!(Arc::strong_count(&item), 1); assert_eq!(Arc::weak_count(&item), 0); } } ================================================ FILE: src/compat.rs ================================================ //! compatible layer for V2.0 API //! //! # Migration from v2.* to v3 //! //! If you want to migrate to v3 API, you may add the flavor type in [MTx], [MRx], [Tx], [Rx] type, //! and change the channel initialization function accordingly. //! //! If you have a large project that use v2 API, and want to migrate gradually, //! only need to change original import from `use crossfire::*` to `use crossfire::compat::*`. //! This module provides the [CompatFlavor] which erase the difference between `List` and `Array`, //! but registry only use RegistryMulti for spsc and mpsc for compatibility. //! //! # Compatible consideration //! //! - In the legacy API, the sender/receiver types had erased the signature between bounded or unbounded channels //! - The low level queue implement is for MPMC regardless of MPSC/SPSC model (which is exactly the //! same with V2.1) //! - The module structure in `crossfire::compat::*`, is exactly the same as v2.x `crossfire::*`. //! //! # Incompatible notes //! //! - keeping Into> for `AsyncRxTrait` is not possible, due to `AsyncRxTrait` //! is erased out Flavor parameter, so we add `AsyncRxTrait::to_stream()` which returns `Pin>>`. //! //! # The reason of complete API refactor //! //! 
I know we all hate the contagious nature of generic code, and reluctant to use trait object, //! it's common practice to use static dispatch like `enum-dispatch`. Originally crossfire only //! have 2 channel variance ([CompatFlavor]), when adding more channel flavor for specific scenario, //! other than common list and array, and specialized implement for spsc, mpsc, etc, //! I notice that when the flavor enum grow from 2 types to 4+ types, //! although the positive result can be observed on Arm, there was a regression in x86 async benchmark, //! which offset the optimization effort. //! It's impossible to erased the type while keeping the performance goal having so much types. //! //! From the aspect of compiler: //! - In blocking context, the compiler can eliminate the unused branch according to the context, //! and keeping the function calls inline, unless you put multiple variant of enum together into a //! collection. //! - In async context, the compiler is ignorance, since most of the async code is indirect calls. //! We can see in generated asm from cargo-show-asm, even you initialize the channel with ArrayQueue, there's still //! SeqQueue match branch inside the `RecvFuture::poll()`. What's worse when we have 4 types //! variant in the flavor enum, the compiler think the internal queue ops function no longer worth //! to inline (because overall flatten code will be too big), and the match branch might fallen //! back to a big match table instead of simple comparison. This is the reason of performance regression. //! //! From the aspect of CPU: //! - I had tried a manual Vtable by putting method ptr inside AsyncTx/AsyncRx, which is ok on X86, //! but Arm will have -50% penalty. It looks like Arm is poor on loading / caching function ptr. //! - Generic Arm CPU has overall poor performance (1/3 ~ 1/2) compared to mainstream x86_64, and //! bad at atomic CAS, a big match branch might be not so obvious than the positive effect from //! 
changing some CAS to direct load/store in the lockless algorithm. //! //! From the aspect of API usage: //! - There're already nice native select mechanisms on async ecology, we don't have to worry about the //! difference of receiver types, for flexibility. //! - For blocking context, it might be more common scenario to select from the same type of channels for efficiency. //! - The crossbeam implementation of select is decouple from channel types and message type, which //! means the API is possible for crossfire too. use crate::flavor::{ flavor_dispatch, flavor_select_dispatch, queue_dispatch, Flavor, FlavorImpl, FlavorMC, FlavorMP, Queue, }; use crate::shared::*; pub use crate::{AsyncRxTrait, AsyncTxTrait, BlockingRxTrait, BlockingTxTrait}; use std::mem::MaybeUninit; /// Compatible flavor that wraps the Array and list type #[allow(clippy::large_enum_variant)] pub enum CompatFlavor { Array(crate::flavor::Array), List(crate::flavor::List), } macro_rules! wrap_compat { ($self: expr, $method:ident $($arg:expr)*)=>{ match $self { Self::Array(inner) => inner.$method($($arg)*), Self::List(inner) => inner.$method($($arg)*), } }; } impl Queue for CompatFlavor { type Item = T; queue_dispatch!(wrap_compat); } impl FlavorImpl for CompatFlavor { flavor_dispatch!(wrap_compat); } impl FlavorSelect for CompatFlavor { flavor_select_dispatch!(wrap_compat); } impl FlavorMP for CompatFlavor {} impl FlavorMC for CompatFlavor {} // There's not much performance difference between old RegistrySingle and RegistryMulti, // we just use RegistryMulti here since this is just for compatible reason. 
impl Flavor for CompatFlavor { type Send = RegistryMultiSend; type Recv = RegistryMultiRecv; } #[inline(always)] fn new_list() -> CompatFlavor { CompatFlavor::::List(crate::flavor::List::new()) } #[inline(always)] fn new_array(mut size: usize) -> CompatFlavor { if size <= 1 { size = 1; } CompatFlavor::::Array(crate::flavor::Array::::new(size)) } pub type Tx = crate::Tx>; pub type MTx = crate::MTx>; pub type Rx = crate::Rx>; pub type MRx = crate::MRx>; pub type AsyncTx = crate::AsyncTx>; pub type MAsyncTx = crate::MAsyncTx>; pub type AsyncRx = crate::AsyncRx>; pub type MAsyncRx = crate::MAsyncRx>; pub use crate::{ RecvError, RecvTimeoutError, SendError, SendTimeoutError, TryRecvError, TrySendError, }; pub mod sink { use super::*; pub type AsyncSink = crate::sink::AsyncSink>; } pub mod stream { use super::*; pub type AsyncStream = crate::stream::AsyncStream>; } pub mod spsc { use super::*; macro_rules! init_share { ($flavor: expr) => {{ ChannelShared::new($flavor, RegistryMultiSend::new(), RegistryMultiRecv::new()) }}; } /// Creates an unbounded channel for use in a blocking context. /// /// The sender will never block, so we use the same `Tx` for all threads. pub fn unbounded_blocking() -> (Tx, Rx) { let shared = init_share!(new_list::()); let tx = Tx::new(shared.clone()); let rx = Rx::new(shared); (tx, rx) } /// Creates an unbounded channel for use in an async context. /// /// The sender will never block, so we use the same `Tx` for all threads. pub fn unbounded_async() -> (Tx, AsyncRx) { let shared = init_share!(new_list::()); let tx = Tx::new(shared.clone()); let rx = AsyncRx::new(shared); (tx, rx) } /// Creates a bounded channel for use in a blocking context. /// /// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1. 
// NOTE(review): the extraction of this file stripped every `<...>` span; the
// generic parameter lists below are reconstructed as plain `<T>`. Confirm the
// exact trait bounds (e.g. `T: Send + 'static`) against the upstream repository.
pub fn bounded_blocking<T>(size: usize) -> (Tx<T>, Rx<T>) {
    let shared = init_share!(new_array::<T>(size));
    let tx = Tx::new(shared.clone());
    let rx = Rx::new(shared);
    (tx, rx)
}

/// Creates a bounded channel where both the sender and receiver are async.
///
/// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.
pub fn bounded_async<T>(size: usize) -> (AsyncTx<T>, AsyncRx<T>) {
    let shared = init_share!(new_array::<T>(size));
    let tx = AsyncTx::new(shared.clone());
    let rx = AsyncRx::new(shared);
    (tx, rx)
}

/// Creates a bounded channel where the sender is async and the receiver is blocking.
///
/// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.
pub fn bounded_tx_async_rx_blocking<T>(size: usize) -> (AsyncTx<T>, Rx<T>) {
    let shared = init_share!(new_array::<T>(size));
    let tx = AsyncTx::new(shared.clone());
    let rx = Rx::new(shared);
    (tx, rx)
}

/// Creates a bounded channel where the sender is blocking and the receiver is async.
///
/// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.
pub fn bounded_tx_blocking_rx_async<T>(size: usize) -> (Tx<T>, AsyncRx<T>) {
    let shared = init_share!(new_array::<T>(size));
    let tx = Tx::new(shared.clone());
    let rx = AsyncRx::new(shared);
    (tx, rx)
}
} // closes the enclosing module (opened before this chunk)

pub mod mpsc {
    use super::*;

    // NOTE(review): this MPSC module registers a *multi*-receiver registry even
    // though the receiver side is single-consumer (`Rx`/`AsyncRx`). Looks
    // intentional (v2-compat behavior) but worth confirming it is not a
    // copy-paste from `mpmc` — TODO confirm against upstream.
    macro_rules! init_share {
        ($flavor: expr) => {{
            ChannelShared::new($flavor, RegistryMultiSend::new(), RegistryMultiRecv::new())
        }};
    }

    /// Creates an unbounded channel for use in a blocking context.
    ///
    /// The sender will never block, so we use the same `Tx` for all threads.
    pub fn unbounded_blocking<T>() -> (MTx<T>, Rx<T>) {
        let shared = init_share!(new_list::<T>());
        let tx = MTx::new(shared.clone());
        let rx = Rx::new(shared);
        (tx, rx)
    }

    /// Creates an unbounded channel for use in an async context.
    ///
    /// Although the sender type is `MTx`, it will never block.
    pub fn unbounded_async<T>() -> (MTx<T>, AsyncRx<T>) {
        let shared = init_share!(new_list::<T>());
        let tx = MTx::new(shared.clone());
        let rx = AsyncRx::new(shared);
        (tx, rx)
    }

    /// Creates a bounded channel for use in a blocking context.
    ///
    /// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.
    pub fn bounded_blocking<T>(size: usize) -> (MTx<T>, Rx<T>) {
        let shared = init_share!(new_array::<T>(size));
        let tx = MTx::new(shared.clone());
        let rx = Rx::new(shared);
        (tx, rx)
    }

    /// Creates a bounded channel where both the sender and receiver are async.
    ///
    /// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.
    pub fn bounded_async<T>(size: usize) -> (MAsyncTx<T>, AsyncRx<T>) {
        let shared = init_share!(new_array::<T>(size));
        let tx = MAsyncTx::new(shared.clone());
        let rx = AsyncRx::new(shared);
        (tx, rx)
    }

    /// Creates a bounded channel where the sender is async and the receiver is blocking.
    ///
    /// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.
    pub fn bounded_tx_async_rx_blocking<T>(size: usize) -> (MAsyncTx<T>, Rx<T>) {
        let shared = init_share!(new_array::<T>(size));
        let tx = MAsyncTx::new(shared.clone());
        let rx = Rx::new(shared);
        (tx, rx)
    }

    /// Creates a bounded channel where the sender is blocking and the receiver is async.
    ///
    /// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.
    pub fn bounded_tx_blocking_rx_async<T>(size: usize) -> (MTx<T>, AsyncRx<T>) {
        let shared = init_share!(new_array::<T>(size));
        let tx = MTx::new(shared.clone());
        let rx = AsyncRx::new(shared);
        (tx, rx)
    }
}

pub mod mpmc {
    //! v2 API Compatible Multiple producers, multiple consumers.
    use super::*;

    macro_rules! init_share {
        ($flavor: expr) => {{
            ChannelShared::new($flavor, RegistryMultiSend::new(), RegistryMultiRecv::new())
        }};
    }

    /// Creates an unbounded channel for use in a blocking context.
    ///
    /// The sender will never block, so we use the same `Tx` for all threads.
    pub fn unbounded_blocking<T>() -> (MTx<T>, MRx<T>) {
        let shared = init_share!(new_list::<T>());
        let tx = MTx::new(shared.clone());
        let rx = MRx::new(shared);
        (tx, rx)
    }

    /// Creates an unbounded channel for use in an async context.
    ///
    /// Although the sender type is `MTx`, it will never block.
    pub fn unbounded_async<T>() -> (MTx<T>, MAsyncRx<T>) {
        let shared = init_share!(new_list::<T>());
        let tx = MTx::new(shared.clone());
        let rx = MAsyncRx::new(shared);
        (tx, rx)
    }

    /// Creates a bounded channel for use in a blocking context.
    ///
    /// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.
    pub fn bounded_blocking<T>(size: usize) -> (MTx<T>, MRx<T>) {
        let shared = init_share!(new_array::<T>(size));
        let tx = MTx::new(shared.clone());
        let rx = MRx::new(shared);
        (tx, rx)
    }

    /// Creates a bounded channel for use in an async context.
    ///
    /// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.
    pub fn bounded_async<T>(size: usize) -> (MAsyncTx<T>, MAsyncRx<T>) {
        let shared = init_share!(new_array::<T>(size));
        let tx = MAsyncTx::new(shared.clone());
        let rx = MAsyncRx::new(shared);
        (tx, rx)
    }

    /// Creates a bounded channel where the sender is async and the receiver is blocking.
    ///
    /// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.
    pub fn bounded_tx_async_rx_blocking<T>(size: usize) -> (MAsyncTx<T>, MRx<T>) {
        let shared = init_share!(new_array::<T>(size));
        let tx = MAsyncTx::new(shared.clone());
        let rx = MRx::new(shared);
        (tx, rx)
    }

    /// Creates a bounded channel where the sender is blocking and the receiver is async.
    ///
    /// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.
    pub fn bounded_tx_blocking_rx_async<T>(size: usize) -> (MTx<T>, MAsyncRx<T>) {
        let shared = init_share!(new_array::<T>(size));
        let tx = MTx::new(shared.clone());
        let rx = MAsyncRx::new(shared);
        (tx, rx)
    }
}

================================================ FILE: src/crossbeam/array_queue.rs ================================================
//! Modify by frostyplanet@gmail.com for the crossfire crate:
//!
//! - Optimise for single consumer scenario;
//! - Add token interface according to crossbeam-channel
//! - Modified push() to push_with_ptr();
//! - Add try_push_oneshot() which combines the logic of push and check_full in one step;
//! - Remove unused functions.
//!
//! Fork from crossbeam-queue crate commit 5a154def002304814d50f3c7658bd30eb46b2fad
//!
//! The MIT License (MIT)
//!
//! Copyright (c) 2025, 2026 frostyplanet@gmail.com
//!
//! Copyright (c) 2019 The Crossbeam Project Developers
//!
//! Permission is hereby granted, free of charge, to any
//! person obtaining a copy of this software and associated
//! documentation files (the "Software"), to deal in the
//! Software without restriction, including without
//! limitation the rights to use, copy, modify, merge,
//! publish, distribute, sublicense, and/or sell copies of
//! the Software, and to permit persons to whom the Software
//! is furnished to do so, subject to the following
//! conditions:
//!
//! The above copyright notice and this permission notice
//! shall be included in all copies or substantial portions
//! of the Software.
//!
//! THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
//! ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
//! TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
//! PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
//! SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
//! CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
//! OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
//! IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE. //! //! The implementation is based on Dmitry Vyukov's bounded MPMC queue. //! //! Source: //! - use core::cell::UnsafeCell; use crate::flavor::Token; use core::mem::{self, MaybeUninit}; use core::panic::{RefUnwindSafe, UnwindSafe}; use core::ptr; use core::sync::atomic::{self, AtomicUsize, Ordering}; use crossbeam_utils::{Backoff, CachePadded}; /// A slot in a queue. struct Slot { /// The current stamp. /// /// If the stamp equals the tail, this node will be next written to. If it equals head + 1, /// this node will be next read from. stamp: AtomicUsize, /// The value in this slot. value: UnsafeCell>, } /// A bounded multi-producer multi-consumer queue. /// /// This queue allocates a fixed-capacity buffer on construction, which is used to store pushed /// elements. The queue cannot hold more elements than the buffer allows. Attempting to push an /// element into a full queue will fail. Alternatively, [`force_push`] makes it possible for /// this queue to be used as a ring-buffer. Having a buffer allocated upfront makes this queue /// a bit faster than [`SegQueue`]. /// /// [`SegQueue`]: super::SegQueue pub struct ArrayQueue { /// The head of the queue. /// /// This value is a "stamp" consisting of an index into the buffer and a lap, but packed into a /// single `usize`. The lower bits represent the index, while the upper bits represent the lap. /// /// Elements are popped from the head of the queue. head: CachePadded, /// The tail of the queue. /// /// This value is a "stamp" consisting of an index into the buffer and a lap, but packed into a /// single `usize`. The lower bits represent the index, while the upper bits represent the lap. /// /// Elements are pushed into the tail of the queue. tail: CachePadded, /// The buffer holding slots. buffer: Box<[Slot]>, /// A stamp with the value of `{ lap: 1, index: 0 }`. 
one_lap: usize, } unsafe impl Sync for ArrayQueue {} unsafe impl Send for ArrayQueue {} impl UnwindSafe for ArrayQueue {} impl RefUnwindSafe for ArrayQueue {} impl ArrayQueue { /// Creates a new bounded queue with the given capacity. /// /// # Panics /// /// Panics if the capacity is zero. pub fn new(cap: usize) -> Self { assert!(cap > 0, "capacity must be non-zero"); // Head is initialized to `{ lap: 0, index: 0 }`. // Tail is initialized to `{ lap: 0, index: 0 }`. let head = 0; let tail = 0; // Allocate a buffer of `cap` slots initialized // with stamps. let buffer: Box<[Slot]> = (0..cap) .map(|i| { // Set the stamp to `{ lap: 0, index: i }`. Slot { stamp: AtomicUsize::new(i), value: UnsafeCell::new(MaybeUninit::uninit()) } }) .collect(); // One lap is the smallest power of two greater than `cap`. let one_lap = (cap + 1).next_power_of_two(); Self { buffer, one_lap, head: CachePadded::new(AtomicUsize::new(head)), tail: CachePadded::new(AtomicUsize::new(tail)), } } /// This function is optimise for channel suspected to be full, /// It's an equal replacement to is_full(), if not try only oneshot, /// return Ok(true) when push ok, Ok(false) when channel is full. /// None when uncertain (normally needs a loop) #[allow(dead_code)] #[inline(always)] pub unsafe fn try_push_oneshot(&self, value: *const T) -> Option { // Use two SeqCst to compare tail & head, it's an equal replacement to is_full() let tail = self.tail.load(Ordering::SeqCst); macro_rules! check_full { ($tail: expr) => { let head = self.head.load(Ordering::SeqCst); // If the head lags one lap behind the tail as well... if head.wrapping_add(self.one_lap) == $tail { // ...then the queue is full. 
return Some(false); } }; } check_full!(tail); match self._try_push(tail, value) { Ok(_) => Some(true), Err((_stamp, _new_tail)) => { // after the first check_full with both loads are SeqCst, this is unlikely full, but also a hot path None } } } /// return stamp, new_tail #[inline] fn _try_push(&self, tail: usize, value: *const T) -> Result)> { let cap = self.capacity(); // Deconstruct the tail. let index = tail & (self.one_lap - 1); // Inspect the corresponding slot. debug_assert!(index < self.buffer.len()); let slot = unsafe { self.buffer.get_unchecked(index) }; let stamp = slot.stamp.load(Ordering::Acquire); // If the tail and the stamp match, we may attempt to push. if tail == stamp { let new_tail = if index + 1 < cap { // Same lap, incremented index. // Set to `{ lap: lap, index: index + 1 }`. tail + 1 } else { let lap = tail & !(self.one_lap - 1); // One lap forward, index wraps around to zero. // Set to `{ lap: lap.wrapping_add(1), index: 0 }`. lap.wrapping_add(self.one_lap) }; if MP { // Try moving the tail. if let Err(t) = self.tail.compare_exchange_weak( tail, new_tail, Ordering::SeqCst, Ordering::Relaxed, ) { return Err((stamp, Some(t))); } } else { self.tail.store(new_tail, Ordering::SeqCst); } // Write the value into the slot and update the stamp. unsafe { let item: &mut MaybeUninit = &mut *slot.value.get(); item.write(ptr::read(value)); } slot.stamp.store(tail + 1, Ordering::Release); Ok(true) } else { Err((stamp, None)) } } #[inline(always)] pub unsafe fn push_with_ptr(&self, value: *const T) -> bool { let backoff = Backoff::new(); let mut tail = if MP { self.tail.load(Ordering::Relaxed) } else { self.tail.load(Ordering::Acquire) }; macro_rules! check_full { ($tail: expr) => { let head = if MP || MC { // NOTE: The fence is preventing livestock atomic::fence(Ordering::SeqCst); self.head.load(Ordering::Relaxed) } else { self.head.load(Ordering::SeqCst) }; // If the head lags one lap behind the tail as well... 
if head.wrapping_add(self.one_lap) == $tail { // ...then the queue is full. return false; } }; } loop { match self._try_push(tail, value) { Ok(res) => return res, Err((stamp, new_tail)) => { if let Some(_tail) = new_tail { tail = _tail; backoff.spin(); continue; } if stamp.wrapping_add(self.one_lap) == tail + 1 { check_full!(tail); } backoff.snooze(); if MP { tail = self.tail.load(Ordering::Relaxed); } } } } } #[inline] pub fn start_read(&self, final_check: bool) -> Option { if let Some((slot, stamp)) = self._start_read(final_check) { Some(Token::new(slot as *const Slot as *const u8, stamp)) } else { None } } #[inline] pub fn pop(&self, final_check: bool) -> Option { if let Some((slot, stamp)) = self._start_read(final_check) { let msg = unsafe { slot.value.get().read().assume_init() }; slot.stamp.store(stamp, Ordering::Release); Some(msg) } else { None } } #[inline] fn _start_read(&self, final_check: bool) -> Option<(&Slot, usize)> { let mut head; if final_check { // because we need to check is_empty before park, // use SeqCst to make Miri happy head = self.head.load(Ordering::SeqCst); } else { let order = if MC { Ordering::Relaxed } else { Ordering::Acquire }; head = self.head.load(order); } let backoff = Backoff::new(); loop { // Deconstruct the head. let index = head & (self.one_lap - 1); // Inspect the corresponding slot. debug_assert!(index < self.buffer.len()); let slot = unsafe { self.buffer.get_unchecked(index) }; let stamp = slot.stamp.load(Ordering::Acquire); // If the stamp is ahead of the head by 1, we may attempt to pop. if head + 1 == stamp { let new = if index + 1 < self.capacity() { // Same lap, incremented index. // Set to `{ lap: lap, index: index + 1 }`. head + 1 } else { let lap = head & !(self.one_lap - 1); // One lap forward, index wraps around to zero. // Set to `{ lap: lap.wrapping_add(1), index: 0 }`. lap.wrapping_add(self.one_lap) }; if MC { // Try moving the head. 
if let Err(new_head) = self.head.compare_exchange_weak( head, new, Ordering::SeqCst, Ordering::Relaxed, ) { head = new_head; backoff.spin(); continue; } } else { self.head.store(new, Ordering::SeqCst); } let new_head = head.wrapping_add(self.one_lap); return Some((slot, new_head)); } else { if stamp == head { // Check full let tail = if MP || MC { // NOTE: The fence is preventing live lock atomic::fence(Ordering::SeqCst); self.tail.load(Ordering::Relaxed) } else { self.tail.load(Ordering::SeqCst) }; // If the tail equals the head, that means the channel is empty. if tail == head { return None; } backoff.spin(); } else { // Snooze because we need to wait for the stamp to get updated. backoff.snooze(); } if MC { head = self.head.load(Ordering::Relaxed); } continue; } } } #[inline(always)] pub fn read(&self, token: Token) -> T { let slot: &Slot = unsafe { &*token.pos.cast::>() }; let msg = unsafe { slot.value.get().read().assume_init() }; slot.stamp.store(token.stamp, Ordering::Release); msg } /// Returns the capacity of the queue. #[inline] pub fn capacity(&self) -> usize { self.buffer.len() } /// Returns `true` if the queue is empty. #[inline(always)] pub fn is_empty(&self) -> bool { let head = self.head.load(Ordering::SeqCst); let tail = self.tail.load(Ordering::SeqCst); // Is the tail lagging one lap behind head? // Is the tail equal to the head? // // Note: If the head changes just before we load the tail, that means there was a moment // when the channel was not empty, so it is safe to just return `false`. tail == head } /// Returns `true` if the queue is full. #[inline(always)] pub fn is_full(&self) -> bool { let tail = self.tail.load(Ordering::SeqCst); let head = self.head.load(Ordering::SeqCst); // Is the head lagging one lap behind tail? // // Note: If the tail changes just before we load the head, that means there was a moment // when the queue was not full, so it is safe to just return `false`. 
head.wrapping_add(self.one_lap) == tail } /// Returns the number of elements in the queue. #[inline] pub fn len(&self) -> usize { loop { // Load the tail, then load the head. let tail = self.tail.load(Ordering::SeqCst); let head = self.head.load(Ordering::SeqCst); // If the tail didn't change, we've got consistent values to work with. if self.tail.load(Ordering::SeqCst) == tail { let hix = head & (self.one_lap - 1); let tix = tail & (self.one_lap - 1); return if hix < tix { tix - hix } else if hix > tix { self.capacity() - hix + tix } else if tail == head { 0 } else { self.capacity() }; } } } } impl Drop for ArrayQueue { fn drop(&mut self) { if mem::needs_drop::() { // Get the index of the head. let head = *self.head.get_mut(); let tail = *self.tail.get_mut(); let hix = head & (self.one_lap - 1); let tix = tail & (self.one_lap - 1); let len = if hix < tix { tix - hix } else if hix > tix { self.capacity() - hix + tix } else if tail == head { 0 } else { self.capacity() }; // Loop over all slots that hold a message and drop them. for i in 0..len { // Compute the index of the next slot holding a message. let index = if hix + i < self.capacity() { hix + i } else { hix + i - self.capacity() }; unsafe { debug_assert!(index < self.buffer.len()); let slot = self.buffer.get_unchecked_mut(index); (*slot.value.get()).assume_init_drop(); } } } } } ================================================ FILE: src/crossbeam/array_queue_mpsc.rs ================================================ //! Modify by frostyplanet@gmail.com for the crossfire crate: //! //! - Optimise for MPSC scenario; //! - Pack head/tail for cache efficiency; //! - Add token interface according to crossbeam-channel //! - Modified push() to push_with_ptr(); //! - Add try_push_oneshot(); //! //! Fork from crossbeam-queue crate commit 5a154def002304814d50f3c7658bd30eb46b2fad //! //! The MIT License (MIT) //! //! Copyright (c) 2025, 2026 frostyplanet@gmail.com //! //! 
Copyright (c) 2019 The Crossbeam Project Developers //! //! Permission is hereby granted, free of charge, to any //! person obtaining a copy of this software and associated //! documentation files (the "Software"), to deal in the //! Software without restriction, including without //! limitation the rights to use, copy, modify, merge, //! publish, distribute, sublicense, and/or sell copies of //! the Software, and to permit persons to whom the Software //! is furnished to do so, subject to the following //! conditions: //! //! The above copyright notice and this permission notice //! shall be included in all copies or substantial portions //! of the Software. //! //! THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF //! ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED //! TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A //! PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT //! SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY //! CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION //! OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR //! IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER //! DEALINGS IN THE SOFTWARE. //! //! The implementation is based on Dmitry Vyukov's bounded MPMC queue. //! //! Source: //! - use core::cell::UnsafeCell; use crate::flavor::Token; use core::mem::{self, MaybeUninit}; use core::panic::{RefUnwindSafe, UnwindSafe}; use core::ptr; use core::sync::atomic::{AtomicU64, AtomicUsize, Ordering}; use crossbeam_utils::{Backoff, CachePadded}; /// A slot in a queue. struct Slot { /// The current stamp. stamp: AtomicUsize, /// The value in this slot. value: UnsafeCell>, } /// A bounded multi-producer single-consumer queue. pub struct ArrayQueueMpsc { /// The sender state. /// /// High bits: head_cached /// Low bits: tail sender: CachePadded, /// The receiver state. /// /// High bits: tail_cached /// Low bits: head recv: CachePadded, /// The buffer holding slots. 
buffer: Box<[Slot]>, /// A stamp with the value of `{ lap: 1, index: 0 }`. one_lap: u32, } unsafe impl Sync for ArrayQueueMpsc {} unsafe impl Send for ArrayQueueMpsc {} impl UnwindSafe for ArrayQueueMpsc {} impl RefUnwindSafe for ArrayQueueMpsc {} impl ArrayQueueMpsc { /// Creates a new bounded queue with the given capacity. /// /// # Panics /// /// Panics if the capacity is zero. pub fn new(cap: usize) -> Self { assert!(cap > 0, "capacity must be non-zero"); assert!(cap < (1 << 31), "capacity too large for u32 logic"); // Head is initialized to `{ lap: 0, index: 0 }`. // Tail is initialized to `{ lap: 0, index: 0 }`. let head = 0; let tail = 0; // Allocate a buffer of `cap` slots initialized // with stamps. let buffer: Box<[Slot]> = (0..cap) .map(|i| { // Set the stamp to `i`. Slot { stamp: AtomicUsize::new(i), value: UnsafeCell::new(MaybeUninit::uninit()) } }) .collect(); // One lap is the smallest power of two greater than `cap`. let one_lap = (cap + 1).next_power_of_two() as u32; Self { buffer, one_lap, recv: CachePadded::new(AtomicU64::new(((tail as u64) << 32) | (head as u64))), sender: CachePadded::new(AtomicU64::new(((head as u64) << 32) | (tail as u64))), } } #[inline(always)] fn _try_push( &self, sender_val: u64, tail: u32, head_cached: u32, value: *const T, ) -> Result { let index = (tail & (self.one_lap - 1)) as usize; let new_tail = if index + 1 < self.buffer.len() { tail + 1 } else { let lap = tail & !(self.one_lap - 1); lap.wrapping_add(self.one_lap) }; let new_sender_val = ((head_cached as u64) << 32) | (new_tail as u64); match self.sender.compare_exchange_weak( sender_val, new_sender_val, Ordering::SeqCst, Ordering::Acquire, ) { Ok(_) => { debug_assert!(index < self.buffer.len()); unsafe { let slot = self.buffer.get_unchecked(index); let item: &mut MaybeUninit = &mut *slot.value.get(); item.write(ptr::read(value)); slot.stamp.store((tail as usize).wrapping_add(1), Ordering::Release); } Ok(true) } Err(current) => Err(current), } } #[inline(always)] 
pub unsafe fn push_with_ptr(&self, value: *const T) -> bool { let backoff = Backoff::new(); let mut sender_val = self.sender.load(Ordering::Relaxed); loop { let tail = sender_val as u32; let mut head_cached = (sender_val >> 32) as u32; if head_cached.wrapping_add(self.one_lap) == tail { backoff.spin(); let head = self.recv.load(Ordering::SeqCst) as u32; if head == head_cached { return false; } head_cached = head; } match self._try_push(sender_val, tail, head_cached, value) { Ok(res) => return res, Err(current) => { sender_val = current; backoff.snooze(); } } } } /// This function is optimise for channel suspected to be full, /// It's an equal replacement to is_full(), if not try only oneshot, /// return Ok(true) when push ok, Ok(false) when channel is full. /// None when uncertain (normally needs a loop) #[inline(always)] pub unsafe fn try_push_oneshot(&self, value: *const T) -> Option { let sender_val = self.sender.load(Ordering::SeqCst); let tail = sender_val as u32; let mut head_cached = (sender_val >> 32) as u32; if head_cached.wrapping_add(self.one_lap) == tail { let head = self.recv.load(Ordering::SeqCst) as u32; if head == head_cached { return Some(false); } head_cached = head; } self._try_push(sender_val, tail, head_cached, value).ok() } #[inline] pub fn start_read(&self, final_check: bool) -> Option { if let Some((head, tail_cached)) = self._start_read::(final_check) { let (slot, packed_recv) = self._read(head, tail_cached); Some(Token::new(slot as *const Slot as *const u8, packed_recv as usize)) } else { None } } #[inline] pub fn pop(&self, final_check: bool) -> Option { if let Some((head, tail_cached)) = self._start_read::(final_check) { let (slot, packed_recv) = self._read(head, tail_cached); let msg = unsafe { slot.value.get().read().assume_init() }; // Update recv (which contains head) to free the slot. 
self.recv.store(packed_recv, Ordering::SeqCst); Some(msg) } else { None } } #[inline] pub fn pop_cached(&self) -> Option { if let Some((head, tail_cached)) = self._start_read::(false) { let (slot, packed_recv) = self._read(head, tail_cached); let msg = unsafe { slot.value.get().read().assume_init() }; // Update recv (which contains head) to free the slot. self.recv.store(packed_recv, Ordering::SeqCst); Some(msg) } else { None } } /// return head, tail_cached #[inline] fn _start_read(&self, _final_check: bool) -> Option<(u32, u32)> { let recv_val = self.recv.load(Ordering::Relaxed); let head = recv_val as u32; let mut tail_cached = (recv_val >> 32) as u32; if tail_cached == head { if SPIN { core::hint::spin_loop(); let tail = if _final_check { self.sender.load(Ordering::SeqCst) as u32 } else { self.sender.load(Ordering::Acquire) as u32 }; if head == tail { return None; } tail_cached = tail; } else { return None; } } Some((head, tail_cached)) } #[inline] fn _read(&self, head: u32, tail_cached: u32) -> (&Slot, u64) { // Deconstruct the head. let index = (head & (self.one_lap - 1)) as usize; debug_assert!(index < self.buffer.len()); let slot = unsafe { self.buffer.get_unchecked(index) }; // Wait for stamp update let target_stamp = (head as usize).wrapping_add(1); loop { let stamp = slot.stamp.load(Ordering::Acquire); if stamp == target_stamp { break; } core::hint::spin_loop(); } // Update head let new_head = if index + 1 < self.buffer.len() { head + 1 } else { let lap = head & !(self.one_lap - 1); lap.wrapping_add(self.one_lap) }; (slot, ((tail_cached as u64) << 32) | (new_head as u64)) } #[inline(always)] pub fn read(&self, token: Token) -> T { let slot: &Slot = unsafe { &*token.pos.cast::>() }; let msg = unsafe { slot.value.get().read().assume_init() }; // Do not update stamp self.recv.store(token.stamp as u64, Ordering::SeqCst); msg } /// Returns the capacity of the queue. 
#[inline] pub fn capacity(&self) -> usize { self.buffer.len() } /// Returns `true` if the queue is empty. #[inline(always)] pub fn is_empty(&self) -> bool { let head = self.recv.load(Ordering::SeqCst) as u32; let tail = self.sender.load(Ordering::SeqCst) as u32; tail == head } /// Returns `true` if the queue is full. #[inline(always)] pub fn is_full(&self) -> bool { let tail = self.sender.load(Ordering::SeqCst) as u32; let head = self.recv.load(Ordering::SeqCst) as u32; head.wrapping_add(self.one_lap) == tail } /// Returns the number of elements in the queue. #[inline] pub fn len(&self) -> usize { loop { let tail = self.sender.load(Ordering::SeqCst) as u32; let head = self.recv.load(Ordering::SeqCst) as u32; if self.sender.load(Ordering::SeqCst) as u32 == tail { let hix = head & (self.one_lap - 1); let tix = tail & (self.one_lap - 1); return if hix < tix { (tix - hix) as usize } else if hix > tix { self.capacity() - (hix - tix) as usize } else if tail == head { 0 } else { self.capacity() }; } } } } impl Drop for ArrayQueueMpsc { fn drop(&mut self) { if mem::needs_drop::() { let recv_val = *self.recv.get_mut(); let sender_val = *self.sender.get_mut(); let head = recv_val as u32; let tail = sender_val as u32; let hix = head & (self.one_lap - 1); let tix = tail & (self.one_lap - 1); let len = if hix < tix { tix - hix } else if hix > tix { self.capacity() as u32 - hix + tix } else if tail == head { 0 } else { self.capacity() as u32 }; for i in 0..(len as usize) { let index = if (hix as usize) + i < self.capacity() { (hix as usize) + i } else { (hix as usize) + i - self.capacity() }; unsafe { debug_assert!(index < self.buffer.len()); let slot = self.buffer.get_unchecked_mut(index); (*slot.value.get()).assume_init_drop(); } } } } } ================================================ FILE: src/crossbeam/array_queue_spsc.rs ================================================ //! Modify by frostyplanet@gmail.com for the crossfire crate: //! //! 
- Modify for SPSC, remove the `stamp` field //! - Optimization: pack head/tail and their cached counterparts into single AtomicU64 (u32 each). //! - Add token interface according to crossbeam-channel //! - Modified push() to push_with_ptr(); //! - Add try_push_oneshot() which combinds the logic of push and check_full in one step; //! - Remove unused functions. //! //! Fork from crossbeam-queue crate commit 5a154def002304814d50f3c7658bd30eb46b2fad //! //! The MIT License (MIT) //! //! Copyright (c) 2025, 2026 frostyplanet@gmail.com //! //! Copyright (c) 2019 The Crossbeam Project Developers //! //! Permission is hereby granted, free of charge, to any //! person obtaining a copy of this software and associated //! documentation files (the "Software"), to deal in the //! Software without restriction, including without //! limitation the rights to use, copy, modify, merge, //! publish, distribute, sublicense, and/or sell copies of //! the Software, and to permit persons to whom the Software //! is furnished to do so, subject to the following //! conditions: //! //! The above copyright notice and this permission notice //! shall be included in all copies or substantial portions //! of the Software. //! //! THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF //! ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED //! TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A //! PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT //! SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY //! CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION //! OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR //! IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER //! DEALINGS IN THE SOFTWARE. //! //! The implementation is based on Dmitry Vyukov's bounded MPMC queue. //! //! Source: //! 
- use core::cell::UnsafeCell; use crate::flavor::Token; use core::mem::{self, MaybeUninit}; use core::panic::{RefUnwindSafe, UnwindSafe}; use core::ptr; use core::sync::atomic::{AtomicU64, Ordering}; use crossbeam_utils::CachePadded; /// A slot in a queue. struct Slot { /// The value in this slot. value: UnsafeCell>, } /// A bounded multi-producer multi-consumer queue. /// /// This queue allocates a fixed-capacity buffer on construction, which is used to store pushed /// elements. The queue cannot hold more elements than the buffer allows. Attempting to push an /// element into a full queue will fail. Alternatively, [`force_push`] makes it possible for /// this queue to be used as a ring-buffer. Having a buffer allocated upfront makes this queue /// a bit faster than [`SegQueue`]. /// /// [`SegQueue`]: super::SegQueue pub struct ArrayQueueSpsc { /// The head of the queue. /// /// This value is a "stamp" consisting of an index into the buffer and a lap, but packed into a /// single `usize`. The lower bits represent the index, while the upper bits represent the lap. /// /// Elements are popped from the head of the queue. /// /// High bits: head_cached /// Low bits: tail sender: CachePadded, /// The tail of the queue. /// /// This value is a "stamp" consisting of an index into the buffer and a lap, but packed into a /// single `usize`. The lower bits represent the index, while the upper bits represent the lap. /// /// Elements are pushed into the tail of the queue. /// /// High bits: tail_cached /// Low bits: head recv: CachePadded, /// The buffer holding slots. buffer: Box<[Slot]>, /// A stamp with the value of `{ lap: 1, index: 0 }`. one_lap: u32, } unsafe impl Sync for ArrayQueueSpsc {} unsafe impl Send for ArrayQueueSpsc {} impl UnwindSafe for ArrayQueueSpsc {} impl RefUnwindSafe for ArrayQueueSpsc {} impl ArrayQueueSpsc { /// Creates a new bounded queue with the given capacity. /// /// # Panics /// /// Panics if the capacity is zero. 
pub fn new(cap: usize) -> Self { assert!(cap > 0, "capacity must be non-zero"); assert!(cap < (1 << 31), "capacity too large for u32 logic"); // Head is initialized to `{ lap: 0, index: 0 }`. // Tail is initialized to `{ lap: 0, index: 0 }`. let head = 0; let tail = 0; // Allocate a buffer of `cap` slots initialized // with stamps. let buffer: Box<[Slot]> = (0..cap).map(|_i| Slot { value: UnsafeCell::new(MaybeUninit::uninit()) }).collect(); // One lap is the smallest power of two greater than `cap`. let one_lap = (cap + 1).next_power_of_two() as u32; Self { buffer, one_lap, recv: CachePadded::new(AtomicU64::new(((tail as u64) << 32) | (head as u64))), sender: CachePadded::new(AtomicU64::new(((head as u64) << 32) | (tail as u64))), } } #[inline(always)] fn _try_push(&self, order: Ordering, value: *const T) -> bool { let sender_val = self.sender.load(Ordering::Relaxed); let tail = sender_val as u32; let mut head_cached = (sender_val >> 32) as u32; if head_cached.wrapping_add(self.one_lap) == tail { let head = self.recv.load(order) as u32; if head == head_cached { return false; } head_cached = head; } let cap = self.capacity(); // Deconstruct the tail. let index = (tail & (self.one_lap - 1)) as usize; // Inspect the corresponding slot. debug_assert!(index < self.buffer.len()); let slot = unsafe { self.buffer.get_unchecked(index) }; let new_tail = if index + 1 < cap { // Same lap, incremented index. // Set to `{ lap: lap, index: index + 1 }`. tail + 1 } else { let lap = tail & !(self.one_lap - 1); // One lap forward, index wraps around to zero. // Set to `{ lap: lap.wrapping_add(1), index: 0 }`. lap.wrapping_add(self.one_lap) }; // Write the value into the slot. 
unsafe { let item: &mut MaybeUninit = &mut *slot.value.get(); item.write(ptr::read(value)); } self.sender.store(((head_cached as u64) << 32) | (new_tail as u64), Ordering::SeqCst); true } #[inline(always)] pub unsafe fn push_with_ptr(&self, value: *const T) -> bool { self._try_push(Ordering::Acquire, value) } #[inline(always)] pub unsafe fn push_with_ptr_final(&self, value: *const T) -> bool { self._try_push(Ordering::SeqCst, value) } #[inline] pub fn start_read(&self, final_check: bool) -> Option { if let Some((head, tail_cached)) = self._start_read::(final_check) { let (slot, packed_recv) = self._read(head, tail_cached); Some(Token::new(slot as *const Slot as *const u8, packed_recv as usize)) } else { None } } #[inline] pub fn pop(&self, final_check: bool) -> Option { if let Some((head, tail_cached)) = self._start_read::(final_check) { let (slot, packed_recv) = self._read(head, tail_cached); let msg = unsafe { slot.value.get().read().assume_init() }; self.recv.store(packed_recv, Ordering::SeqCst); Some(msg) } else { None } } #[inline] pub fn pop_cached(&self) -> Option { if let Some((head, tail_cached)) = self._start_read::(false) { let (slot, packed_recv) = self._read(head, tail_cached); let msg = unsafe { slot.value.get().read().assume_init() }; self.recv.store(packed_recv, Ordering::SeqCst); Some(msg) } else { None } } /// return (head, tail_cached) #[inline] fn _start_read(&self, _final_check: bool) -> Option<(u32, u32)> { let recv_val = self.recv.load(Ordering::Relaxed); let head = recv_val as u32; let mut tail_cached = (recv_val >> 32) as u32; if tail_cached == head { if SPIN { // because we don't have stamp, and no spinning loop, // this line is critical for performance std::hint::spin_loop(); let tail = { if _final_check { // because we need to check is_empty before park, // use SeqCst to make Miri happy self.sender.load(Ordering::SeqCst) as u32 } else { self.sender.load(Ordering::Acquire) as u32 } }; if head == tail { return None; } tail_cached = tail; } 
else { return None; } } Some((head, tail_cached)) } #[inline] fn _read(&self, head: u32, tail_cached: u32) -> (&Slot, u64) { // Deconstruct the head. let index = (head & (self.one_lap - 1)) as usize; // Inspect the corresponding slot. debug_assert!(index < self.buffer.len()); let slot = unsafe { self.buffer.get_unchecked(index) }; // If the stamp is ahead of the head by 1, we may attempt to pop. let new_head = if index + 1 < self.capacity() { // Same lap, incremented index. // Set to `{ lap: lap, index: index + 1 }`. head + 1 } else { let lap = head & !(self.one_lap - 1); // One lap forward, index wraps around to zero. // Set to `{ lap: lap.wrapping_add(1), index: 0 }`. lap.wrapping_add(self.one_lap) }; (slot, ((tail_cached as u64) << 32) | (new_head as u64)) } #[inline(always)] pub fn read(&self, token: Token) -> T { let slot: &Slot = unsafe { &*token.pos.cast::>() }; let msg = unsafe { slot.value.get().read().assume_init() }; self.recv.store(token.stamp as u64, Ordering::SeqCst); msg } /// Returns the capacity of the queue. #[inline] pub fn capacity(&self) -> usize { self.buffer.len() } /// Returns `true` if the queue is empty. #[inline(always)] pub fn is_empty(&self) -> bool { let head = self.recv.load(Ordering::SeqCst) as u32; let tail = self.sender.load(Ordering::SeqCst) as u32; // Is the tail lagging one lap behind head? // Is the tail equal to the head? // // Note: If the head changes just before we load the tail, that means there was a moment // when the channel was not empty, so it is safe to just return `false`. tail == head } /// Returns `true` if the queue is full. #[inline(always)] pub fn is_full(&self) -> bool { let tail = self.sender.load(Ordering::SeqCst) as u32; let head = self.recv.load(Ordering::SeqCst) as u32; // Is the head lagging one lap behind tail? // // Note: If the tail changes just before we load the head, that means there was a moment // when the queue was not full, so it is safe to just return `false`. 
head.wrapping_add(self.one_lap) == tail } /// Returns the number of elements in the queue. #[inline] pub fn len(&self) -> usize { loop { // Load the tail, then load the head. let tail = self.sender.load(Ordering::SeqCst) as u32; let head = self.recv.load(Ordering::SeqCst) as u32; // If the tail didn't change, we've got consistent values to work with. if self.sender.load(Ordering::SeqCst) as u32 == tail { let hix = head & (self.one_lap - 1); let tix = tail & (self.one_lap - 1); return if hix < tix { (tix - hix) as usize } else if hix > tix { self.capacity() - (hix - tix) as usize } else if tail == head { 0 } else { self.capacity() }; } } } } impl Drop for ArrayQueueSpsc { fn drop(&mut self) { if mem::needs_drop::() { // Get the index of the head. let head = (*self.recv.get_mut()) as u32; let tail = (*self.sender.get_mut()) as u32; let hix = head & (self.one_lap - 1); let tix = tail & (self.one_lap - 1); let len = if hix < tix { tix - hix } else if hix > tix { self.capacity() as u32 - hix + tix } else if tail == head { 0 } else { self.capacity() as u32 }; // Loop over all slots that hold a message and drop them. for i in 0..(len as usize) { // Compute the index of the next slot holding a message. let index = if (hix as usize) + i < self.capacity() { (hix as usize) + i } else { (hix as usize) + i - self.capacity() }; unsafe { debug_assert!(index < self.buffer.len()); let slot = self.buffer.get_unchecked_mut(index); (*slot.value.get()).assume_init_drop(); } } } } } ================================================ FILE: src/crossbeam/err.rs ================================================ //! The MIT License (MIT) //! //! Copyright (c) 2019 The Crossbeam Project Developers //! //! Permission is hereby granted, free of charge, to any //! person obtaining a copy of this software and associated //! documentation files (the "Software"), to deal in the //! Software without restriction, including without //! limitation the rights to use, copy, modify, merge, //! 
publish, distribute, sublicense, and/or sell copies of //! the Software, and to permit persons to whom the Software //! is furnished to do so, subject to the following //! conditions: //! //! The above copyright notice and this permission notice //! shall be included in all copies or substantial portions //! of the Software. //! //! THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF //! ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED //! TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A //! PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT //! SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY //! CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION //! OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR //! IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER //! DEALINGS IN THE SOFTWARE. use std::error; use std::fmt; /// An error returned from the `send` method. /// /// The message could not be sent because the channel is disconnected. /// /// The error contains the message so it can be recovered. #[derive(PartialEq, Eq, Clone, Copy)] pub struct SendError(pub T); /// An error returned from the `try_send` method. /// /// The error contains the message being sent so it can be recovered. #[derive(PartialEq, Eq, Clone, Copy)] pub enum TrySendError { /// The message could not be sent because the channel is full. /// /// If this is a zero-capacity channel, then the error indicates that there was no receiver /// available to receive the message at the time. Full(T), /// The message could not be sent because the channel is disconnected. Disconnected(T), } /// An error returned from the `send_timeout` method. /// /// The error contains the message being sent so it can be recovered. #[derive(PartialEq, Eq, Clone, Copy)] pub enum SendTimeoutError { /// The message could not be sent because the channel is full and the operation timed out. 
/// /// If this is a zero-capacity channel, then the error indicates that there was no receiver /// available to receive the message and the operation timed out. Timeout(T), /// The message could not be sent because the channel is disconnected. Disconnected(T), } /// An error returned from the `recv` method. /// /// A message could not be received because the channel is empty and disconnected. #[derive(PartialEq, Eq, Clone, Copy, Debug)] pub struct RecvError; /// An error returned from the `try_recv` method. #[derive(PartialEq, Eq, Clone, Copy, Debug)] pub enum TryRecvError { /// A message could not be received because the channel is empty. /// /// If this is a zero-capacity channel, then the error indicates that there was no sender /// available to send a message at the time. Empty, /// The message could not be received because the channel is empty and disconnected. Disconnected, } /// An error returned from the `recv_timeout` method. #[derive(PartialEq, Eq, Clone, Copy, Debug)] pub enum RecvTimeoutError { /// A message could not be received because the channel is empty and the operation timed out. /// /// If this is a zero-capacity channel, then the error indicates that there was no sender /// available to send a message and the operation timed out. Timeout, /// The message could not be received because the channel is empty and disconnected. Disconnected, } impl fmt::Debug for SendError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { "SendError(..)".fmt(f) } } impl fmt::Display for SendError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { "sending on a disconnected channel".fmt(f) } } impl error::Error for SendError {} impl SendError { /// Unwraps the message. 
/// /// # Examples /// /// ``` /// use crossfire::mpmc; /// /// let (s, r) = mpmc::bounded_blocking::<&str>(10); /// drop(r); /// /// if let Err(err) = s.send("foo") { /// assert_eq!(err.into_inner(), "foo"); /// } /// ``` pub fn into_inner(self) -> T { self.0 } } impl fmt::Debug for TrySendError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { Self::Full(..) => "Full(..)".fmt(f), Self::Disconnected(..) => "Disconnected(..)".fmt(f), } } } impl fmt::Display for TrySendError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { Self::Full(..) => "sending on a full channel".fmt(f), Self::Disconnected(..) => "sending on a disconnected channel".fmt(f), } } } impl error::Error for TrySendError {} impl From> for TrySendError { fn from(err: SendError) -> Self { match err { SendError(t) => Self::Disconnected(t), } } } impl TrySendError { /// Unwraps the message. /// /// # Examples /// /// ``` /// use crossfire::mpmc; /// /// let (s, r) = mpmc::bounded_blocking::<&str>(0); /// /// if let Err(err) = s.try_send("foo") { /// assert_eq!(err.into_inner(), "foo"); /// } /// ``` pub fn into_inner(self) -> T { match self { Self::Full(v) => v, Self::Disconnected(v) => v, } } /// Returns `true` if the send operation failed because the channel is full. pub fn is_full(&self) -> bool { matches!(self, Self::Full(_)) } /// Returns `true` if the send operation failed because the channel is disconnected. pub fn is_disconnected(&self) -> bool { matches!(self, Self::Disconnected(_)) } } impl fmt::Debug for SendTimeoutError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { "SendTimeoutError(..)".fmt(f) } } impl fmt::Display for SendTimeoutError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { Self::Timeout(..) => "timed out waiting on send operation".fmt(f), Self::Disconnected(..) 
=> "sending on a disconnected channel".fmt(f), } } } impl error::Error for SendTimeoutError {} impl From> for SendTimeoutError { fn from(err: SendError) -> Self { match err { SendError(e) => Self::Disconnected(e), } } } impl SendTimeoutError { /// Unwraps the message. /// /// # Examples /// /// ``` /// use std::time::Duration; /// use crossfire::mpmc; /// /// let (s, r) = mpmc::bounded_blocking::<&str>(10); /// drop(r); /// /// if let Err(err) = s.send_timeout("foo", Duration::from_secs(1)) { /// assert_eq!(err.into_inner(), "foo"); /// } /// ``` pub fn into_inner(self) -> T { match self { Self::Timeout(v) => v, Self::Disconnected(v) => v, } } /// Returns `true` if the send operation timed out. pub fn is_timeout(&self) -> bool { matches!(self, Self::Timeout(_)) } /// Returns `true` if the send operation failed because the channel is disconnected. pub fn is_disconnected(&self) -> bool { matches!(self, Self::Disconnected(_)) } } impl fmt::Display for RecvError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { "receiving on an empty and disconnected channel".fmt(f) } } impl error::Error for RecvError {} impl fmt::Display for TryRecvError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { Self::Empty => "receiving on an empty channel".fmt(f), Self::Disconnected => "receiving on an empty and disconnected channel".fmt(f), } } } impl error::Error for TryRecvError {} impl From for TryRecvError { fn from(err: RecvError) -> Self { match err { RecvError => Self::Disconnected, } } } impl TryRecvError { /// Returns `true` if the receive operation failed because the channel is empty. pub fn is_empty(&self) -> bool { matches!(self, Self::Empty) } /// Returns `true` if the receive operation failed because the channel is disconnected. 
pub fn is_disconnected(&self) -> bool { matches!(self, Self::Disconnected) } } impl fmt::Display for RecvTimeoutError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { Self::Timeout => "timed out waiting on receive operation".fmt(f), Self::Disconnected => "channel is empty and disconnected".fmt(f), } } } impl error::Error for RecvTimeoutError {} impl From for RecvTimeoutError { fn from(err: RecvError) -> Self { match err { RecvError => Self::Disconnected, } } } impl RecvTimeoutError { /// Returns `true` if the receive operation timed out. pub fn is_timeout(&self) -> bool { matches!(self, Self::Timeout) } /// Returns `true` if the receive operation failed because the channel is disconnected. pub fn is_disconnected(&self) -> bool { matches!(self, Self::Disconnected) } } ================================================ FILE: src/crossbeam/mod.rs ================================================ pub mod array_queue; pub mod array_queue_mpsc; pub mod array_queue_spsc; pub mod err; pub mod seg_queue; ================================================ FILE: src/crossbeam/seg_queue.rs ================================================ //! Modify by frostyplanet@gmail.com for the crossfire crate: //! //! - Modify for select according to crossbeam-channel, but without disconnect mark bit //! //! Fork from crossbeam-queue crate commit 5a154def002304814d50f3c7658bd30eb46b2fad //! //! The MIT License (MIT) //! //! Copyright (c) 2026 frostyplanet@gmail.com //! //! Copyright (c) 2019 The Crossbeam Project Developers //! //! Permission is hereby granted, free of charge, to any //! person obtaining a copy of this software and associated //! documentation files (the "Software"), to deal in the //! Software without restriction, including without //! limitation the rights to use, copy, modify, merge, //! publish, distribute, sublicense, and/or sell copies of //! the Software, and to permit persons to whom the Software //! 
is furnished to do so, subject to the following //! conditions: //! //! The above copyright notice and this permission notice //! shall be included in all copies or substantial portions //! of the Software. //! //! THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF //! ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED //! TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A //! PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT //! SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY //! CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION //! OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR //! IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER //! DEALINGS IN THE SOFTWARE. // use crate::flavor::Token; use core::cell::UnsafeCell; use core::fmt; use core::marker::PhantomData; use core::mem::MaybeUninit; use core::panic::{RefUnwindSafe, UnwindSafe}; use core::ptr; use core::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering}; use crossbeam_utils::{Backoff, CachePadded}; use std::alloc::{alloc_zeroed, handle_alloc_error, Layout}; use std::boxed::Box; // Bits indicating the state of a slot: // * If a value has been written into the slot, `WRITE` is set. // * If a value has been read from the slot, `READ` is set. // * If the block is being destroyed, `DESTROY` is set. const WRITE: usize = 1; const READ: usize = 2; const DESTROY: usize = 4; // Each block covers one "lap" of indices. const LAP: usize = 32; // The maximum number of values a block can hold. const BLOCK_CAP: usize = LAP - 1; // How many lower bits are reserved for metadata. const SHIFT: usize = 1; // Indicates that the block is not the last one. const HAS_NEXT: usize = 1; /// A slot in a block. struct Slot { /// The value. value: UnsafeCell>, /// The state of the slot. state: AtomicUsize, } impl Slot { /// Waits until a value is written into the slot. 
fn wait_write(&self) { let backoff = Backoff::new(); while self.state.load(Ordering::Acquire) & WRITE == 0 { backoff.snooze(); } } } /// A block in a linked list. /// /// Each block in the list can hold up to `BLOCK_CAP` values. struct Block { /// The next block in the linked list. next: AtomicPtr>, /// Slots for values. slots: [Slot; BLOCK_CAP], } impl Block { const LAYOUT: Layout = { let layout = Layout::new::(); assert!( layout.size() != 0, "Block should never be zero-sized, as it has an AtomicPtr field" ); layout }; /// Creates an empty block. fn new() -> Box { // SAFETY: layout is not zero-sized let ptr = unsafe { alloc_zeroed(Self::LAYOUT) }; // Handle allocation failure if ptr.is_null() { handle_alloc_error(Self::LAYOUT) } // SAFETY: This is safe because: // [1] `Block::next` (AtomicPtr) may be safely zero initialized. // [2] `Block::slots` (Array) may be safely zero initialized because of [3, 4]. // [3] `Slot::value` (UnsafeCell) may be safely zero initialized because it // holds a MaybeUninit. // [4] `Slot::state` (AtomicUsize) may be safely zero initialized. // TODO: unsafe { Box::new_zeroed().assume_init() } unsafe { Box::from_raw(ptr.cast()) } } /// Waits until the next pointer is set. fn wait_next(&self) -> *mut Self { let backoff = Backoff::new(); loop { let next = self.next.load(Ordering::Acquire); if !next.is_null() { return next; } backoff.snooze(); } } /// Sets the `DESTROY` bit in slots starting from `start` and destroys the block. unsafe fn destroy(this: *mut Self, start: usize) { // It is not necessary to set the `DESTROY` bit in the last slot because that slot has // begun destruction of the block. for i in start..BLOCK_CAP - 1 { let slot = unsafe { (*this).slots.get_unchecked(i) }; // Mark the `DESTROY` bit if a thread is still using the slot. if slot.state.load(Ordering::Acquire) & READ == 0 && slot.state.fetch_or(DESTROY, Ordering::AcqRel) & READ == 0 { // If a thread is still using the slot, it will continue destruction of the block. 
return; } } // No thread is using the block, now it is safe to destroy it. drop(unsafe { Box::from_raw(this) }); } } /// A position in a queue. struct Position { /// The index in the queue. index: AtomicUsize, /// The block in the linked list. block: AtomicPtr>, } /// An unbounded multi-producer multi-consumer queue. /// /// This queue is implemented as a linked list of segments, where each segment is a small buffer /// that can hold a handful of elements. There is no limit to how many elements can be in the queue /// at a time. However, since segments need to be dynamically allocated as elements get pushed, /// this queue is somewhat slower than [`ArrayQueue`]. /// /// [`ArrayQueue`]: super::ArrayQueue pub struct SegQueue { /// The head of the queue. head: CachePadded>, /// The tail of the queue. tail: CachePadded>, /// Indicates that dropping a `SegQueue` may drop values of type `T`. _marker: PhantomData, } unsafe impl Send for SegQueue {} unsafe impl Sync for SegQueue {} impl UnwindSafe for SegQueue {} impl RefUnwindSafe for SegQueue {} impl SegQueue { /// Creates a new unbounded queue. pub const fn new() -> Self { Self { head: CachePadded::new(Position { block: AtomicPtr::new(ptr::null_mut()), index: AtomicUsize::new(0), }), tail: CachePadded::new(Position { block: AtomicPtr::new(ptr::null_mut()), index: AtomicUsize::new(0), }), _marker: PhantomData, } } /// Pushes back an element to the tail. #[inline(always)] pub fn push(&self, value: T) { let backoff = Backoff::new(); let mut tail = self.tail.index.load(Ordering::Acquire); let mut block = self.tail.block.load(Ordering::Acquire); let mut next_block = None; loop { // Calculate the offset of the index into the block. let offset = (tail >> SHIFT) % LAP; // If we reached the end of the block, wait until the next one is installed. 
if offset == BLOCK_CAP { backoff.snooze(); tail = self.tail.index.load(Ordering::Acquire); block = self.tail.block.load(Ordering::Acquire); continue; } // If we're going to have to install the next block, allocate it in advance in order to // make the wait for other threads as short as possible. if offset + 1 == BLOCK_CAP && next_block.is_none() { next_block = Some(Block::::new()); } // If this is the first push operation, we need to allocate the first block. if block.is_null() { let new = Box::into_raw(Block::::new()); if self .tail .block .compare_exchange(block, new, Ordering::Release, Ordering::Relaxed) .is_ok() { self.head.block.store(new, Ordering::Release); block = new; } else { next_block = unsafe { Some(Box::from_raw(new)) }; tail = self.tail.index.load(Ordering::Acquire); block = self.tail.block.load(Ordering::Acquire); continue; } } let new_tail = tail + (1 << SHIFT); // Try advancing the tail forward. match self.tail.index.compare_exchange_weak( tail, new_tail, Ordering::SeqCst, Ordering::Acquire, ) { Ok(_) => unsafe { // If we've reached the end of the block, install the next one. if offset + 1 == BLOCK_CAP { let next_block = Box::into_raw(next_block.unwrap()); let next_index = new_tail.wrapping_add(1 << SHIFT); self.tail.block.store(next_block, Ordering::Release); self.tail.index.store(next_index, Ordering::Release); (*block).next.store(next_block, Ordering::Release); } // Write the value into the slot. 
let slot = (*block).slots.get_unchecked(offset); slot.value.get().write(MaybeUninit::new(value)); slot.state.fetch_or(WRITE, Ordering::Release); return; }, Err(t) => { tail = t; block = self.tail.block.load(Ordering::Acquire); backoff.spin(); } } } } #[inline(always)] pub fn start_read(&self) -> Option { if let Some((block, offset)) = self._pop::() { Some(Token::new(block as *const u8, offset)) } else { None } } #[inline(always)] pub fn pop(&self) -> Option { if let Some((block, offset)) = self._pop::() { Some(self._read(block, offset)) } else { None } } #[inline(always)] fn _pop(&self) -> Option<(*mut Block, usize)> { let backoff = Backoff::new(); let mut head; if FINAL { head = self.head.index.load(Ordering::SeqCst); let tail = self.tail.index.load(Ordering::SeqCst); if head >> SHIFT == tail >> SHIFT { return None; } } else { head = self.head.index.load(Ordering::Acquire); } let mut block = self.head.block.load(Ordering::Acquire); loop { // Calculate the offset of the index into the block. let offset = (head >> SHIFT) % LAP; // If we reached the end of the block, wait until the next one is installed. if offset == BLOCK_CAP { backoff.snooze(); head = self.head.index.load(Ordering::Acquire); block = self.head.block.load(Ordering::Acquire); continue; } let mut new_head = head + (1 << SHIFT); if new_head & HAS_NEXT == 0 { atomic::fence(Ordering::SeqCst); let tail = self.tail.index.load(Ordering::Relaxed); // If the tail equals the head, that means the queue is empty. if head >> SHIFT == tail >> SHIFT { return None; } // If head and tail are not in the same block, set `HAS_NEXT` in head. if (head >> SHIFT) / LAP != (tail >> SHIFT) / LAP { new_head |= HAS_NEXT; } } // The block can be null here only if the first push operation is in progress. In that // case, just wait until it gets initialized. 
if block.is_null() { backoff.snooze(); head = self.head.index.load(Ordering::Acquire); block = self.head.block.load(Ordering::Acquire); continue; } // Try moving the head index forward. match self.head.index.compare_exchange_weak( head, new_head, Ordering::SeqCst, Ordering::Acquire, ) { Ok(_) => unsafe { // If we've reached the end of the block, move to the next one. if offset + 1 == BLOCK_CAP { let next = (*block).wait_next(); let mut next_index = (new_head & !HAS_NEXT).wrapping_add(1 << SHIFT); if !(*next).next.load(Ordering::Relaxed).is_null() { next_index |= HAS_NEXT; } self.head.block.store(next, Ordering::Release); self.head.index.store(next_index, Ordering::Release); } return Some((block, offset)); }, Err(h) => { head = h; block = self.head.block.load(Ordering::Acquire); backoff.spin(); } } } } #[inline(always)] pub fn read(&self, token: Token) -> T { let block = token.pos as *mut Block; let offset = token.stamp; self._read(block, offset) } #[inline(always)] fn _read(&self, block: *mut Block, offset: usize) -> T { unsafe { let slot = (*block).slots.get_unchecked(offset); // Read the value. slot.wait_write(); let value = slot.value.get().read().assume_init(); // Destroy the block if we've reached the end, or if another thread wanted to // destroy but couldn't because we were busy reading from the slot. if offset + 1 == BLOCK_CAP { Block::destroy(block, 0); } else if slot.state.fetch_or(READ, Ordering::AcqRel) & DESTROY != 0 { Block::destroy(block, offset + 1); } value } } /// Returns `true` if the queue is empty. #[inline(always)] pub fn is_empty(&self) -> bool { let head = self.head.index.load(Ordering::SeqCst); let tail = self.tail.index.load(Ordering::SeqCst); head >> SHIFT == tail >> SHIFT } /// Returns the number of elements in the queue. pub fn len(&self) -> usize { loop { // Load the tail index, then load the head index. 
let mut tail = self.tail.index.load(Ordering::SeqCst); let mut head = self.head.index.load(Ordering::SeqCst); // If the tail index didn't change, we've got consistent indices to work with. if self.tail.index.load(Ordering::SeqCst) == tail { // Erase the lower bits. tail &= !((1 << SHIFT) - 1); head &= !((1 << SHIFT) - 1); // Fix up indices if they fall onto block ends. if (tail >> SHIFT) & (LAP - 1) == LAP - 1 { tail = tail.wrapping_add(1 << SHIFT); } if (head >> SHIFT) & (LAP - 1) == LAP - 1 { head = head.wrapping_add(1 << SHIFT); } // Rotate indices so that head falls into the first block. let lap = (head >> SHIFT) / LAP; tail = tail.wrapping_sub((lap * LAP) << SHIFT); head = head.wrapping_sub((lap * LAP) << SHIFT); // Remove the lower bits. tail >>= SHIFT; head >>= SHIFT; // Return the difference minus the number of blocks between tail and head. return tail - head - tail / LAP; } } } } impl Drop for SegQueue { #[inline] fn drop(&mut self) { let mut head = *self.head.index.get_mut(); let mut tail = *self.tail.index.get_mut(); let mut block = *self.head.block.get_mut(); // Erase the lower bits. head &= !((1 << SHIFT) - 1); tail &= !((1 << SHIFT) - 1); unsafe { // Drop all values between `head` and `tail` and deallocate the heap-allocated blocks. while head != tail { let offset = (head >> SHIFT) % LAP; if offset < BLOCK_CAP { // Drop the value in the slot. let slot = (*block).slots.get_unchecked(offset); (*slot.value.get()).assume_init_drop(); } else { // Deallocate the block and move to the next one. let next = *(*block).next.get_mut(); drop(Box::from_raw(block)); block = next; } head = head.wrapping_add(1 << SHIFT); } // Deallocate the last remaining block. if !block.is_null() { drop(Box::from_raw(block)); } } } } impl fmt::Debug for SegQueue { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.pad("SegQueue { .. 
}") } } impl Default for SegQueue { fn default() -> Self { Self::new() } } impl IntoIterator for SegQueue { type Item = T; type IntoIter = IntoIter; fn into_iter(self) -> Self::IntoIter { IntoIter { value: self } } } #[derive(Debug)] pub struct IntoIter { value: SegQueue, } impl Iterator for IntoIter { type Item = T; fn next(&mut self) -> Option { let value = &mut self.value; let head = *value.head.index.get_mut(); let tail = *value.tail.index.get_mut(); if head >> SHIFT == tail >> SHIFT { None } else { let block = *value.head.block.get_mut(); let offset = (head >> SHIFT) % LAP; // SAFETY: We have mutable access to this, so we can read without // worrying about concurrency. Furthermore, we know this is // initialized because it is the value pointed at by `value.head` // and this is a non-empty queue. let item = unsafe { let slot = (*block).slots.get_unchecked(offset); slot.value.get().read().assume_init() }; if offset + 1 == BLOCK_CAP { // Deallocate the block and move to the next one. // SAFETY: The block is initialized because we've been reading // from it this entire time. We can drop it b/c everything has // been read out of it, so nothing is pointing to it anymore. unsafe { let next = *(*block).next.get_mut(); drop(Box::from_raw(block)); *value.head.block.get_mut() = next; } // The last value in a block is empty, so skip it *value.head.index.get_mut() = head.wrapping_add(2 << SHIFT); // Double-check that we're pointing to the first item in a block. 
debug_assert_eq!((*value.head.index.get_mut() >> SHIFT) % LAP, 0); } else { *value.head.index.get_mut() = head.wrapping_add(1 << SHIFT); } Some(item) } } } ================================================ FILE: src/flavor/array.rs ================================================ use super::{FlavorBounded, FlavorImpl, FlavorSelect, Queue, Token}; use crate::crossbeam::array_queue::ArrayQueue; use std::mem::MaybeUninit; /// Which Equals to crossbeam_queue::ArrayQueue pub type Array = _Array; pub struct _Array(ArrayQueue); impl _Array { pub fn new(mut bound: usize) -> Self { assert!(bound <= u32::MAX as usize); if bound == 0 { bound = 1; } Self(ArrayQueue::::new(bound)) } } impl Queue for _Array { type Item = T; #[inline(always)] fn pop(&self) -> Option { self.0.pop(true) } #[inline(always)] fn push(&self, item: T) -> Result<(), T> { let _item = MaybeUninit::new(item); if unsafe { self.0.push_with_ptr(_item.as_ptr()) } { Ok(()) } else { Err(unsafe { _item.assume_init_read() }) } } #[inline(always)] fn is_full(&self) -> bool { self.0.is_full() } #[inline(always)] fn is_empty(&self) -> bool { self.0.is_empty() } #[inline(always)] fn len(&self) -> usize { self.0.len() } #[inline(always)] fn capacity(&self) -> Option { Some(self.0.capacity()) } } impl FlavorImpl for _Array { #[inline(always)] fn try_send(&self, item: &MaybeUninit) -> bool { unsafe { self.0.push_with_ptr(item.as_ptr()) } } #[inline(always)] fn try_send_oneshot(&self, item: *const T) -> Option { unsafe { self.0.try_push_oneshot(item) } } #[inline] fn try_recv(&self) -> Option { self.0.pop(false) } #[inline] fn try_recv_final(&self) -> Option { self.0.pop(true) } #[inline] fn backoff_limit(&self) -> u16 { if self.0.capacity() > 10 { crate::backoff::DEFAULT_LIMIT } else { #[cfg(target_arch = "x86_64")] { crate::backoff::DEFAULT_LIMIT } #[cfg(not(target_arch = "x86_64"))] { crate::backoff::MAX_LIMIT } } } #[inline] fn may_direct_copy(&self) -> bool { if MP { true } else { // sender has no CAS, not safe to 
direct copy false } } } impl FlavorSelect for _Array { #[inline] fn try_select(&self, final_check: bool) -> Option { self.0.start_read(final_check) } #[inline(always)] fn read_with_token(&self, token: Token) -> T { self.0.read(token) } } impl FlavorBounded for _Array { #[inline(always)] fn new_with_bound(size: usize) -> Self { Self::new(size) } } ================================================ FILE: src/flavor/array_mpsc.rs ================================================ use super::{FlavorBounded, FlavorImpl, FlavorSelect, Queue, Token}; use crate::crossbeam::array_queue_mpsc::ArrayQueueMpsc; use std::mem::MaybeUninit; /// Simplified ArrayQueue tweaks for MPSC /// /// Push and pop fast path reduced one atomic ops compared to its MPMC version (only 3 ops instead /// of 4), /// and it's faster to detect the empty / full condition (2 ops instead of 3). pub struct ArrayMpsc(ArrayQueueMpsc); impl ArrayMpsc { pub fn new(mut bound: usize) -> Self { assert!(bound <= u32::MAX as usize); if bound == 0 { bound = 1; } Self(ArrayQueueMpsc::::new(bound)) } } impl Queue for ArrayMpsc { type Item = T; #[inline(always)] fn pop(&self) -> Option { self.0.pop(true) } #[inline(always)] fn push(&self, item: T) -> Result<(), T> { let _item = MaybeUninit::new(item); if unsafe { self.0.push_with_ptr(_item.as_ptr()) } { Ok(()) } else { Err(unsafe { _item.assume_init_read() }) } } #[inline(always)] fn is_full(&self) -> bool { self.0.is_full() } #[inline(always)] fn is_empty(&self) -> bool { self.0.is_empty() } #[inline(always)] fn len(&self) -> usize { self.0.len() } #[inline(always)] fn capacity(&self) -> Option { Some(self.0.capacity()) } } impl FlavorImpl for ArrayMpsc { #[inline(always)] fn try_send(&self, item: &MaybeUninit) -> bool { unsafe { self.0.push_with_ptr(item.as_ptr()) } } #[inline(always)] fn try_send_oneshot(&self, item: *const T) -> Option { unsafe { self.0.try_push_oneshot(item) } } #[inline(always)] fn try_recv_cached(&self) -> Option { self.0.pop_cached() } #[inline] 
fn try_recv(&self) -> Option { self.0.pop(false) } #[inline] fn try_recv_final(&self) -> Option { self.0.pop(true) } #[inline] fn backoff_limit(&self) -> u16 { if self.0.capacity() > 10 { crate::backoff::DEFAULT_LIMIT } else { #[cfg(target_arch = "x86_64")] { crate::backoff::DEFAULT_LIMIT } #[cfg(not(target_arch = "x86_64"))] { crate::backoff::MAX_LIMIT } } } #[inline] fn may_direct_copy(&self) -> bool { true } } impl FlavorSelect for ArrayMpsc { #[inline] fn try_select(&self, final_check: bool) -> Option { self.0.start_read(final_check) } #[inline(always)] fn read_with_token(&self, token: Token) -> T { self.0.read(token) } } impl FlavorBounded for ArrayMpsc { #[inline(always)] fn new_with_bound(size: usize) -> Self { Self::new(size) } } ================================================ FILE: src/flavor/array_spsc.rs ================================================ use super::{FlavorBounded, FlavorImpl, FlavorSelect, Queue, Token}; use crate::crossbeam::array_queue_spsc::ArrayQueueSpsc; use std::mem::MaybeUninit; /// Ultra light-weight bounded SPSC /// /// which derives from ArrayQueue, but without stamp. /// With only two atomics for cache affinity, the fastpath only require two ops to one atomic. 
/// pub struct ArraySpsc(ArrayQueueSpsc); impl ArraySpsc { pub fn new(mut bound: usize) -> Self { assert!(bound <= u32::MAX as usize); if bound == 0 { bound = 1; } Self(ArrayQueueSpsc::::new(bound)) } } impl Queue for ArraySpsc { type Item = T; #[inline(always)] fn pop(&self) -> Option { self.0.pop(true) } #[inline(always)] fn push(&self, item: T) -> Result<(), T> { let _item = MaybeUninit::new(item); if unsafe { self.0.push_with_ptr_final(_item.as_ptr()) } { Ok(()) } else { Err(unsafe { _item.assume_init_read() }) } } #[inline(always)] fn is_full(&self) -> bool { self.0.is_full() } #[inline(always)] fn is_empty(&self) -> bool { self.0.is_empty() } #[inline(always)] fn len(&self) -> usize { self.0.len() } #[inline(always)] fn capacity(&self) -> Option { Some(self.0.capacity()) } } impl FlavorImpl for ArraySpsc { #[inline(always)] fn try_send(&self, item: &MaybeUninit) -> bool { unsafe { self.0.push_with_ptr(item.as_ptr()) } } #[inline(always)] fn try_send_oneshot(&self, item: *const T) -> Option { Some(unsafe { self.0.push_with_ptr_final(item) }) } #[inline] fn try_recv_cached(&self) -> Option { self.0.pop_cached() } #[inline] fn try_recv(&self) -> Option { self.0.pop(false) } #[inline] fn try_recv_final(&self) -> Option { self.0.pop(true) } #[inline] fn backoff_limit(&self) -> u16 { crate::backoff::MAX_LIMIT } #[inline] fn may_direct_copy(&self) -> bool { // NOTE: // The spsc is not safe for direct copy, // because it has no cas, consumer cannot touch the producers pointer false } } impl FlavorSelect for ArraySpsc { #[inline] fn try_select(&self, final_check: bool) -> Option { self.0.start_read(final_check) } #[inline(always)] fn read_with_token(&self, token: Token) -> T { self.0.read(token) } } impl FlavorBounded for ArraySpsc { #[inline(always)] fn new_with_bound(size: usize) -> Self { Self::new(size) } } ================================================ FILE: src/flavor/list.rs ================================================ use super::{FlavorImpl, FlavorNew, 
FlavorSelect, Queue, Token};
use crate::crossbeam::seg_queue::SegQueue;
use std::mem::MaybeUninit;

/// Which equals to crossbeam_queue::SegQueue
pub struct List<T>(SegQueue<T>);

impl<T> List<T> {
    #[inline(always)]
    pub fn new() -> Self {
        Self(SegQueue::<T>::new())
    }
}

// NOTE(review): the const/turbofish argument of `SegQueue::pop::<…>()` was lost in
// extraction; restored below as a "final" flag mirroring `FlavorImpl::try_recv{,_final}` —
// confirm against the original source.
impl<T> Queue for List<T> {
    type Item = T;

    #[inline(always)]
    fn pop(&self) -> Option<T> {
        self.0.pop::<true>()
    }

    #[inline(always)]
    fn push(&self, item: T) -> Result<(), T> {
        // Unbounded: push never fails.
        self.0.push(item);
        Ok(())
    }

    #[inline(always)]
    fn len(&self) -> usize {
        self.0.len()
    }

    #[inline(always)]
    fn capacity(&self) -> Option<usize> {
        // Unbounded queue has no capacity.
        None
    }

    #[inline(always)]
    fn is_full(&self) -> bool {
        false
    }

    #[inline(always)]
    fn is_empty(&self) -> bool {
        self.0.is_empty()
    }
}

impl<T> FlavorImpl for List<T> {
    #[inline(always)]
    fn try_send(&self, item: &MaybeUninit<T>) -> bool {
        self.0.push(unsafe { item.assume_init_read() });
        true
    }

    #[inline]
    fn try_recv(&self) -> Option<T> {
        self.0.pop::<false>()
    }

    #[inline]
    fn try_recv_final(&self) -> Option<T> {
        self.0.pop::<true>()
    }

    #[inline]
    fn backoff_limit(&self) -> u16 {
        crate::backoff::DEFAULT_LIMIT
    }

    #[inline]
    fn may_direct_copy(&self) -> bool {
        false
    }
}

impl<T> FlavorNew for List<T> {
    #[inline]
    fn new() -> Self {
        List::new()
    }
}

impl<T> FlavorSelect for List<T> {
    #[inline]
    fn try_select(&self, final_check: bool) -> Option<Token> {
        if final_check && self.0.is_empty() {
            return None;
        }
        self.0.start_read()
    }

    #[inline(always)]
    fn read_with_token(&self, token: Token) -> T {
        self.0.read(token)
    }
}

================================================ FILE: src/flavor/mod.rs ================================================

use crate::waker_registry::*;
use std::marker::PhantomData;
use std::mem::MaybeUninit;
use std::ops::Deref;

pub mod array;
pub use array::Array;
mod array_mpsc;
pub use array_mpsc::ArrayMpsc;
mod array_spsc;
pub use array_spsc::ArraySpsc;
mod list;
pub use list::*;
mod one;
pub use one::*;
mod one_mpsc;
pub use one_mpsc::OneMpsc;
mod one_spmc;
pub use one_spmc::OneSpsc;

/// Essential struct for select and read interface
pub(crate) struct Token {
    pub(crate) pos: *const
u8, pub(crate) stamp: usize, } impl Token { #[inline] pub(crate) fn new(pos: *const u8, stamp: usize) -> Self { Self { pos, stamp } } } impl Default for Token { #[inline] fn default() -> Self { Self { pos: std::ptr::null_mut(), stamp: 0 } } } // The queue trait should be public because AsyncStream, AsyncRx ... all use it's associate type `Item` /// Trait for lockless queue, it's safe to use if you don't want the channel mechanisms pub trait Queue { type Item; fn pop(&self) -> Option; fn push(&self, item: Self::Item) -> Result<(), Self::Item>; fn len(&self) -> usize; fn capacity(&self) -> Option; fn is_full(&self) -> bool; fn is_empty(&self) -> bool; } /// Internal flavor interface pub(crate) trait FlavorImpl: Queue { fn try_send(&self, item: &MaybeUninit) -> bool; #[inline] fn try_send_oneshot(&self, _item: *const Self::Item) -> Option { unimplemented!() } /// For multiplex, only using cached value /// /// (without spin and loading sender value) #[inline] fn try_recv_cached(&self) -> Option { self.try_recv() } fn try_recv(&self) -> Option; fn try_recv_final(&self) -> Option; fn backoff_limit(&self) -> u16; #[inline(always)] fn may_direct_copy(&self) -> bool { false } } pub(crate) trait FlavorSelect: Queue { /// Note: this is internal function, it does not check if the token has other result fn try_select(&self, final_check: bool) -> Option; /// Note: this is internal function, it does not check if the token is valid fn read_with_token(&self, token: Token) -> Self::Item; } // because enum_dispatch does not support associate type macro_rules! 
queue_dispatch { ($wrap_method: ident)=>{ #[inline(always)] fn pop(&self) -> Option { $wrap_method!(self, pop) } #[inline(always)] fn push(&self, item: Self::Item) -> Result<(), Self::Item> { $wrap_method!(self, push item) } #[inline(always)] fn len(&self) -> usize { $wrap_method!(self, len) } #[inline(always)] fn capacity(&self) -> Option { $wrap_method!(self, capacity) } #[inline(always)] fn is_full(&self) -> bool { $wrap_method!(self, is_full) } #[inline(always)] fn is_empty(&self) -> bool { $wrap_method!(self, is_empty) } }; } pub(super) use queue_dispatch; // because enum_dispatch does not support associate type macro_rules! flavor_dispatch { ($wrap_method: ident)=>{ #[inline(always)] fn try_send(&self, item: &MaybeUninit) -> bool { $wrap_method!(self, try_send item) } #[inline] fn try_send_oneshot(&self, _item: *const Self::Item) -> Option { $wrap_method!(self, try_send_oneshot _item) } #[inline(always)] fn try_recv_cached(&self) -> Option { $wrap_method!(self, try_recv_cached) } #[inline(always)] fn try_recv(&self) -> Option { $wrap_method!(self, try_recv) } #[inline(always)] fn try_recv_final(&self) -> Option { $wrap_method!(self, try_recv_final) } #[inline(always)] fn backoff_limit(&self) -> u16 { $wrap_method!(self, backoff_limit) } #[inline(always)] fn may_direct_copy(&self) -> bool { $wrap_method!(self, may_direct_copy) } }; } pub(super) use flavor_dispatch; // because enum_dispatch does not support associate type macro_rules! 
flavor_select_dispatch { ($wrap_method: ident) => { #[inline(always)] fn try_select(&self, final_check: bool) -> Option { $wrap_method!(self, try_select final_check) } #[inline(always)] fn read_with_token(&self, token: Token) -> Self::Item { $wrap_method!(self, read_with_token token) } }; } #[allow(unused_imports)] pub(super) use flavor_select_dispatch; pub trait Flavor: Send + 'static + FlavorImpl { type Send: RegistrySend; type Recv: RegistryRecv; } pub trait FlavorMP {} pub trait FlavorMC {} pub trait FlavorNew { fn new() -> Self; } pub trait FlavorBounded { fn new_with_bound(size: usize) -> Self; } /// A type wrapper for channel flavor pub struct FlavorWrap { inner: F, _phan: PhantomData, } /// break evaluation overflow of F unsafe impl Send for FlavorWrap {} impl FlavorWrap where F: FlavorImpl, S: RegistrySend, R: RegistryRecv, { #[inline(always)] pub fn new() -> Self where F: FlavorNew, { Self::from_inner(::new()) } #[inline(always)] pub(crate) fn from_inner(f: F) -> Self { Self { inner: f, _phan: Default::default() } } } impl FlavorNew for FlavorWrap where F: FlavorImpl + FlavorNew, S: RegistrySend, R: RegistryRecv, { #[inline(always)] fn new() -> Self { Self::from_inner(::new()) } } impl FlavorBounded for FlavorWrap where F: FlavorImpl + FlavorBounded, S: RegistrySend, R: RegistryRecv, { #[inline(always)] fn new_with_bound(size: usize) -> Self { Self::from_inner(::new_with_bound(size)) } } impl Flavor for FlavorWrap where F: FlavorImpl + 'static, S: RegistrySend, R: RegistryRecv, { type Send = S; type Recv = R; } impl Deref for FlavorWrap where F: FlavorImpl, S: RegistrySend, R: RegistryRecv, { type Target = F; #[inline(always)] fn deref(&self) -> &F { &self.inner } } impl FlavorMP for FlavorWrap where F: FlavorImpl, R: RegistryRecv, { } impl FlavorMP for FlavorWrap, R> where F: FlavorImpl, R: RegistryRecv, { } impl FlavorMC for FlavorWrap {} macro_rules! 
wrap_new_type { ($self: expr, $method:ident $($arg:expr)*)=>{ $self.inner.$method($($arg)*) }; } impl Queue for FlavorWrap where F: FlavorImpl, S: RegistrySend, R: RegistryRecv, { type Item = F::Item; queue_dispatch!(wrap_new_type); } impl FlavorImpl for FlavorWrap where F: FlavorImpl, S: RegistrySend, R: RegistryRecv, { flavor_dispatch!(wrap_new_type); } impl FlavorSelect for FlavorWrap where F: FlavorImpl + FlavorSelect, S: RegistrySend, R: RegistryRecv, { flavor_select_dispatch!(wrap_new_type); } #[cfg(test)] mod tests { use super::*; use std::mem::size_of; #[test] fn print_flavor_size() { // println!("Flavor size {}", size_of::>()); println!("one size {}", size_of::>()); println!("array size {}", size_of::>()); println!("list size {}", size_of::>()); } } ================================================ FILE: src/flavor/one.rs ================================================ use super::{FlavorImpl, FlavorNew, FlavorSelect, Queue, Token}; use crate::backoff::*; use core::cell::UnsafeCell; use core::mem::{needs_drop, MaybeUninit}; use core::ptr; use core::sync::atomic::{ compiler_fence, AtomicU16, AtomicU32, Ordering::{self, Acquire, Relaxed, Release, SeqCst}, }; use crossbeam_utils::CachePadded; /// A simplify ArrayQueue specialized for size=1 /// /// It contains two slots, allow sender and receiver works truly concurrent, /// while the buffer capacity is still 1. /// For one-sized queue, contention are higher than larger ArrayQueue, so it's better to use one atomic, /// which packs head & tail, to reduce the operation cost, and the stamps in the slot are guards to /// access the slot. pub struct One { pos: CachePadded, /// The value in this slot. 
slots: [Slot; 2], } unsafe impl Sync for One {} unsafe impl Send for One {} impl Queue for One { type Item = T; #[inline(always)] fn pop(&self) -> Option { self._pop(Ordering::SeqCst) } #[inline(always)] fn push(&self, item: T) -> Result<(), T> { let _item = MaybeUninit::new(item); if unsafe { self._try_push(SeqCst, _item.as_ptr(), Acquire).is_ok() } { Ok(()) } else { Err(unsafe { _item.assume_init_read() }) } } #[inline(always)] fn len(&self) -> usize { if self.is_full() { 1 } else { 0 } } #[inline(always)] fn capacity(&self) -> Option { Some(1) } #[inline(always)] fn is_full(&self) -> bool { !self.is_empty() } #[inline(always)] fn is_empty(&self) -> bool { let pos = self.pos.load(SeqCst); let (head, tail) = Self::unpack(pos); head == tail } } impl One { #[inline] pub fn new() -> Self { Self { pos: CachePadded::new(AtomicU32::new(0)), slots: [Slot::init(0), Slot::init(1)] } } #[inline(always)] fn unpack(pos: u32) -> (u16, u16) { let head = (pos >> 16) as u16; let tail = pos as u16; (head, tail) } #[inline(always)] fn pack(head: u16, tail: u16) -> u32 { ((head as u32) << 16) | (tail as u32) } /// return Ok(true) on ok, Ok(false) on full, Err(()) to spin #[inline(always)] unsafe fn _try_push( &self, order: Ordering, value: *const T, failure: Ordering, ) -> Result<(), ()> { let mut pos = self.pos.load(order); compiler_fence(Acquire); loop { let (head, tail) = Self::unpack(pos); if head == tail { let new_pos = Self::pack(head, tail.wrapping_add(1)); match self.pos.compare_exchange_weak(pos, new_pos, SeqCst, failure) { Ok(_) => { let index = tail & 0x1; self.slots[index as usize].write(tail, value); return Ok(()); } Err(_pos) => { pos = _pos; } } } else { return Err(()); } } } #[inline(always)] fn _start_read(&self, order: Ordering) -> Option<(u16, u16)> { let mut pos = self.pos.load(order); compiler_fence(Acquire); loop { let (head, tail) = Self::unpack(pos); if head == tail { return None; } let new_pos = Self::pack(tail, tail); match 
self.pos.compare_exchange_weak(pos, new_pos, SeqCst, Acquire) { Err(_pos) => { pos = _pos; } Ok(_) => { let index = head & 0x1; return Some((index, tail)); } } } } #[inline(always)] fn _pop(&self, order: Ordering) -> Option { if let Some((index, new_head)) = self._start_read(order) { Some(self.slots[index as usize].read(new_head)) } else { None } } } struct Slot { value: UnsafeCell>, stamp: AtomicU16, } impl Slot { #[inline] fn init(i: u16) -> Self { Self { value: UnsafeCell::new(MaybeUninit::uninit()), stamp: AtomicU16::new(i) } } #[inline(always)] fn write(&self, tail: u16, value: *const T) { let mut stamp = self.stamp.load(Acquire); if stamp != tail { let mut backoff = Backoff::new(); loop { backoff.spin(); stamp = self.stamp.load(Acquire); if stamp == tail { break; } } } unsafe { (*self.value.get()).write(ptr::read(value)) }; self.stamp.store(tail.wrapping_add(1), Release); } #[inline(always)] fn read(&self, head: u16) -> T { let mut stamp = self.stamp.load(Acquire); if stamp != head { let mut backoff = Backoff::new(); loop { backoff.spin(); stamp = self.stamp.load(Acquire); if stamp == head { break; } } } let msg = unsafe { self.value.get().read().assume_init() }; // there might be slow reader, update the stamp to allow writer reuse the slot self.stamp.store(head.wrapping_add(1), Release); msg } #[inline(always)] fn drop(&self) { unsafe { self.value.get().read().assume_init_drop() }; } } impl Drop for One { fn drop(&mut self) { if needs_drop::() { let pos = *self.pos.get_mut(); let (head, tail) = Self::unpack(pos); if head != tail { let index = head & 0x1; self.slots[index as usize].drop(); } } } } impl FlavorImpl for One { #[inline(always)] fn try_send(&self, item: &MaybeUninit) -> bool { // Will always double-check with is_full or try_send_oneshot() unsafe { self._try_push(Relaxed, item.as_ptr(), Relaxed).is_ok() } } #[inline(always)] fn try_send_oneshot(&self, item: *const T) -> Option { Some(unsafe { self._try_push(SeqCst, item, Acquire).is_ok() }) } 
#[inline(always)] fn try_recv(&self) -> Option { self._pop(Relaxed) } #[inline(always)] fn try_recv_final(&self) -> Option { self._pop(SeqCst) } #[inline] fn backoff_limit(&self) -> u16 { // Due to bound is too small, // yield with MAX_LIMIT to prevent collapse in high contention crate::backoff::MAX_LIMIT } #[inline] fn may_direct_copy(&self) -> bool { true } } impl FlavorNew for One { #[inline] fn new() -> Self { One::new() } } impl FlavorSelect for One { #[inline] fn try_select(&self, final_check: bool) -> Option { if let Some((index, head)) = self._start_read(if final_check { Ordering::SeqCst } else { Ordering::Acquire }) { Some(Token::new( &self.slots[index as usize] as *const Slot as *const u8, head as usize, )) } else { None } } #[inline(always)] fn read_with_token(&self, token: Token) -> T { let slot: &Slot = unsafe { &*token.pos.cast::>() }; slot.read(token.stamp as u16) } } ================================================ FILE: src/flavor/one_mpsc.rs ================================================ use super::{FlavorImpl, FlavorNew, FlavorSelect, Queue, Token}; use crate::backoff::*; use core::cell::UnsafeCell; use core::mem::{needs_drop, MaybeUninit}; use core::ptr; use core::sync::atomic::{ AtomicU16, AtomicU32, Ordering::{self, Acquire, Release, SeqCst}, }; use crossbeam_utils::CachePadded; /// A simplify ArrayQueue specialized for size=1 pub struct OneMpsc { pos: CachePadded, /// The value in this slot. 
slots: [Slot; 2], } unsafe impl Sync for OneMpsc {} unsafe impl Send for OneMpsc {} impl Queue for OneMpsc { type Item = T; #[inline(always)] fn pop(&self) -> Option { self._pop(Ordering::SeqCst) } #[inline(always)] fn push(&self, item: T) -> Result<(), T> { let _item = MaybeUninit::new(item); if unsafe { self._try_push(SeqCst, _item.as_ptr(), Acquire).is_ok() } { Ok(()) } else { Err(unsafe { _item.assume_init_read() }) } } #[inline(always)] fn len(&self) -> usize { if self.is_full() { 1 } else { 0 } } #[inline(always)] fn capacity(&self) -> Option { Some(1) } #[inline(always)] fn is_full(&self) -> bool { !self.is_empty() } #[inline(always)] fn is_empty(&self) -> bool { let pos = self.pos.load(SeqCst); let (head, tail) = Self::unpack(pos); head == tail } } impl OneMpsc { #[inline] pub fn new() -> Self { Self { pos: CachePadded::new(AtomicU32::new(0)), slots: [Slot::init(0), Slot::init(1)] } } #[inline(always)] fn unpack(pos: u32) -> (u16, u16) { let head = (pos >> 16) as u16; let tail = pos as u16; (head, tail) } #[inline(always)] fn pack(head: u16, tail: u16) -> u32 { ((head as u32) << 16) | (tail as u32) } /// return Ok(true) on ok, Ok(false) on full, Err(()) to spin #[inline(always)] unsafe fn _try_push( &self, order: Ordering, value: *const T, failure: Ordering, ) -> Result<(), ()> { let mut pos = self.pos.load(order); loop { let (head, tail) = Self::unpack(pos); if head == tail { let new_pos = Self::pack(head, tail.wrapping_add(1)); match self.pos.compare_exchange_weak(pos, new_pos, SeqCst, failure) { Ok(_) => { let index = tail & 0x1; self.slots[index as usize].write(tail, value); return Ok(()); } Err(_pos) => { pos = _pos; } } } else { return Err(()); } } } #[inline(always)] fn _start_read(&self, order: Ordering) -> Option<(u16, u16)> { let pos = self.pos.load(order); let (head, tail) = Self::unpack(pos); if head == tail { return None; } let index = head & 0x1; Some((index, tail)) } #[inline(always)] fn _read(&self, slot: &Slot, next_head: u16) -> T { let 
new_pos = Self::pack(next_head, next_head); // Because we have two slot, the sender will write to next index, // it's safe to update the pos before we read, so that sender may begin to write self.pos.store(new_pos, SeqCst); slot.read(next_head) } #[inline(always)] fn _pop(&self, order: Ordering) -> Option { if let Some((index, new_head)) = self._start_read(order) { Some(self._read(&self.slots[index as usize], new_head)) } else { None } } } struct Slot { value: UnsafeCell>, stamp: CachePadded, } impl Slot { #[inline] fn init(i: u16) -> Self { Self { value: UnsafeCell::new(MaybeUninit::uninit()), stamp: CachePadded::new(AtomicU16::new(i)), } } #[inline(always)] fn write(&self, tail: u16, value: *const T) { unsafe { (*self.value.get()).write(ptr::read(value)) }; self.stamp.store(tail.wrapping_add(1), Release); } #[inline(always)] fn read(&self, head: u16) -> T { let mut stamp = self.stamp.load(Acquire); if stamp != head { let mut backoff = Backoff::new(); loop { backoff.snooze(); stamp = self.stamp.load(Acquire); if stamp == head { break; } } } unsafe { self.value.get().read().assume_init() } } #[inline(always)] fn drop(&self) { unsafe { self.value.get().read().assume_init_drop() }; } } impl Drop for OneMpsc { #[inline(always)] fn drop(&mut self) { if needs_drop::() { let pos = *self.pos.get_mut(); let (head, tail) = Self::unpack(pos); if head != tail { let index = head & 0x1; self.slots[index as usize].drop(); } } } } impl FlavorImpl for OneMpsc { #[inline(always)] fn try_send(&self, item: &MaybeUninit) -> bool { // Will always double-check with is_full or try_send_oneshot() unsafe { self._try_push(Acquire, item.as_ptr(), Acquire).is_ok() } } #[inline(always)] fn try_send_oneshot(&self, item: *const T) -> Option { Some(unsafe { self._try_push(SeqCst, item, Acquire).is_ok() }) } #[inline(always)] fn try_recv(&self) -> Option { self._pop(Acquire) } #[inline(always)] fn try_recv_final(&self) -> Option { self._pop(SeqCst) } #[inline] fn backoff_limit(&self) -> u16 { // 
Due to bound is too small, // yield with MAX_LIMIT to prevent collapse in high contention crate::backoff::MAX_LIMIT } #[inline] fn may_direct_copy(&self) -> bool { true } } impl FlavorNew for OneMpsc { #[inline] fn new() -> Self { OneMpsc::new() } } impl FlavorSelect for OneMpsc { #[inline] fn try_select(&self, final_check: bool) -> Option { if let Some((index, head)) = self._start_read(if final_check { Ordering::SeqCst } else { Ordering::Acquire }) { Some(Token::new( &self.slots[index as usize] as *const Slot as *const u8, head as usize, )) } else { None } } #[inline(always)] fn read_with_token(&self, token: Token) -> T { let slot: &Slot = unsafe { &*token.pos.cast::>() }; self._read(slot, token.stamp as u16) } } ================================================ FILE: src/flavor/one_spmc.rs ================================================ use super::{FlavorImpl, FlavorNew, FlavorSelect, Queue, Token}; use core::cell::UnsafeCell; use core::mem::{needs_drop, MaybeUninit}; use crossbeam_utils::CachePadded; use std::ptr; use std::sync::atomic::{ AtomicU64, Ordering::{self, Acquire, SeqCst}, }; /// This is a spsc version of `One` without stamp. /// /// The sender side allow to push and drop it's own previous value, if receivers had not consumed it. pub type OneSpsc = OneSp; ///// This is a spmc version of `One` without stamp, allow replace() on the sender side. ///// ///// The sender side allow to push and drop it's own previous value, if receivers had not consumed it. ///// ///// NOTE: use lockless technique inspired by the OFLIT paper, miri will probably report data racing issue, ///// but it's intentional. ///// This module cannot not separate pop into start_read/read interface, ///// so it cannot implement Flavor interface. //type OneSpmc = OneSp; pub struct OneSp { pos: CachePadded, /// The value in this slot. 
slots: [Slot; 2], } unsafe impl Sync for OneSp {} unsafe impl Send for OneSp {} impl OneSp { #[inline] pub fn new() -> Self { Self { pos: CachePadded::new(AtomicU64::new(0)), slots: [Slot::init(), Slot::init()] } } #[inline(always)] fn unpack(pos: u64) -> (u32, u32) { let head = (pos >> 32) as u32; let tail = pos as u32; (head, tail) } #[inline(always)] fn pack(head: u32, tail: u32) -> u64 { ((head as u64) << 32) | (tail as u64) } #[inline(always)] pub fn is_empty(&self) -> bool { let pos = self.pos.load(SeqCst); let (head, tail) = Self::unpack(pos); head == tail } #[inline(always)] pub fn len(&self) -> usize { if self.is_empty() { 0 } else { 1 } } #[inline] fn try_push(&self, value: *const T, order: Ordering) -> bool { let pos = self.pos.load(order); let (head, tail) = Self::unpack(pos); if head == tail { let new_tail = tail.wrapping_add(1); let index = new_tail & 0x1; self.slots[index as usize].write(value); let new_pos = Self::pack(head, new_tail); self.pos.store(new_pos, Ordering::SeqCst); true } else { false } } } impl Drop for OneSp { fn drop(&mut self) { if needs_drop::() { let pos = *self.pos.get_mut(); let (head, tail) = Self::unpack(pos); if head != tail { let index = tail & 0x1; self.slots[index as usize].drop(); } } } } impl OneSpsc { #[inline(always)] fn _read(&self, slot: &Slot, next_head: u32) -> T { // NOTE: This is only valid for SPSC (not for Spmc) // Because we have two slot, the sender will write to next index, // it's safe to update the pos before we read, so that sender may begin to write let new_pos = Self::pack(next_head, next_head); self.pos.store(new_pos, SeqCst); slot.read() } #[inline(always)] fn _pop(&self, order: Ordering) -> Option { if let Some(tail) = self.start_read(order) { let index = (tail & 0x1) as usize; Some(self._read(&self.slots[index], tail)) } else { None } } #[inline(always)] fn start_read(&self, order: Ordering) -> Option { let pos = self.pos.load(order); let (head, tail) = Self::unpack(pos); if head == tail { None } 
else { debug_assert_eq!(head.wrapping_add(1), tail); Some(tail) } } } struct Slot { value: UnsafeCell>, } impl Slot { #[inline] fn init() -> Self { Self { value: UnsafeCell::new(MaybeUninit::uninit()) } } #[inline(always)] fn write(&self, value: *const T) { unsafe { (*self.value.get()).write(ptr::read(value)) }; } // #[inline(always)] // fn read_into(&self, dest: *mut T) { // unsafe { // let src_ptr = (*self.value.get()).as_ptr(); // ptr::copy_nonoverlapping(src_ptr, dest, 1); // } // } #[inline(always)] fn read(&self) -> T { unsafe { self.value.get().read().assume_init() } } #[inline(always)] fn drop(&self) { unsafe { self.value.get().read().assume_init_drop() }; } } /* impl OneSpmc { #[inline] pub fn replace(&self, value: T) { let item = MaybeUninit::new(value); self._replace(item.as_ptr()); } /// return Ok(true) on ok, Ok(false) on full, Err(()) to spin #[inline(always)] fn _replace(&self, value: *const T) { // No one will advance tail except me let mut pos = self.pos.load(Acquire); let (mut head, tail) = Self::unpack(pos); let new_tail = tail.wrapping_add(1); let index = new_tail & 0x1; self.slots[index as usize].write(value); loop { if head == tail { let new_pos = Self::pack(head, new_tail); self.pos.store(new_pos, Ordering::SeqCst); return; } else { debug_assert_eq!(head.wrapping_add(1), tail); let new_pos = Self::pack(tail, new_tail); match self.pos.compare_exchange_weak(pos, new_pos, SeqCst, Acquire) { Ok(_) => { let index = tail & 0x1; self.slots[index as usize].drop(); return; } Err(_pos) => { if pos != _pos { pos = _pos; let _tail; (head, _tail) = Self::unpack(_pos); debug_assert_eq!(_tail, tail); } continue; } } } } } #[inline(always)] fn _pop(&self, order: Ordering) -> Option { let mut pos = self.pos.load(order); let mut value_copy: MaybeUninit = MaybeUninit::uninit(); loop { let (head, tail) = Self::unpack(pos); if head == tail { return None; } let index = tail & 0x1; self.slots[index as usize].read_into(value_copy.as_mut_ptr()); 
debug_assert_eq!(head.wrapping_add(1), tail); let new_pos = Self::pack(tail, tail); match self.pos.compare_exchange_weak(pos, new_pos, SeqCst, order) { Err(_pos) => { // Other might read the value, or send might use replace to cancel the value, // should be cas suc to confirm pos = _pos; } Ok(_) => { return Some(unsafe { value_copy.assume_init_read() }); } } } } } impl Queue for OneSpmc { type Item = T; #[inline(always)] fn len(&self) -> usize { if self.is_empty() { 0 } else { 1 } } #[inline(always)] fn is_empty(&self) -> bool { Self::is_empty(self) } #[inline(always)] fn capacity(&self) -> Option { Some(1) } #[inline(always)] fn is_full(&self) -> bool { !Self::is_empty(self) } #[inline(always)] fn pop(&self) -> Option where T: Send { self._pop(Ordering::SeqCst) } #[inline] fn push(&self, value: T) -> Result<(), T> where T: Send { let item = MaybeUninit::new(value); if self.try_push(item.as_ptr(), Ordering::SeqCst) { Ok(()) } else { Err(unsafe { item.assume_init_read() }) } } } */ impl Queue for OneSpsc { type Item = T; #[inline(always)] fn len(&self) -> usize { if self.is_empty() { 0 } else { 1 } } #[inline(always)] fn is_empty(&self) -> bool { Self::is_empty(self) } #[inline(always)] fn capacity(&self) -> Option { Some(1) } #[inline(always)] fn is_full(&self) -> bool { !Self::is_empty(self) } #[inline(always)] fn pop(&self) -> Option { self._pop(Ordering::SeqCst) } #[inline] fn push(&self, value: T) -> Result<(), T> { let item = MaybeUninit::new(value); if self.try_push(item.as_ptr(), Ordering::SeqCst) { Ok(()) } else { Err(unsafe { item.assume_init_read() }) } } } impl FlavorImpl for OneSpsc { #[inline(always)] fn try_send(&self, item: &MaybeUninit) -> bool { self.try_push(item.as_ptr(), Acquire) } #[inline(always)] fn try_send_oneshot(&self, item: *const T) -> Option { Some(self.try_push(item, SeqCst)) } #[inline(always)] fn try_recv(&self) -> Option { self._pop(Ordering::Acquire) } #[inline] fn try_recv_final(&self) -> Option { self._pop(Ordering::SeqCst) } 
#[inline] fn backoff_limit(&self) -> u16 { // Due to bound is too small, // yield with MAX_LIMIT to prevent collapse in high contention crate::backoff::MAX_LIMIT } #[inline] fn may_direct_copy(&self) -> bool { // NOTE sender has no CAS, not safe to direct copy false } } impl FlavorNew for OneSpsc { #[inline] fn new() -> Self { OneSpsc::new() } } impl FlavorSelect for OneSpsc { #[inline] fn try_select(&self, final_check: bool) -> Option { if let Some(tail) = self.start_read(if final_check { Ordering::SeqCst } else { Ordering::Acquire }) { let index = (tail & 0x1) as usize; Some(Token::new(&self.slots[index] as *const Slot as *const u8, tail as usize)) } else { None } } #[inline(always)] fn read_with_token(&self, token: Token) -> T { let slot: &Slot = unsafe { &*token.pos.cast::>() }; self._read(slot, token.stamp as u32) } } ================================================ FILE: src/lib.rs ================================================ #![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, allow(unused_attributes))] //! # Crossfire //! //! High-performance lockless spsc/mpsc/mpmc channels, algorithm derives crossbeam with improvements. //! //! It supports async contexts and bridges the gap between async and blocking contexts. //! //! For the concept, please refer to the [wiki](https://github.com/frostyplanet/crossfire-rs/wiki). //! //! ## Version history //! //! * v1.0: Used in production since 2022.12. //! //! * v2.0: [2025.6] Refactored the codebase and API //! by removing generic types from the ChannelShared type, which made it easier to code with. //! //! * v2.1: [2025.9] Removed the dependency on crossbeam-channel //! and implemented with [a modified version of crossbeam-queue](https://github.com/frostyplanet/crossfire-rs/wiki/crossbeam-related), //! brings 2x performance improvements for both async and blocking contexts. //! //! * v3.0: [2026.1] Refactored API back to generic flavor interface, added [select]. //! 
Dedicated optimization: Bounded SPSC +70%, MPSC +30%, one-size +20%.
//! Eliminated enum dispatch cost, async performance improved by another 33%. Check out [compat] for migration from v2.x.
//!
//! ## Test status
//!
//! Refer to the [README](https://github.com/frostyplanet/crossfire-rs?tab=readme-ov-file#test-status) page for known issues on specific platforms and runtimes.
//!
//! ## Performance
//!
//! Being a lockless channel, crossfire outperforms other async-capable channels.
//! And thanks to a lighter notification mechanism, most cases in blocking context are even
//! better than the original crossbeam-channel.
//!
//! Benchmark data is posted on [wiki](https://github.com/frostyplanet/crossfire-rs/wiki/benchmark-v3.0.0-2026%E2%80%9001%E2%80%9018).
//!
//! Also, being a lockless channel, the algorithm relies on spinning and yielding. Spinning is good on
//! multi-core systems, but not friendly to single-core systems (like virtual machines).
//! So we provide a function [detect_backoff_cfg()] to detect the running platform.
//! Calling it within the initialization section of your code will get a 2x performance boost on
//! VPS.
//!
//! The benchmark is written in the criterion framework. You can run the benchmark by:
//!
//! ``` shell
//! make bench crossfire
//! make bench crossfire_select
//! ```
//!
//! ## APIs
//!
//! ### Concurrency Modules
//!
//! - [spsc], [mpsc], [mpmc]. Each has a different underlying implementation
//! optimized to its concurrency model.
//! The SP or SC interface is only for non-concurrent operation. It's more memory-efficient in waker registration,
//! and has reduced atomic-op cost in the lockless algorithm.
//!
//! - [oneshot] has its special sender/receiver type because using `Tx` / `Rx` would be too heavy.
//!
//! - [select]:
//!   - [Select<'a>](crate::select::Select): crossbeam-channel style type-erased API, borrows receiver addresses and selects with a "token"
//!
- [Multiplex](crate::select::Multiplex): a multiplex stream that owns multiple receivers, selecting from the same type of
//! channel flavors, for the same type of message.
//!
//! - [waitgroup]: High-performance WaitGroup which allows a custom threshold
//!
//! ### Flavors
//!
//! The following lockless queues are exposed in the [flavor] module, and each one has a type alias in spsc/mpsc/mpmc:
//!
//! - `List` (which uses crossbeam `SegQueue`)
//! - `Array` (which is an enum that wraps crossbeam `ArrayQueue`, and a `One` if init with size<=1)
//!   - For a bounded channel, a 0 size case is not supported yet (rewritten as size 1).
//!   - The implementation for spsc & mpsc is simplified from the mpmc version.
//! - `One` (which derives from the `ArrayQueue` algorithm, but has better performance in the size=1
//! scenario, because it has two slots that allow the reader and writer to work concurrently)
//! - `Null` (See the doc [crate::null]), a cancellation-purpose channel that only wakes up on
//! closing.
//!
//! **NOTE** :
//! Although the names [Array](crate::mpmc::Array), [List](crate::mpmc::List) are the same between the spsc/mpsc/mpmc modules,
//! they are different type aliases local to their parent modules. We suggest distinguishing by
//! namespace when importing them for use.
//!
//! ### Channel builder function
//!
//! Aside from the functions `bounded_*`, `unbounded_*`, which specify the sender / receiver type,
//! each module has [build()](crate::mpmc::build()) and [new()](crate::mpmc::new()) functions, which can apply to any channel flavor, and any async/blocking combination.
//!
//!
//! ### Types
//!
//!
//!
//!
//!
//!
//!
//!
//!
//!
//!
//!
//!
//!
//!
//!
//!
//!
//!
//!
//! | Context  |                | Sender (Producer) | Receiver (Consumer) |
//! |----------|----------------|-------------------|---------------------|
//! | Blocking | trait          | [BlockingTxTrait] | [BlockingRxTrait]   |
//! |          | single         | [Tx]              | [Rx]                |
//! |          | multiple       | [MTx]             | [MRx]               |
//! |          | weak reference | [WeakTx]          |                     |
//! | Async    | trait          | [AsyncTxTrait]    | [AsyncRxTrait]      |
//! |          | single         | [AsyncTx]         | [AsyncRx]           |
//! |          | multiple       | [MAsyncTx]        | [MAsyncRx]          |
//! //! *Safety*: For the SP / SC version, [AsyncTx], [AsyncRx], [Tx], and [Rx] are not `Clone` and without `Sync`. //! Although can be moved to other threads, but not allowed to use send/recv while in an Arc. (Refer to the compile_fail //! examples in the type document). //! //! The benefit of using the SP / SC API is completely lockless waker registration, in exchange for a performance boost. //! //! The sender/receiver can use the **`From`** trait to convert between blocking and async context //! counterparts (refer to the [example](#example) below) //! //! ### Error types //! //! Error types are the same as crossbeam-channel: //! //! [TrySendError], [SendError], [SendTimeoutError], [TryRecvError], [RecvError], [RecvTimeoutError] //! //! ### Async compatibility //! //! Tested on tokio-1.x and async-std-1.x, crossfire is runtime-agnostic. //! //! The following scenarios are considered: //! //! * The [AsyncTx::send()] and [AsyncRx::recv()] operations are **cancellation-safe** in an async context. //! You can safely use the select! macro and timeout() function in tokio/futures in combination with recv(). //! On cancellation, [SendFuture] and [RecvFuture] will trigger drop(), which will clean up the state of the waker, //! making sure there is no memory-leak and deadlock. //! But you cannot know the true result from SendFuture, since it's dropped //! upon cancellation. Thus, we suggest using [AsyncTx::send_timeout()] instead. //! //! * When the "tokio" or "async_std" feature is enabled, we also provide two additional functions: //! //! - [send_timeout()](crate::AsyncTx::send_timeout()), which will return the message that failed to be sent in //! [SendTimeoutError]. We guarantee the result is atomic. Alternatively, you can use //! [send_with_timer()](crate::AsyncTx::send_with_timer()). //! //! - [recv_timeout()](crate::AsyncRx::recv_timeout()), we guarantee the result is atomic. //! Alternatively, you can use [recv_with_timer()](crate::AsyncRx::recv_with_timer()) //! 
//! * The waker footprint: //! //! When using a multi-producer and multi-consumer scenario, there's a small memory overhead to pass along a `Weak` //! reference of wakers. //! Because we aim to be lockless, when the sending/receiving futures are canceled (like tokio::time::timeout()), //! it might trigger an immediate cleanup if the try-lock is successful, otherwise will rely on lazy cleanup. //! (This won't be an issue because weak wakers will be consumed by actual message send and recv). //! On an idle-select scenario, like a notification for close, the waker will be reused as much as possible //! if poll() returns pending. //! //! * Handle written future: //! //! The future object created by [AsyncTx::send()], [AsyncTx::send_timeout()], [AsyncRx::recv()], //! [AsyncRx::recv_timeout()] is `Sized`. You don't need to put them in `Box`. //! //! If you like to use poll function directly for complex behavior, you can call //! [AsyncSink::poll_send()](crate::sink::AsyncSink::poll_send()) or [AsyncStream::poll_item()](crate::stream::AsyncStream::poll_item()) with Context. //! //! ## Usage //! //! Cargo.toml: //! ```toml //! [dependencies] //! crossfire = "3.1" //! ``` //! //! ### Feature flags //! //! * `compat`: Enable the [compat] model, which has the same API namespace struct as V2.x //! //! * `tokio`: Enable [send_timeout](crate::AsyncTx::send_timeout()), [recv_timeout](crate::AsyncRx::recv_timeout()) with tokio sleep function. (conflict //! with `async_std` feature) //! //! * `async_std`: Enable send_timeout, recv_timeout with async-std sleep function. (conflict //! with `tokio` feature) //! //! * `trace_log`: Development mode, to enable internal log while testing or benchmark, to debug deadlock issues. //! //! ### Example //! //! blocking / async sender receiver mixed together //! //! ```rust //! //! extern crate crossfire; //! use crossfire::*; //! #[macro_use] //! extern crate tokio; //! use tokio::time::{sleep, interval, Duration}; //! //! #[tokio::main] //! 
async fn main() { //! let (tx, rx) = mpmc::bounded_async::(100); //! let mut recv_counter = 0; //! let mut co_tx = Vec::new(); //! let mut co_rx = Vec::new(); //! const ROUND: usize = 1000; //! //! let _tx: MTx> = tx.clone().into_blocking(); //! co_tx.push(tokio::task::spawn_blocking(move || { //! for i in 0..ROUND { //! _tx.send(i).expect("send ok"); //! } //! })); //! co_tx.push(tokio::spawn(async move { //! for i in 0..ROUND { //! tx.send(i).await.expect("send ok"); //! } //! })); //! let _rx: MRx> = rx.clone().into_blocking(); //! co_rx.push(tokio::task::spawn_blocking(move || { //! let mut count: usize = 0; //! 'A: loop { //! match _rx.recv() { //! Ok(_i) => { //! count += 1; //! } //! Err(_) => break 'A, //! } //! } //! count //! })); //! co_rx.push(tokio::spawn(async move { //! let mut count: usize = 0; //! 'A: loop { //! match rx.recv().await { //! Ok(_i) => { //! count += 1; //! } //! Err(_) => break 'A, //! } //! } //! count //! })); //! for th in co_tx { //! let _ = th.await.unwrap(); //! } //! for th in co_rx { //! recv_counter += th.await.unwrap(); //! } //! assert_eq!(recv_counter, ROUND * 2); //! } //! 
``` #[allow(private_bounds)] /// lockless queue implementation and channel flavor traits pub mod flavor; mod shared; pub use shared::ChannelShared; mod backoff; pub use backoff::detect_backoff_cfg; #[allow(dead_code)] mod collections; #[allow(dead_code)] mod waker; #[allow(private_bounds)] mod waker_registry; pub mod mpmc; pub mod mpsc; pub mod oneshot; pub mod spsc; pub mod waitgroup; mod blocking_tx; pub use blocking_tx::*; #[allow(private_bounds)] mod blocking_rx; pub use blocking_rx::*; mod async_tx; pub use async_tx::*; #[allow(private_bounds)] mod async_rx; pub use async_rx::*; mod weak; pub use weak::WeakTx; #[cfg(feature = "compat")] pub mod compat; pub mod null; pub mod sink; pub mod stream; mod crossbeam; pub use crossbeam::err::*; #[allow(private_bounds)] pub mod select; /// logging macro for development #[macro_export(local_inner_macros)] macro_rules! trace_log { ($($arg:tt)+)=>{ #[cfg(feature="trace_log")] { log::debug!($($arg)+); } }; } /// logging macro for development under tokio #[macro_export(local_inner_macros)] macro_rules! tokio_task_id { () => {{ #[cfg(all(feature = "trace_log", feature = "tokio"))] { tokio::task::try_id() } #[cfg(not(all(feature = "trace_log", feature = "tokio")))] { "" } }}; } use flavor::Flavor; use std::sync::Arc; /// type limiter for channel builder pub trait SenderType { type Flavor: Flavor; fn new(shared: Arc>) -> Self; } /// type limiter for channel builder pub trait ReceiverType: AsRef> { type Flavor: Flavor; fn new(shared: Arc>) -> Self; } pub trait NotCloneable {} ================================================ FILE: src/mpmc.rs ================================================ //! Multiple producers, multiple consumers. //! //! The optimization assumes multiple consumers. The waker registration of the receiver is less efficient compared to `mpsc`. //! //! **NOTE**: For the MC (multiple consumer) version, [MAsyncTx], [MAsyncRx], [MTx] and [MRx] are `Clone` and implement `Sync`. //! 
They can be safely used with `send`/`recv` while in an `Arc`. //! //! # Examples //! //! ``` //! use crossfire::*; //! use std::thread; //! //! struct Worker { //! tx: MAsyncTx>, //! } //! //! impl Worker { //! pub fn new() -> Self { //! // use type hint //! let (tx, rx): (MAsyncTx<_>, MRx<_>) = mpmc::build(mpmc::Array::::new(100)); //! // equals to //! // let (tx, rx): (MAsyncTx<_>, MRx<_>) = mpmc::bounded_blocking::(100); //! for _ in 0..4 { //! let _rx = rx.clone(); //! thread::spawn(move || { //! match _rx.recv() { //! Ok(item)=>{ //! println!("recv job {}", item); //! } //! Err(_)=>return, //! } //! }); //! } //! Self{ //! tx, //! } //! } //! pub async fn submit(&self, msg: usize) { //! self.tx.send(msg).await.expect("send"); //! } //! } //! ``` use crate::async_rx::*; use crate::async_tx::*; use crate::blocking_rx::*; use crate::blocking_tx::*; use crate::flavor::{ flavor_dispatch, flavor_select_dispatch, queue_dispatch, Flavor, FlavorBounded, FlavorImpl, FlavorMC, FlavorMP, FlavorNew, FlavorWrap, Queue, }; use crate::null::CloseHandle; use crate::shared::*; use crate::{ReceiverType, SenderType}; use std::mem::MaybeUninit; /// Flavor Type for unbounded MPMC channel pub type List = FlavorWrap, RegistryDummy, RegistryMultiRecv>; /// Flavor Type for one-sized MPMC channel pub type One = FlavorWrap, RegistryMultiSend, RegistryMultiRecv>; /// Flavor Type for bounded MPMC channel #[allow(clippy::large_enum_variant)] pub enum Array { Array(crate::flavor::Array), One(crate::flavor::One), } impl Array { #[inline] pub fn new(size: usize) -> Self { if size <= 1 { Self::One(crate::flavor::One::new()) } else { Self::Array(crate::flavor::Array::::new(size)) } } } impl FlavorMP for Array {} impl FlavorMC for Array {} macro_rules! 
wrap_array { ($self: expr, $method:ident $($arg:expr)*)=>{ match $self { Self::Array(inner) => inner.$method($($arg)*), Self::One(inner) => inner.$method($($arg)*), } }; } impl Queue for Array { type Item = T; queue_dispatch!(wrap_array); } impl FlavorImpl for Array { flavor_dispatch!(wrap_array); } impl FlavorSelect for Array { flavor_select_dispatch!(wrap_array); } impl FlavorBounded for Array { #[inline(always)] fn new_with_bound(size: usize) -> Self { Self::new(size) } } impl Flavor for Array { type Send = RegistryMultiSend; type Recv = RegistryMultiRecv; } /// The generic builder for all mpmc channel types with a new method (except Array). /// /// Initialize sender and receiver types from a flavor type, /// you can let the compiler to infer the type according to return type signature. /// (the falvor might have different new() method, but the rest is the same. /// # Examples /// /// ```rust /// use crossfire::*; /// let (tx, rx): (MTx<_>, MRx<_>) = mpmc::new::, _, _>(); /// let (tx, rx): (MAsyncTx>, MRx>) = mpmc::new(); /// ``` #[inline(always)] pub fn new() -> (S, R) where F: Flavor + FlavorNew + FlavorMP + FlavorMC, S: SenderType + Clone, R: ReceiverType + Clone, { build::(F::new()) } /// The generic builder for all mpmc channel types. /// /// Initialize sender and receiver types from a flavor type, /// you can let the compiler to infer the type according to return type signature. /// (the falvor might have different new() method, but the rest is the same. 
/// /// # Examples /// /// ```rust /// use crossfire::{*, mpmc::*}; /// let (tx, rx): (MTx<_>, MRx<_>) = build::, _, _>(List::new()); /// let (tx, rx): (MAsyncTx>, MRx>) = build(One::new()); /// ``` #[inline(always)] pub fn build(flavor: F) -> (S, R) where F: Flavor + FlavorMP + FlavorMC, S: SenderType + Clone, R: ReceiverType + Clone, { let shared = ChannelShared::new(flavor, F::Send::new(), F::Recv::new()); (S::new(shared.clone()), R::new(shared)) } #[inline] fn unbounded_new() -> (MTx>, R) where T: 'static, R: ReceiverType> + Clone, { build::, MTx>, R>(List::::from_inner(crate::flavor::List::::new())) } #[inline] pub fn unbounded_blocking() -> (MTx>, MRx>) where T: 'static, { unbounded_new() } #[inline] pub fn unbounded_async() -> (MTx>, MAsyncRx>) where T: 'static, { unbounded_new() } fn bounded_new(size: usize) -> (S, R) where T: 'static, S: SenderType> + Clone, R: ReceiverType> + Clone, { build::, S, R>(Array::::new(size)) } /// MPMC Bounded channel builder /// /// # Examples /// /// ```rust /// use crossfire::{mpmc, *}; /// let (tx, rx) = mpmc::bounded_blocking::(10); /// tx.send(42).unwrap(); /// assert_eq!(rx.recv(), Ok(42)); /// ``` /// Creates a bounded channel with a pair of blocking sender and receiver. /// /// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1. #[inline] pub fn bounded_blocking(size: usize) -> (MTx>, MRx>) where T: 'static, { bounded_new(size) } /// Creates a bounded channel with a pair of async sender and receiver. /// /// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1. #[inline] pub fn bounded_async(size: usize) -> (MAsyncTx>, MAsyncRx>) where T: 'static, { bounded_new(size) } /// Creates a bounded channel with a pair of blocking sender and async receiver. /// /// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1. 
#[inline] pub fn bounded_blocking_async(size: usize) -> (MTx>, MAsyncRx>) where T: 'static, { bounded_new(size) } /// Creates a bounded channel with a pair of async sender and blocking receiver. /// /// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1. #[inline] pub fn bounded_async_blocking(size: usize) -> (MAsyncTx>, MRx>) where T: 'static, { bounded_new(size) } /// Flavor type for close notification, refer to [crate::null] for usage pub type Null = FlavorWrap; impl Null { #[inline(always)] pub fn new_blocking(&self) -> (CloseHandle, MRx) { new() } #[inline(always)] pub fn new_async(self) -> (CloseHandle, MAsyncRx) { new() } } ================================================ FILE: src/mpsc.rs ================================================ //! Multiple producers, single consumer. //! //! The optimization assumes a single consumer. The waker registration of the receiver is lossless compared to `mpmc`. //! //! **NOTE**: //! - [AsyncRx] and [Rx] are not `Clone` and do not implement `Sync`. //! Although they can be moved to other threads, they are not allowed to be used with `send`/`recv` while in an `Arc`. //! - [MAsyncTx] and [MTx] are `Clone` and `Sync` //! //! The following code is OK: //! //! ``` rust //! use crossfire::*; //! async fn foo() { //! let (tx, rx) = mpsc::bounded_async::(100); //! tokio::spawn(async move { //! let _ = rx.recv().await; //! }); //! drop(tx); //! } //! ``` //! //! Because the `AsyncRx` does not have the `Sync` marker, using `Arc` will lose the `Send` marker. //! //! For your safety, the following code **should not compile**: //! //! ``` compile_fail //! use crossfire::*; //! use std::sync::Arc; //! async fn foo() { //! let (tx, rx) = mpsc::bounded_async::(100); //! let rx = Arc::new(rx); //! tokio::spawn(async move { //! let _ = rx.recv().await; //! }); //! drop(tx); //! } //! 
``` use crate::async_rx::*; use crate::async_tx::*; use crate::blocking_rx::*; use crate::blocking_tx::*; use crate::flavor::{ flavor_dispatch, flavor_select_dispatch, queue_dispatch, Flavor, FlavorBounded, FlavorImpl, FlavorMP, FlavorNew, FlavorWrap, Queue, }; use crate::null::CloseHandle; use crate::shared::*; use crate::{NotCloneable, ReceiverType, SenderType}; use std::mem::MaybeUninit; /// Flavor Type alias for unbounded MPSC channel pub type List = FlavorWrap, RegistryDummy, RegistrySingle>; /// Flavor type for one-sized MPSC channel pub type One = FlavorWrap, RegistryMultiSend, RegistrySingle>; /// Flavor Type alias for bounded MPSC channel wrapped with specified One impl #[allow(clippy::large_enum_variant)] pub enum Array { Array(crate::flavor::ArrayMpsc), One(crate::flavor::OneMpsc), } impl Array { #[inline] pub fn new(size: usize) -> Self { if size <= 1 { Self::One(crate::flavor::OneMpsc::new()) } else { Self::Array(crate::flavor::ArrayMpsc::::new(size)) } } } impl FlavorMP for Array {} macro_rules! wrap_array { ($self: expr, $method:ident $($arg:expr)*)=>{ match $self { Self::Array(inner) => inner.$method($($arg)*), Self::One(inner) => inner.$method($($arg)*), } }; } impl Queue for Array { type Item = T; queue_dispatch!(wrap_array); } impl FlavorImpl for Array { flavor_dispatch!(wrap_array); } impl FlavorSelect for Array { flavor_select_dispatch!(wrap_array); } impl FlavorBounded for Array { #[inline(always)] fn new_with_bound(size: usize) -> Self { Self::new(size) } } impl Flavor for Array { type Send = RegistryMultiSend; type Recv = RegistrySingle; } /// The generic builder for all mpsc channel types with a new method (except Array). /// /// Initialize sender and receiver types from a flavor type, /// you can let the compiler to infer the type according to return type signature. /// (the falvor might have different new() method, but the rest is the same. 
/// # Examples /// /// ```rust /// use crossfire::*; /// let (tx, rx): (MTx<_>, Rx<_>) = mpsc::new::, _, _>(); /// let (tx, rx): (MAsyncTx>, Rx>) = mpsc::new(); /// ``` #[inline(always)] pub fn new() -> (S, R) where F: Flavor + FlavorNew + FlavorMP, S: SenderType + Clone, R: ReceiverType + NotCloneable, { build::(F::new()) } /// The generic builder for all mpsc channel types /// /// Initialize sender and receiver types from a flavor type, /// you can let the compiler to infer the type according to return type signature. /// (the flavor might have different new() method, but the rest is the same. /// /// # Examples /// /// ```rust /// use crossfire::{*, mpsc::*}; /// let (tx, rx): (MTx<_>, Rx<_>) = build::, _, _>(List::new()); /// let (tx, rx): (MAsyncTx>, Rx>) = build(One::new()); /// ``` #[inline(always)] pub fn build(flavor: F) -> (S, R) where F: Flavor + FlavorMP, S: SenderType + Clone, R: ReceiverType + NotCloneable, { let shared = ChannelShared::new(flavor, F::Send::new(), F::Recv::new()); (S::new(shared.clone()), R::new(shared)) } #[inline] fn unbounded_new() -> (MTx>, R) where T: 'static, R: ReceiverType> + NotCloneable, { build::, MTx>, R>(List::::from_inner(crate::flavor::List::::new())) } #[inline] pub fn unbounded_blocking() -> (MTx>, Rx>) where T: 'static, { unbounded_new() } #[inline] pub fn unbounded_async() -> (MTx>, AsyncRx>) where T: 'static, { unbounded_new() } fn bounded_new(size: usize) -> (S, R) where T: 'static, S: SenderType> + Clone, R: ReceiverType> + NotCloneable, { build::, S, R>(Array::::new(size)) } /// Creates a bounded channel with a pair of blocking sender and receiver. /// /// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1. #[inline] pub fn bounded_blocking(size: usize) -> (MTx>, Rx>) where T: 'static, { bounded_new(size) } /// Creates a bounded channel with a pair of async sender and receiver. 
/// /// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1. #[inline] pub fn bounded_async(size: usize) -> (MAsyncTx>, AsyncRx>) where T: 'static, { bounded_new(size) } /// Creates a bounded channel with a pair of blocking sender and async receiver. /// /// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1. #[inline] pub fn bounded_blocking_async(size: usize) -> (MTx>, AsyncRx>) where T: 'static, { bounded_new(size) } /// Creates a bounded channel with a pair of async sender and blocking receiver. /// /// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1. #[inline] pub fn bounded_async_blocking(size: usize) -> (MAsyncTx>, Rx>) where T: 'static, { bounded_new(size) } /// Flavor type for close notification, refer to [crate::null] for usage pub type Null = FlavorWrap; impl Null { #[inline(always)] pub fn new_blocking(&self) -> (CloseHandle, Rx) { new() } #[inline(always)] pub fn new_async(self) -> (CloseHandle, AsyncRx) { new() } } ================================================ FILE: src/null.rs ================================================ //! A null flavor type that use to notify thread/future to close //! //! It's common practice we use `()` channel in async code, not intended for any message, just //! subscribe for close event. (For example, cancelling socket operations, stopping worker loops...) //! This is a module designed for that, with minimized polling cost. //! //! You can initialize a null channel with [crate::mpsc::Null::new_async()] or //! [crate::mpmc::Null::new_async()], which return a [CloseHandle], (which can only be `clone` or `drop`, //! but unable to send any message), and a normal receiver type (which recv method is always //! blocked until the all copy of `CloseHandle` is dropped). //! //! > NOTE: using mpsc version has less cost then mpmc version. //! //! # Examples //! //! 
Use null channel to stop a background loop. //! //! ```rust //! use crossfire::{null::CloseHandle, *}; //! use std::time::Duration; //! //! # #[tokio::main] //! # async fn main() { //! // Create a null channel //! let (stop_tx, stop_rx): (CloseHandle, MAsyncRx) = mpmc::Null::new().new_async(); //! let (data_tx, data_rx): (MAsyncTx>, MAsyncRx>) = mpmc::bounded_async::(10); //! //! // Spawn a background task //! let task = tokio::spawn(async move { //! loop { //! tokio::select! { //! // If the null channel is closed (stop_tx dropped), this branch will be selected //! res = stop_rx.recv() => { //! if res.is_err() { //! println!("Stopping task"); //! break; //! } //! } //! res = data_rx.recv() => { //! match res { //! Ok(data) => println!("Received data: {}", data), //! Err(_) => break, //! } //! } //! } //! } //! }); //! //! data_tx.send(1).await.unwrap(); //! tokio::time::sleep(Duration::from_millis(10)).await; //! //! // Drop the stop handle to signal the task to stop //! drop(stop_tx); //! //! task.await.unwrap(); //! # } //! 
``` use crate::flavor::Flavor; use crate::flavor::{FlavorImpl, FlavorNew, FlavorSelect, Queue, Token}; use crate::shared::ChannelShared; use crate::SenderType; use core::mem::MaybeUninit; use std::sync::Arc; /// an flavor type can never receive any message pub struct Null(); impl Queue for Null { type Item = (); #[inline(always)] fn pop(&self) -> Option<()> { None } #[inline(always)] fn push(&self, _item: ()) -> Result<(), ()> { unreachable!(); } #[inline(always)] fn len(&self) -> usize { 0 } #[inline(always)] fn capacity(&self) -> Option { None } #[inline(always)] fn is_full(&self) -> bool { true } #[inline(always)] fn is_empty(&self) -> bool { true } } impl FlavorImpl for Null { #[inline(always)] fn try_send(&self, _item: &MaybeUninit<()>) -> bool { // work as an /dev/null, although normally init with CloseHandle which don't have send() method true } #[inline(always)] fn try_send_oneshot(&self, _item: *const ()) -> Option { Some(true) } #[inline(always)] fn try_recv(&self) -> Option { // always empty None } #[inline(always)] fn try_recv_final(&self) -> Option { None } #[inline] fn backoff_limit(&self) -> u16 { 0 } } impl FlavorNew for Null { #[inline] fn new() -> Self { Self() } } impl FlavorSelect for Null { #[inline(always)] fn try_select(&self, _final_check: bool) -> Option { None } #[inline(always)] fn read_with_token(&self, _token: Token) { unreachable!(); } } /// The CloseHandle is a special type for flavor [Null], only impl `Clone` and `Drop` pub struct CloseHandle(Arc>); impl Clone for CloseHandle { #[inline(always)] fn clone(&self) -> Self { self.0.add_tx(); Self(self.0.clone()) } } impl Drop for CloseHandle { #[inline(always)] fn drop(&mut self) { self.0.close_tx(); } } impl SenderType for CloseHandle where F: Flavor, { type Flavor = F; #[inline(always)] fn new(shared: Arc>) -> Self { CloseHandle(shared) } } ================================================ FILE: src/oneshot.rs ================================================ //! 
OneShot channel support both thread and async //! //! NOTE: In order to reduce initialization and teardown cost, this module use specialized sender [TxOneshot] and //! receiver [RxOneshot] types. //! //! # Examples //! //! ## Thread Context //! //! ``` //! use crossfire::oneshot::oneshot; //! //! let (tx, rx) = oneshot(); //! //! std::thread::spawn(move || { //! tx.send("Hello from sender!"); //! }); //! //! let received = rx.recv().unwrap(); //! assert_eq!(received, "Hello from sender!"); //! ``` //! //! ## Async Context //! //! ``` //! use crossfire::oneshot::oneshot; //! //! async fn example() { //! let (tx, rx) = oneshot(); //! //! tokio::spawn(async move { //! tx.send("Hello from async sender!"); //! }); //! //! let received = rx.await.unwrap(); //! assert_eq!(received, "Hello from async sender!"); //! } //! ``` use crate::backoff::Backoff; use crate::shared::*; #[allow(unused_imports)] use crate::{tokio_task_id, trace_log}; use core::cell::UnsafeCell; use std::future::Future; use std::pin::Pin; use std::ptr::NonNull; use std::sync::atomic::{ fence, AtomicU8, Ordering::{self, AcqRel, Acquire, SeqCst}, }; use std::task::{Context, Poll}; use std::thread; use std::time::{Duration, Instant}; /// Send/TxOneshot::drop will set this flag once, never changed. 
const LOCK_FLAG: u8 = 0x1; /// set by RxOneshot const WAKER_SET_FLAG: u8 = 0x2; /// set by any of TxOneshot/RxOneshot if it exit const CLOSE_FLAG: u8 = 0x4; const EXIST_FLAG: u8 = 0x8; struct OneShotInner { state: AtomicU8, value: UnsafeCell>, o_waker: UnsafeCell>, } unsafe impl Send for OneShotInner {} unsafe impl Sync for OneShotInner {} impl OneShotInner { #[inline] fn new() -> Box { Box::new(Self { value: UnsafeCell::new(None), state: AtomicU8::new(0), o_waker: UnsafeCell::new(None), }) } #[inline] fn get_waker(&self) -> &mut Option { unsafe { &mut *self.o_waker.get() } } #[inline(always)] fn value_mut(&self) -> &mut Option { unsafe { &mut *self.value.get() } } #[inline(always)] fn set_state(&self, flag: u8) -> u8 { self.state.fetch_or(flag, Ordering::AcqRel) } #[inline(always)] fn _try_recv(&self, order: Ordering) -> Result { let state = self.state.load(order); if state & LOCK_FLAG > 0 { Ok(state) } else { Err(state) } } // NOTE: in order to avoid miri borrow checker, use raw ptr here #[inline(always)] fn _consume_value(p: NonNull, mut state: u8) -> Option { debug_assert!( state & LOCK_FLAG > 0, "oneshot:({:?}) consume value unexpected {state}", tokio_task_id!() ); let this = unsafe { p.as_ref() }; let item = if state & EXIST_FLAG > 0 { this.value_mut().take() } else { None }; loop { if state & CLOSE_FLAG > 0 { trace_log!( "oneshot:({:?}) recv value={} & destroy", tokio_task_id!(), item.is_some() ); fence(Acquire); let _ = unsafe { Box::from_raw(p.as_ptr()) }; // they close first return item; } if let Err(s) = this.state.compare_exchange(state, CLOSE_FLAG | state, AcqRel, Acquire) { trace_log!( "oneshot:({:?}) recv value={} {state} close retry", tokio_task_id!(), item.is_some() ); state = s; } else { trace_log!( "oneshot:({:?}) recv value={} {state}", tokio_task_id!(), item.is_some() ); // we close first return item; } } } /// return true to destroy #[inline(always)] fn _notify_rx(p: NonNull, exist: bool) -> bool { let this = unsafe { p.as_ref() }; let mut 
old_state = 0; let exist_flag: u8 = if exist { EXIST_FLAG } else { 0 }; loop { let new_state = if old_state == 0 { LOCK_FLAG | CLOSE_FLAG | exist_flag } else if old_state == WAKER_SET_FLAG { LOCK_FLAG | WAKER_SET_FLAG | exist_flag } else if old_state & CLOSE_FLAG > 0 { // WAKER_SET_FLAG | CLOSE_FLAG, or just CLOSE_FLAG trace_log!("oneshot:({:?}) rx closed", tokio_task_id!()); return true; } else { panic!("unexpected state {}", old_state); }; match this.state.compare_exchange_weak(old_state, new_state, AcqRel, Acquire) { Ok(_) => { if old_state == 0 { trace_log!("oneshot:({:?}) send value", tokio_task_id!()); return false; } else { if let Some(waker) = this.get_waker().as_ref() { // the sender should never move the waker, because rx::poll will // validate it. trace_log!("oneshot:({:?}) wake rx", tokio_task_id!()); waker.wake_by_ref(); } else { unreachable!(); } if let Err(state) = this.state.compare_exchange( new_state, CLOSE_FLAG | LOCK_FLAG | exist_flag, AcqRel, Acquire, ) { // Safety: although we have no use for fail value other than debug log, // but consider use failure ordering Acquire instead of Relaxed for miri, // as a fence (stop the following from_raw to re-ordering). debug_assert!(state & CLOSE_FLAG > 0, "unexpected state {state}"); trace_log!("oneshot:({:?}) rx closed {state}", tokio_task_id!()); return true; } else { // we close first, let rx do the cleanup return false; } } } Err(s) => { old_state = s; } } } } #[inline(always)] fn set_waker(&self, waker: ThinWaker) -> Result<(), u8> { // thread context only need set waker once. // NOTE we should guarantee waker not set twice // (the recv_timeout API should not allow recv twice), // it will complicate things (like async poll). 
self.get_waker().replace(waker); self.state.compare_exchange(0, WAKER_SET_FLAG, AcqRel, Acquire)?; Ok(()) } #[inline(always)] fn cancel_waker(&self, abandon: bool) -> Result<(), u8> { let new_state = if abandon { CLOSE_FLAG } else { 0 }; if let Err(state) = self.state.compare_exchange(WAKER_SET_FLAG, new_state, AcqRel, Acquire) { // expect LOCK_FLAG | CLOSE_FLAG, or LOCK_FLAG | WAKER_SET_FLAG return Err(state); } else { Ok(()) } } #[inline(always)] fn is_empty(&self) -> bool { let state = self.state.load(Ordering::SeqCst); state & EXIST_FLAG == 0 } } /// Sender for oneshot channel pub struct TxOneshot(NonNull>); unsafe impl Send for TxOneshot {} unsafe impl Sync for TxOneshot {} impl TxOneshot { /// Sending the item is one-time non-blocking behavior #[inline] pub fn send(self, item: T) { unsafe { self.0.as_ref() }.value_mut().replace(item); if OneShotInner::_notify_rx(self.0, true) { // drop inner let _ = unsafe { Box::from_raw(self.0.as_ptr()) }; } std::mem::forget(self); } /// return true when RxOneshot is dropped /// /// # Safety /// /// This is not SeqCst, only Acquire, for sender we don't require to know immediately. 
// NOTE(review): this span is extraction-damaged: several source lines were collapsed into
// one physical line each, and generic parameters (e.g. `<T>`, `<F>`) were stripped by the
// extractor — TODO restore from the original repository before editing logic.
// Below: TxOneshot::is_disconnected + Drop for TxOneshot (frees the shared OneShotInner box
// when _notify_rx reports this side is last), then RxOneshot and its Drop, which either
// destroys the inner box (tx already gone: CLOSE_FLAG was set) or leaves cleanup to tx.
// The debug_assert! enumerates the states considered legal at rx-drop; `EXIST_FLAG==` is a
// spacing oddity in the original, not a distinct operator.
#[inline] pub fn is_disconnected(&self) -> bool { unsafe { self.0.as_ref() }.state.load(Acquire) & CLOSE_FLAG > 0 } } impl Drop for TxOneshot { #[inline] fn drop(&mut self) { if OneShotInner::_notify_rx(self.0, false) { // drop inner let _ = unsafe { Box::from_raw(self.0.as_ptr()) }; } } } /// Receiver for oneshot channel #[must_use] pub struct RxOneshot(Option>>); unsafe impl Send for RxOneshot {} impl Drop for RxOneshot { #[inline] fn drop(&mut self) { if let Some(p) = self.0.as_ref() { let inner = unsafe { p.as_ref() }; let old_state = inner.set_state(CLOSE_FLAG); if old_state & CLOSE_FLAG > 0 { trace_log!("oneshot:({:?}) rx drop destroy, state={}", tokio_task_id!(), old_state); debug_assert_eq!( old_state & (!EXIST_FLAG), CLOSE_FLAG | LOCK_FLAG, "unexpected state {old_state}" ); // tx drop // drop inner let _ = unsafe { Box::from_raw(p.as_ptr()) }; } else { // let tx do the cleanup trace_log!("oneshot:({:?}) rx drop, state={}", tokio_task_id!(), old_state); debug_assert!( old_state == 0 // we drop first, tx not trigger || old_state == WAKER_SET_FLAG // rx.await cancel, or rx.recv_timeout() timeout || old_state | EXIST_FLAG== (EXIST_FLAG | LOCK_FLAG | WAKER_SET_FLAG), // tx waking while rx.await cancel, or rx.recv_timeout() timeout "oneshot:({:?}) rx drop, unexpected state={}", tokio_task_id!(), old_state ); } } } } impl RxOneshot { /// NOTE: this will blocking current thread #[inline] pub fn recv(self) -> Result { if let Ok(item) = self._recv_blocking(None) { return Ok(item); } Err(RecvError) } /// NOTE: this will blocking current thread with a timeout #[inline] pub fn recv_timeout(self, timeout: Duration) -> Result { let deadline = Instant::now() + timeout; match self._recv_blocking(Some(deadline)) { Ok(item) => Ok(item), Err(true) => Err(RecvTimeoutError::Timeout), Err(false) => Err(RecvTimeoutError::Disconnected), } } #[inline(always)] pub fn is_empty(&self) -> bool { if let Some(p) = self.0.as_ref() { let inner = unsafe { p.as_ref() }; inner.is_empty() }
// NOTE(review): try_recv() sets self.0 = None once the value state is claimed, so later
// recv()/poll() calls observe None and return Disconnected/Ready(Err) instead of touching a
// possibly-freed pointer. poll() below re-checks for a previously set waker and treats a
// will_wake() match as a spurious wakeup (returns Pending without re-registering).
else { true } } #[inline] pub fn try_recv(&mut self) -> Result { if let Some(p) = self.0.as_ref() { let p = *p; if let Ok(state) = unsafe { p.as_ref() }._try_recv(Acquire) { self.0 = None; if let Some(item) = OneShotInner::_consume_value(p, state) { return Ok(item); } else { return Err(TryRecvError::Disconnected); } } else { Err(TryRecvError::Empty) } } else { Err(TryRecvError::Disconnected) } } #[inline] pub async fn recv_async(self) -> Result { self.await } #[inline] fn poll(&mut self, ctx: &mut Context<'_>) -> Poll> { let p: NonNull> = if let Some(p) = self.0.as_ref() { *p } else { // might poll after try_recv() finish return Poll::Ready(Err(())); }; let inner = unsafe { p.as_ref() }; macro_rules! process { ($state: expr) => { self.0 = None; if let Some(item) = OneShotInner::_consume_value(p, $state) { return Poll::Ready(Ok(item)); } else { return Poll::Ready(Err(())); } }; } macro_rules! check_exist { ($order: expr) => {{ match inner._try_recv($order) { Ok(state) => { process!(state); } Err(s) => s, } }}; } let state = check_exist!(SeqCst); if state & WAKER_SET_FLAG > 0 { let waker = inner.get_waker().as_ref().unwrap(); if waker.will_wake(ctx) { trace_log!("oneshot:({:?}) spurious waked state {}", tokio_task_id!(), state,); return Poll::Pending; } if let Err(state) = inner.cancel_waker(false) { process!(state); } } if let Err(state) = inner.set_waker(ThinWaker::Async(ctx.waker().clone())) { process!(state); } Poll::Pending } /// On Disconnected return Err(false), /// Err(true) when timeout. #[inline(always)] pub(crate) fn _recv_blocking(self, deadline: Option) -> Result { let p: NonNull> = if let Some(p) = self.0.as_ref() { *p } else { // might recv() after try_recv() ok/disconnect return Err(false); }; let inner = unsafe { p.as_ref() }; macro_rules! process { ($state: expr) => { let _ = inner; std::mem::forget(self); if let Some(item) = OneShotInner::_consume_value(p, $state) { return Ok(item); } else { return Err(false); } }; } macro_rules!
// NOTE(review): _recv_blocking continues — spin (Backoff) then register a thread waker and
// park/park_timeout until woken or deadline. mem::forget(self) skips RxOneshot::drop once
// the inner state has already been consumed/closed by the macros — presumably to avoid
// double-freeing the inner box; TODO confirm against OneShotInner's ownership contract.
// Err(true) = timeout, Err(false) = disconnected (opposite of Multiplex::_recv_blocking's
// convention elsewhere in this crate — easy to confuse, worth unifying upstream).
try_recv { ($order: expr) => { if let Ok(state) = inner._try_recv($order) { trace_log!("try_recv got {state}"); process!(state); } }; } try_recv!(Acquire); let mut backoff = Backoff::new(); while !backoff.snooze() { try_recv!(Acquire); } if let Err(state) = inner.set_waker(ThinWaker::Blocking(thread::current())) { process!(state); } trace_log!("oneshot: waker set"); loop { try_recv!(SeqCst); match check_timeout(deadline) { Ok(None) => { std::thread::park(); } Ok(Some(dur)) => { std::thread::park_timeout(dur); } Err(_) => { trace_log!("oneshot: to cancel_waker on timeout"); if let Err(state) = inner.cancel_waker(true) { process!(state); } else { let _ = inner; // we close first std::mem::forget(self); return Err(true); } } } } } /// Wrap RxOneshot with timeout, consume self when it's done. /// The Future returns `Result` #[cfg(any(feature = "tokio", feature = "async_std"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "tokio", feature = "async_std"))))] #[inline] pub async fn recv_async_timeout( self, timeout: std::time::Duration, ) -> Result { #[cfg(feature = "tokio")] { let sleep = tokio::time::sleep(timeout); self.recv_async_with_timer(sleep).await } #[cfg(feature = "async_std")] { let sleep = async_std::task::sleep(timeout); self.recv_async_with_timer(sleep).await } } /// Wrap RxOneshot with custom sleep function, consume self when it's done. /// /// The behavior is atomic: the message is either received successfully or the operation is canceled due to a timeout. /// /// Returns `Ok(T)` when successful. /// /// Returns Err([RecvTimeoutError::Timeout]) when a message could not be received because the channel is empty and the operation timed out. /// /// Returns Err([RecvTimeoutError::Disconnected]) if the sender has been dropped and the channel is empty. /// /// # Argument: /// /// * `sleep`: The sleep function. the return value of `sleep` is ignore.
// NOTE(review): OneshotTimeoutFuture polls the rx first, then the user-supplied sleep
// future; Pin::new_unchecked is justified by the original comment ("not moving sleep,
// neither it's exposed outside") — the struct fields are private so callers cannot move
// `sleep` after pinning.
We add generic `R` just in order to support smol::Timer /// # Example /// /// Example with smol /// /// ```rust /// extern crate smol; /// use std::time::Duration; /// use crossfire::*; /// async fn foo() { /// let (tx, rx) = oneshot::oneshot::(); /// match rx.recv_async_with_timer(smol::Timer::after(Duration::from_secs(1))).await { /// Ok(_item)=>{ /// println!("message recv"); /// } /// Err(RecvTimeoutError::Timeout)=>{ /// println!("timeout"); /// } /// Err(RecvTimeoutError::Disconnected)=>{ /// println!("sender-side closed"); /// } /// } /// } /// ``` /// /// Example with tokio: /// /// ```rust /// use std::time::Duration; /// use crossfire::*; /// async fn foo() { /// let (tx, rx) = oneshot::oneshot::(); /// let sleep = tokio::time::sleep(Duration::from_secs(1)); /// let _r = rx.recv_async_with_timer(sleep).await; /// } /// ``` #[inline] pub fn recv_async_with_timer(self, sleep: F) -> OneshotTimeoutFuture where F: Future, { OneshotTimeoutFuture { rx: self, sleep } } } impl Future for RxOneshot { type Output = Result; #[inline] fn poll(self: Pin<&mut Self>, ctx: &mut Context) -> Poll { let this = self.get_mut(); match this.poll(ctx) { Poll::Ready(Ok(item)) => Poll::Ready(Ok(item)), Poll::Ready(Err(())) => Poll::Ready(Err(RecvError)), Poll::Pending => Poll::Pending, } } } pub struct OneshotTimeoutFuture where F: Future, { rx: RxOneshot, sleep: F, } impl Future for OneshotTimeoutFuture where F: Future, { type Output = Result; #[inline] fn poll(self: Pin<&mut Self>, ctx: &mut Context) -> Poll { // NOTE: we can use unchecked to bypass pin because we are not movig "sleep", // neither it's exposed outside let this = unsafe { self.get_unchecked_mut() }; match this.rx.poll(ctx) { Poll::Ready(Ok(item)) => return Poll::Ready(Ok(item)), Poll::Ready(Err(())) => return Poll::Ready(Err(RecvTimeoutError::Disconnected)), _ => {} } let sleep = unsafe { Pin::new_unchecked(&mut this.sleep) }; if sleep.poll(ctx).is_ready() { Poll::Ready(Err(RecvTimeoutError::Timeout)) } else {
// NOTE(review): oneshot() constructor — both halves share the same leaked Box (NonNull);
// ownership of the final deallocation is negotiated through the state flags seen above.
// The remainder of this line is the start of src/select/mod.rs and src/select/multiplex.rs
// (file-separator text embedded by the extraction, not part of any one source file).
Poll::Pending } } } #[inline] pub fn oneshot() -> (TxOneshot, RxOneshot) { let p = unsafe { NonNull::new_unchecked(Box::into_raw(OneShotInner::new())) }; let tx = TxOneshot(p); let rx = RxOneshot(Some(p)); (tx, rx) } ================================================ FILE: src/select/mod.rs ================================================ //! # Selection between channels //! //! This module provides: //! - [Select]: Allows selecting from multiple borrowed receiver references, //! which is a type-erased interface similar to the select in crossbeam-channel, supporting both `mpmc`, `mpsc`, and `spsc` channels. //! - [Multiplex]: Owns and reads from multiple channels as a non-concurrent consumer, mainly for `spsc`, `mpsc`. //! //! Performance: dedicated channel > multiplex > select #[allow(clippy::module_inception)] pub(crate) mod select; pub use select::{Select, SelectResult}; #[allow(private_interfaces)] mod multiplex; pub use multiplex::{Multiplex, Mux}; #[derive(PartialEq, Debug, Clone, Copy)] #[repr(u8)] pub enum SelectMode { RR, Rand, Bias, } ================================================ FILE: src/select/multiplex.rs ================================================ use crate::backoff::*; use crate::flavor::{Flavor, FlavorBounded, FlavorImpl, FlavorNew, FlavorWrap}; use crate::shared::{check_timeout, ChannelShared}; use crate::waker::WakerState; use crate::waker_registry::{RegistrySend, SelectWaker, SelectWakerWrapper}; use crate::BlockingRxTrait; use crate::SenderType; use crate::{RecvError, RecvTimeoutError, TryRecvError}; use std::cell::Cell; use std::fmt; use std::sync::atomic::Ordering; use std::sync::Arc; use std::thread; use std::time::{Duration, Instant}; pub const DEFAULT_WEIGHT: u32 = 128; /// Type alias for multiplexed channel flavor pub type Mux = FlavorWrap::Send, SelectWakerWrapper>; /// A multiplexer that owns multi channel receivers of the same Flavor type.
// NOTE(review): extraction-damaged span — generic parameters stripped throughout
// (e.g. `Multiplex::>::new()` was presumably `Multiplex::<Array<i32>>::new()`); restore
// from the original repository before editing. Doc-comment wording below lightly corrected
// (typos only); code tokens untouched.
/// /// Unlike select, it focuses on round-robin mode, allowing a specified weight on each channel. /// It maintains a count of messages received for each channel. /// That means if the last message was received on the `idx` channel, it will keep trying the same channel /// until the number equal to weight has been received. If the channel is empty, it will try the /// next one without touching the count. This strategy improves the hit rate of cpu cache and ensures no starvation. /// /// NOTE: The default weight is 128. (When the weight of all channels is set to 1, the performance is /// the worst because of cpu cache thrashing) /// /// ## Capability and limitation: /// - New channel may be added on the fly /// - This abstraction is only designed for stable channels for most efficient select. /// - If a channel is closed by the sender, the receiver will be automatically closed inside the Multiplex, /// the user will not be notified until all its channels are closed. /// - Due to it binding on the Flavor interface, it cannot be used between different types. /// If you want to multiplex between list and array, you can use the /// [CompatFlavor](crate::compat::CompatFlavor) /// - **NOTE** : It has internal mutability because it needs to impl [BlockingRxTrait](crate::BlockingRxTrait), /// the adding channel process remains `&mut self`. Because `Multiplex` is a single consumer just /// like [Rx](crate::Rx), it does not have `Sync`. If you can guarantee no concurrent access you /// can manually add the `Sync` back in parent struct.
// NOTE(review): Multiplex fields — `last_idx`/`count` are Cells (single-consumer interior
// mutability, consistent with the doc above); `unsafe impl Send` is asserted without a
// visible justification comment — TODO confirm soundness rationale upstream.
// _add_item stores `weight - 1` into the handle: weight==0 would underflow u32 — presumably
// callers never pass 0; worth a debug_assert upstream.
/// /// /// # Examples /// /// Basic usage with multiple senders: /// /// ``` /// use crossfire::{mpsc::Array, MTx, select::{Multiplex, Mux}}; /// use std::thread; /// /// // Create a multiplexer with Array flavor /// let mut mp = Multiplex::>::new(); /// /// // Create multiple senders through the multiplexer /// let tx1: MTx>> = mp.bounded_tx(10); /// let tx2: MTx>> = mp.bounded_tx(10); /// /// // Send values from different threads /// let h1 = thread::spawn(move || { /// tx1.send(1).unwrap(); /// }); /// let h2 = thread::spawn(move || { /// tx2.send(2).unwrap(); /// }); /// /// // Receive values through the multiplexer (order may vary) /// let val1 = mp.recv().unwrap(); /// let val2 = mp.recv().unwrap(); /// /// h1.join().unwrap(); /// h2.join().unwrap(); /// ``` pub struct Multiplex { waker: Arc, handlers: Vec>, last_idx: Cell, count: Cell, } unsafe impl Send for Multiplex {} struct MultiplexHandle { shared: Arc>>, weight: u32, } impl Multiplex { /// Initialize Select with fair, round-robin strategy pub fn new() -> Self { Self { waker: Arc::new(SelectWaker::new()), handlers: Vec::with_capacity(10), count: Cell::new(0), last_idx: Cell::new(0), } } #[inline] fn _add_item(&mut self, flavor: F, weight: u32) -> Arc>> { self.waker.add_opened(); let recvs = self.waker.clone().to_wrapper(self.handlers.len()); let shared = ChannelShared::new(Mux::::from_inner(flavor), F::Send::new(), recvs); self.handlers.push(MultiplexHandle { shared: shared.clone(), weight: weight - 1 }); self.last_idx.set(self.handlers.len() - 1); shared } /// Add a new channels with a new() method to multiplex, return its sender. /// /// # Type Parameters /// /// * `S`: The sender type that implements SenderType with the appropriate Flavor, /// may be async or blocking sender, MP or SP that match the `Flavor` type. /// /// # Note /// /// This method is only available for flavors that implement `FlavorNew` trait, /// such as `List` / `One` flavor.
// NOTE(review): new_tx / new_tx_with_weight — constructor variants for FlavorNew flavors;
// both funnel into _add_item and differ only in the weight argument.
For flavors like Array that don't implement `FlavorNew`, /// use `bounded_tx` instead. /// /// # Example /// /// with mpsc::List (which sender type is [MTx](crate::MTx) and allow to clone) /// /// ``` /// use crossfire::{mpsc::List, MTx, select::{Multiplex, Mux}}; /// use tokio; /// /// let mut mp = Multiplex::>::new(); /// let tx1: MTx>> = mp.new_tx(); /// let tx2: MTx>> = mp.new_tx(); /// tx1.send(42).expect("send"); /// tx2.send(42).expect("send"); /// let value = mp.recv().unwrap(); /// assert_eq!(value, 42); /// let value = mp.recv().unwrap(); /// assert_eq!(value, 42); /// ``` /// /// with spsc::One (which sender type is [Tx](crate::Tx) and not cloneable) /// ``` /// use crossfire::{spsc::One, Tx, select::{Multiplex, Mux}}; /// use tokio; /// /// let mut mp = Multiplex::>::new(); /// // Creates an size-1 channel /// let tx1: Tx>> = mp.new_tx(); /// // Creates another size-1 channel /// let tx2: Tx>> = mp.new_tx(); /// std::thread::spawn(move ||{ /// tx2.send(42).expect("send"); /// }); /// let value = mp.recv().unwrap(); /// assert_eq!(value, 42); /// ``` pub fn new_tx(&mut self) -> S where F: FlavorNew, S: SenderType>, { let shared = self._add_item(F::new(), DEFAULT_WEIGHT); S::new(shared) } /// Add a channel of flavor (impl FlavorNew), with custom weight instead of default /// (the default weight is 128) pub fn new_tx_with_weight(&mut self, weight: u32) -> S where F: FlavorNew, S: SenderType>, { let shared = self._add_item(F::new(), weight); S::new(shared) } /// Creates a new bounded sender for the multiplexer /// /// # Arguments /// /// * `size` - The maximum capacity of the channel /// /// # Type Parameters /// /// * `S` - The sender type that implements SenderType with the appropriate Flavor /// /// # Example /// /// ``` /// use crossfire::{mpsc::Array, *, select::{Multiplex, Mux}}; /// /// let mut mp = Multiplex::>::new(); /// // Creates a bounded channel with capacity 10 /// let tx1: MTx>> = mp.bounded_tx(10); /// // Creates another bounded channel
// NOTE(review): bounded_tx / bounded_tx_with_weight mirror new_tx for FlavorBounded
// flavors. try_recv scans all handlers starting after last_idx and reports Disconnected
// only when the waker's opened-count has reached zero.
with capacity 20 /// let tx2: MTx>> = mp.bounded_tx(20); /// tx1.send(42).expect("send"); /// std::thread::spawn(move || { /// tx2.send(42).expect("send"); /// }); /// let value = mp.recv().unwrap(); /// assert_eq!(value, 42); /// let value = mp.recv().unwrap(); /// assert_eq!(value, 42); /// ``` pub fn bounded_tx(&mut self, size: usize) -> S where F: FlavorBounded, S: SenderType>, { let shared = self._add_item(F::new_with_bound(size), DEFAULT_WEIGHT); S::new(shared) } /// Add a bounded channel to the multiplex, with custom weight (the default is 128) pub fn bounded_tx_with_weight(&mut self, size: usize, weight: u32) -> S where F: FlavorBounded, S: SenderType>, { let shared = self._add_item(F::new_with_bound(size), weight); S::new(shared) } /// Attempts to receive a message from any of the multiplexed channels without blocking. /// /// Returns `Ok(item)` if a message is available on any of the channels. /// Returns `Err(TryRecvError::Empty)` if no messages are available. /// Returns `Err(TryRecvError::Disconnected)` if all senders have been dropped. /// /// # Example /// /// ``` /// use crossfire::{mpsc::Array, select::{Multiplex, Mux}, MTx, TryRecvError}; /// /// let mut mp = Multiplex::>::new(); /// let tx1: MTx> = mp.bounded_tx(10); /// let _tx2: MTx> = mp.bounded_tx(10); /// // No message available yet /// assert_eq!(mp.try_recv(), Err(TryRecvError::Empty)); /// tx1.send(42).unwrap(); /// // Now a message is available /// assert_eq!(mp.try_recv(), Ok(42)); /// ``` #[inline] pub fn try_recv(&self) -> Result { let last_idx = self.last_idx.get(); if let Some(item) = self._try_select_all::(last_idx, self.handlers.len()) { return Ok(item); } if self.waker.get_opened_count() == 0 { return Err(TryRecvError::Disconnected); } Err(TryRecvError::Empty) } /// Receives a message from any of the multiplexed channels, blocking if necessary.
// NOTE(review): recv_timeout maps Err(true) -> Disconnected and Err(false) -> Timeout,
// matching _recv_blocking's documented convention below — but note this is the OPPOSITE of
// RxOneshot::_recv_blocking's Err(bool) meaning in oneshot.rs; both are internally
// consistent, just confusingly different. _try_select_cached prefers the last-hit channel
// while its weight budget (`count`) lasts, to improve cache locality per the struct docs.
/// /// This method will block the current thread until a message is available on any of the channels, /// or until all senders are dropped. #[inline] pub fn recv(&self) -> Result { match self._recv_blocking(None) { Ok(item) => Ok(item), Err(_) => Err(RecvError), } } /// Receives a message from any of the multiplexed channels with a timeout. /// Will block when channel is empty. /// /// The behavior is atomic: the message is either received successfully or the operation is canceled due to a timeout. /// /// Returns `Ok(T)` when successful. /// /// Returns Err([RecvTimeoutError::Timeout]) when a message could not be received because the channel is empty and the operation timed out. /// /// Returns Err([RecvTimeoutError::Disconnected]) if the sender has been dropped and the channel is empty. #[inline] pub fn recv_timeout(&self, timeout: Duration) -> Result { match Instant::now().checked_add(timeout) { Some(deadline) => match self._recv_blocking(Some(deadline)) { Ok(item) => Ok(item), Err(true) => Err(RecvTimeoutError::Disconnected), Err(false) => Err(RecvTimeoutError::Timeout), }, None => self.try_recv().map_err(|e| match e { TryRecvError::Disconnected => RecvTimeoutError::Disconnected, TryRecvError::Empty => RecvTimeoutError::Timeout, }), } } /// NOTE: be aware that _try_recv_cached does not guarantee all message will be receive, /// should retry again #[inline(always)] fn _try_select_cached(&self) -> Result { let last_idx = self.last_idx.get(); let handle = unsafe { self.handlers.get_unchecked(last_idx) }; let count = self.count.get(); let loop_count = if count > 0 { if let Some(msg) = handle.shared.inner.try_recv_cached() { handle.shared.on_recv(); self.count.set(count - 1); return Ok(msg); } self.handlers.len() - 1 } else { self.handlers.len() }; if let Some(item) = self._try_select_all::(last_idx, loop_count) { return Ok(item); } Err(last_idx) } #[inline(always)] fn _try_select_all( &self, mut idx: usize, loop_count: usize, ) -> Option { let len =
// NOTE(review): _recv_blocking — spin with Backoff, then arm the SelectWaker
// (init_blocking), re-check all channels (the "double check" comment guards against losing
// last messages racing with close), then park/park_timeout until the waker state advances
// past Woken or the deadline passes. start_idx is refreshed from the waker's hint so the
// next scan begins at the channel that fired.
self.handlers.len(); for _ in 0..loop_count { idx = if idx + 1 >= len { 0 } else { idx + 1 }; let handle = unsafe { self.handlers.get_unchecked(idx) }; if let Some(msg) = if FINAL { handle.shared.inner.try_recv_final() } else { handle.shared.inner.try_recv() } { handle.shared.on_recv(); self.count.set(handle.weight); self.last_idx.set(idx); return Some(msg); } } None } /// Internal method to perform blocking receive with optional timeout /// /// # Parameters /// /// * `deadline` - Optional deadline for the operation; if None, blocks indefinitely /// /// # Returns /// /// Returns `Ok(item)` on successful receive, `Err(true)` if disconnected, `Err(false)` if timed out #[inline] fn _recv_blocking(&self, deadline: Option) -> Result { let mut start_idx; match self._try_select_cached::() { Ok(item) => return Ok(item), Err(idx) => { start_idx = idx; } } let mut backoff = Backoff::from(BackoffConfig::detect()); backoff.snooze(); let len = self.handlers.len(); loop { loop { if let Some(item) = self._try_select_all::(start_idx, len) { return Ok(item); } if backoff.snooze() { break; } } // TODO For thread, actually the waker can be reuse and not change self.waker.init_blocking(); let closing = self.waker.get_opened_count() == 0; if let Some(item) = self._try_select_all::(start_idx, len) { return Ok(item); } if closing { // NOTE: double check the channels after checking close count, otherwise we will be // missing some last messages return Err(true); } let mut state = WakerState::Init as u8; while state < WakerState::Woken as u8 { match check_timeout(deadline) { Ok(None) => { thread::park(); } Ok(Some(dur)) => { thread::park_timeout(dur); } Err(_) => { // As sc don't need to abandon return Err(false); } } state = self.waker.get_waker_state(Ordering::SeqCst); } backoff.reset(); start_idx = self.waker.get_hint(); } } } impl Drop for Multiplex { #[inline] fn drop(&mut self) { for handle in &self.handlers { handle.shared.close_rx(); } } } impl fmt::Debug for Multiplex { #[inline]
// NOTE(review): Debug/Display impls plus the BlockingRxTrait facade — note len() returns 0
// and capacity() returns None by design (documented "not practical" for a multiplexer);
// clone_to_vec is unimplemented!() because Multiplex is single-consumer. The trailing text
// on this line is the start of src/select/select.rs (extraction separator + its internal
// design comment).
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "Multiplex<{}>", std::any::type_name::()) } } impl fmt::Display for Multiplex { #[inline] fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(self, f) } } impl BlockingRxTrait for Multiplex where F::Item: Send + 'static, { #[inline(always)] fn recv(&self) -> Result { Self::recv(self) } #[inline(always)] fn try_recv(&self) -> Result { Self::try_recv(self) } #[inline(always)] fn recv_timeout(&self, timeout: Duration) -> Result { Self::recv_timeout(self, timeout) } /// The number of messages in the channel at the moment #[inline(always)] fn len(&self) -> usize { 0 } /// always return None #[inline(always)] fn capacity(&self) -> Option { None } /// Returns true when all the channel's empty #[inline(always)] fn is_empty(&self) -> bool { for handle in &self.handlers { if !handle.shared.is_empty() { return false; } } true } /// Not practical to impl #[inline(always)] fn is_full(&self) -> bool { false } /// Return true if all sender has been close #[inline(always)] fn is_disconnected(&self) -> bool { self.get_tx_count() == 0 } /// NOTE: it does not count all the clones to the senders #[inline(always)] fn get_tx_count(&self) -> usize { self.waker.get_opened_count() } /// This is single consumer #[inline(always)] fn get_rx_count(&self) -> usize { 1 } fn get_wakers_count(&self) -> (usize, usize) { (0, 0) } fn clone_to_vec(self, _count: usize) -> Vec { unimplemented!(); } } ================================================ FILE: src/select/select.rs ================================================ // Internal Implementation Details: // // Since mixing send and receive operations is rare, and the waker types for senders and receivers // are different, we only implement `select` for receive operations.
// NOTE(review): extraction-damaged span (generic parameters stripped, lines collapsed) —
// restore from the original repository before editing logic. This is src/select/select.rs:
// the design comment below is original and explains the SelectWaker / Registry machinery.
// // In `shared.rs`, `SelectHandle` is implemented for `ChannelShare` // // ## SelectWaker // // `SelectWaker` is wrapped in an `Arc`, holding the actual waker // // ### RegistryMultiRecv // - Requires `reg_waker()` to be called only once, so the `registered` flag is saved as `true`. // - Provides `cancel_waker()`. // - `RegistryMultiInner` maintains a `Vec<(channel_id, Arc)>`. // It does not remove the waker after waking it up. // - When waking up `SelectWaker`, it saves its own `channel_id` into the `SelectWaker`'s hint. // - The `is_empty` flag in `RegistryMulti` can be extended from `bool` to `u8` to represent three states: // `empty`, `has select`, and `without select`. // // ### RegistrySingle // - Needs to re-register in every select loop, so `RecvHandle` saves `registered` as `false`. // - `cancel_waker` is an empty implementation. // - During registration, it clones the `ArcWaker` (generated at the start of the select flow inside `Arc`) // into `RegistrySingle`. A new method can be added to abstract this process. // // ### Select::drop // - Unregister using `cancel_waker()` for all handles. // // ## Safety and Validation // - `SelectResult` is returned to the user and contains a pointer of receiver to the slot. // - If the user incorrectly uses a `SelectResult` from one channel on a different receiver, // this pointer address is checked, causing a panic to ensure safety. use super::SelectMode; use crate::backoff::*; use crate::flavor::Token; use crate::shared::{check_timeout, ChannelShared}; use crate::trace_log; use crate::waker::WakerState; use crate::waker_registry::SelectWaker; use crate::ReceiverType; use crate::{RecvError, RecvTimeoutError, TryRecvError}; use smallvec::SmallVec; use std::collections::hash_map::DefaultHasher; use std::fmt; use std::hash::{Hash, Hasher}; use std::ops::Add; use std::sync::{atomic::Ordering, Arc}; use std::thread; use std::time::{Duration, Instant}; /// The select interface only support select from receivers.
// NOTE(review): Select<'a> — borrows receivers (lifetime 'a), stores type-erased
// &dyn SelectHandle plus the raw receiver address used purely for identity checks.
// Rand mode seeds an xorshift RNG from Instant + thread id (non-cryptographic, fine for
// fairness only).
/// /// - The user add receivers for subscription. /// - call [Select::select] or [Select::select_timeout] and get [SelectResult] /// - Use [read_select](crate::Rx::read_select) to handle [SelectResult]. (**Safety**: If `SelectResult` /// dropped without processed, will result in message leak/hang.) /// - Although the `Select` object has a lifecycle and should live inside a function scope, it can be reused in a loop. /// - On drop it will automatically cancel all registration. /// /// ## Example /// /// ```rust /// use crossfire::{mpmc, mpsc, RecvError}; /// use crossfire::select::Select; /// /// let (tx1, rx1) = mpmc::bounded_blocking::(10); /// let (tx2, rx2) = mpsc::bounded_blocking::(10); /// /// // Send some messages /// tx1.send(100).unwrap(); /// tx2.send(200).unwrap(); /// /// // Drop senders to simulate disconnection after messages are sent /// drop(tx1); /// drop(tx2); /// /// let mut select = Select::new(); /// select.add(&rx1); /// select.add(&rx2); /// /// // Loop until all channels are disconnected and removed from select /// loop { /// // When `select()` returns `Err(RecvError)`, it means all channels /// // previously added to `select` have been disconnected or removed. /// // In such a case, there's nothing left to select from, so we break. /// let res = match select.select() { /// Ok(res) => res, /// Err(RecvError) => { /// println!("All channels disconnected or removed from select.
// NOTE(review): doc example continues, then Select::new_with seeds rng only for Rand mode;
// add() erases the receiver to &dyn SelectHandle and records its address for later
// SelectResult validation. remove() below cancels and force-re-registers the remaining
// handlers (registered=false) because channel_ids shift after Vec::remove.
Breaking loop."); /// break; /// }, /// }; /// /// // Handle the result from the ready receiver /// if res == rx1 { /// match rx1.read_select(res) { /// Ok(val) => println!("Received from rx1: {}", val), /// Err(RecvError) => { // Now RecvError /// println!("rx1 disconnected, removing from select."); /// select.remove(&rx1); // Remove disconnected receiver /// }, /// } /// } else if res == rx2 { /// match rx2.read_select(res) { /// Ok(val) => println!("Received from rx2: {}", val), /// Err(RecvError) => { // Now RecvError /// println!("rx2 disconnected, removing from select."); /// select.remove(&rx2); // Remove disconnected receiver /// }, /// } /// } /// } /// ``` pub struct Select<'a> { handlers: SmallVec<[RecvHandle<'a>; 32]>, waker: Arc, mode: SelectMode, next_index: usize, rng: u64, } impl<'a> Select<'a> { /// Initialize Select with fair, round-robin strategy pub fn new() -> Self { Self::new_with(SelectMode::RR) } /// Initialize Select with fair strategy (check start from random channel) #[inline] pub fn new_random() -> Self { Self::new_with(SelectMode::Rand) } /// Initialize Select with bias strategy (check according to the order of `add()`) #[inline] pub fn new_bias() -> Self { Self::new_with(SelectMode::Bias) } #[inline] pub fn new_with(mode: SelectMode) -> Self { let rng = if let SelectMode::Rand = mode { let mut hasher = DefaultHasher::new(); Instant::now().hash(&mut hasher); thread::current().id().hash(&mut hasher); hasher.finish() } else { 0 }; Self { mode, handlers: SmallVec::new(), waker: Arc::new(SelectWaker::new()), next_index: 0, rng, } } /// Add a channel receiver for watch #[inline] pub fn add(&mut self, recv: &'a R) where ChannelShared: SelectHandle, { let shared: &ChannelShared = recv.as_ref(); self.handlers.push(RecvHandle { registered: false, shared: shared as &dyn SelectHandle, channel: recv as *const R as *const u8, }); } /// Remove a channel receiver from watch pub fn remove(&mut self, recv: &R) { let channel = recv as *const R as *const
// NOTE(review): try_select / _try_select — one full pass over handlers starting at the
// mode-dependent index; final_check=true additionally detects closed channels (see
// SelectHandle::try_select in shared.rs). RR mode advances next_index past a hit.
u8; if let Some(index) = self.handlers.iter().position(|h| h.channel == channel) { self.handlers[index].shared.cancel_waker(&self.waker); self.handlers.remove(index); if !self.handlers.is_empty() { if self.next_index >= self.handlers.len() { self.next_index = 0; } for handler in &mut self.handlers { handler.registered = false; handler.shared.cancel_waker(&self.waker); } } } } /// Attempts to select a message from any of the registered receivers without blocking. /// /// Returns: /// - `Ok(SelectResult)` if a message is immediately available from any channel. /// - `Err(TryRecvError::Empty)` if no messages are ready, but at least one channel is still connected. /// - `Err(TryRecvError::Disconnected)` if all registered channels are disconnected or removed from select. pub fn try_select(&mut self) -> Result { if self.handlers.is_empty() { return Err(TryRecvError::Disconnected); } let idx = self._try_select_begin(); if let Some(res) = self._try_select(idx, true) { return Ok(res); } Err(TryRecvError::Empty) } #[inline(always)] fn _try_select(&mut self, mut idx: usize, final_check: bool) -> Option { let len = self.handlers.len(); debug_assert!(len > 0); for _ in 0..len { // Ensure idx is within bounds for the current iteration.
// NOTE(review): _try_select_begin chooses the scan start: Bias=0, RR=next_index (wrapped),
// Rand=xorshift64 step. The blocking loops below mirror Multiplex::_recv_blocking:
// spin, register wakers, final re-check, then park until the waker state passes Woken or
// the deadline trips; the post-wake hint narrows the next scan to the firing channel.
if let Ok(res) = self.handlers[idx].try_select(final_check) { trace_log!("select ok idx={}", idx); if self.mode == SelectMode::RR { self.next_index = idx + 1; } return Some(res); } else if final_check { trace_log!("select: final_check {}", idx); } idx += 1; } None } #[inline(always)] fn _try_select_begin(&mut self) -> usize { match self.mode { SelectMode::Bias => 0, SelectMode::RR => { if self.next_index >= self.handlers.len() { 0 } else { self.next_index } } SelectMode::Rand => { let mut x = self.rng; x ^= x << 13; x ^= x >> 7; x ^= x << 17; self.rng = x; (x as usize) % self.handlers.len() } } } /// Blocking current thread and wait for message from multiple receivers or close event /// /// See [crate::select] document for usage /// /// # Return conditions: /// /// - Return Ok(SelectResult) when one of the channel has result or close. /// - For closed channel, you have to remove the receiver from select, otherwise the select /// will already return immediately. /// - If there's no handler left in it, will return RecvError pub fn select(&mut self) -> Result { match self._select_blocking(None) { Ok(res) => Ok(res), Err(true) => Err(RecvError), _ => unreachable!(), } } /// Blocking current thread and wait with a timeout, for message from multiple receivers or close event /// /// See [crate::select] document for usage /// /// # Return conditions: /// /// - Return Ok(SelectResult) when one of the channel has result or close. /// - For closed channel, you have to remove the receiver from select, otherwise the select /// will already return immediately. /// - For Timeout returns RecvTimeoutError::Timeout; /// - If there's no handler left in it, will return RecvTimeoutError::Disconnected.
// NOTE(review): here Err(true)=Disconnected, Err(false)=Timeout — same convention as
// Multiplex but the reverse of oneshot's; tread carefully when cross-reading these files.
// Drop cancels every handler's waker registration so no dangling waker outlives the borrow.
pub fn select_timeout(&mut self, timeout: Duration) -> Result { let deadline = Instant::now().add(timeout); match self._select_blocking(Some(deadline)) { Ok(res) => Ok(res), Err(true) => Err(RecvTimeoutError::Disconnected), Err(false) => Err(RecvTimeoutError::Timeout), } } #[inline(always)] fn _select_blocking(&mut self, deadline: Option) -> Result { // Initial non-blocking check, respecting SelectMode if self.handlers.is_empty() { return Err(true); // All handlers are disconnected or removed } let mut idx = self._try_select_begin(); if let Some(res) = self._try_select(idx, false) { return Ok(res); } let mut backoff = Backoff::from(BackoffConfig::detect()); backoff.snooze(); // If try_select returned None, we check if all handlers are gone. loop { loop { if let Some(res) = self._try_select(idx, false) { return Ok(res); } if backoff.snooze() { break; } } // init SelectWaker self.waker.init_blocking(); // Register all handlers (handlers with `registered=true` may be skipped). for (i, handler) in self.handlers.iter_mut().enumerate() { handler.reg_waker(i, &self.waker); } // After registration, do another check, this time with final_check=true if let Some(res) = self._try_select(idx, true) { return Ok(res); } trace_log!("select: park"); let mut state = WakerState::Init as u8; while state < WakerState::Woken as u8 { match check_timeout(deadline) { Ok(None) => { std::thread::park(); } Ok(Some(dur)) => { std::thread::park_timeout(dur); } Err(_) => { return Err(false); } } state = self.waker.get_waker_state(Ordering::SeqCst); trace_log!("select: unpark state={}", state); } // NOTE: there may be spurious wakeup, but since the SelectWaker is registered in // wake up, first check the one with hint idx = self.waker.get_hint(); trace_log!("select: hint idx {}", idx); } } } impl<'a> Drop for Select<'a> { #[inline(always)] fn drop(&mut self) { for handler in &self.handlers { handler.shared.cancel_waker(&self.waker); } } } impl<'a> std::fmt::Debug for Select<'a> { fn fmt(&self, f:
// NOTE(review): RecvHandle — per-receiver slot; `channel` is the raw receiver address kept
// only for identity validation of SelectResult (never dereferenced here). reg_waker is
// idempotent via the `registered` flag, which persists only for multi-registries.
&mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "Select") } } struct RecvHandle<'a> { shared: &'a dyn SelectHandle, // If multi is true, the registration is persistent until cancel registered: bool, // for validate against unsafe usage channel: *const u8, } impl<'a> RecvHandle<'a> { #[inline(always)] fn try_select(&self, final_check: bool) -> Result { if let Some(token) = self.shared.try_select(final_check) { return Ok(SelectResult { channel: self.channel, token }); } Err(()) } #[inline(always)] fn reg_waker(&mut self, index: usize, global_waker: &Arc) { if self.registered { return; } if self.shared.reg_waker(index, global_waker) { trace_log!("select: reg waker"); self.registered = true; } } } /// The result from [Select::select], use for calling `read_select()` on the receiver type, may contains event to receive or disconnected event /// /// **Safety**: If `SelectResult` dropped without processed, will result in message leak/hang. /// /// See the example of select interface.
// NOTE(review): extraction-damaged span (generic parameters stripped, lines collapsed).
// SelectResult carries the originating receiver's address for identity validation (is_from /
// PartialEq against a receiver reference) and a Token describing the claimed slot.
// The trailing portion of this span is the head of src/shared.rs (ChannelShared), cut off
// mid-function at the end of the chunk — check_timeout's body is incomplete here.
pub struct SelectResult { // for validation pub(crate) channel: *const u8, pub(crate) token: Token, } impl fmt::Debug for SelectResult { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "SelectResult(from {:p})", self.channel) } } impl SelectResult { /// Check if the result is for specified receiver #[inline] pub fn is_from(&self, rx: &R) -> bool { self.channel == rx as *const R as *const u8 } } impl PartialEq for SelectResult { /// Short cut for [SelectResult::is_from()] #[inline] fn eq(&self, other: &R) -> bool { self.is_from(other) } } #[allow(private_bounds)] pub(crate) trait SelectHandle: Send { /// If final_check is true, should check channel closing, should use SeqCst ordering fn try_select(&self, final_check: bool) -> Option; /// For RegistryMulti return true means the waker will be persistent, otherwise return false fn reg_waker(&self, channel_id: usize, waker: &Arc) -> bool; fn cancel_waker(&self, waker: &Arc); } ================================================ FILE: src/shared.rs ================================================ use crate::backoff::*; pub(crate) use crate::crossbeam::err::*; pub(crate) use crate::flavor::{Flavor, FlavorSelect, Token}; use crate::select::select::SelectHandle; use crate::trace_log; pub(crate) use crate::waker::*; pub(crate) use crate::waker_registry::*; use std::mem::MaybeUninit; use std::sync::atomic::{compiler_fence, fence, AtomicUsize, Ordering}; use std::sync::Arc; use std::time::{Duration, Instant}; pub struct ChannelShared { pub(crate) inner: F, tx_count: AtomicUsize, rx_count: AtomicUsize, pub(crate) senders: F::Send, pub(crate) recvs: F::Recv, pub(crate) backoff_limit: u16, pub(crate) large: bool, pub(crate) may_direct_copy: bool, } impl ChannelShared { pub(crate) fn new(inner: F, senders: F::Send, recvs: F::Recv) -> Arc { let mut large = false; if let Some(bound) = inner.capacity() { if bound >= 10 { large = true; } } Arc::new(Self { tx_count: AtomicUsize::new(1), rx_count: AtomicUsize::new(1),
// NOTE(review): ChannelShared — the refcount pair (tx_count/rx_count) starts at 1 each;
// close_tx/close_rx use fetch_sub(Release) + a SeqCst fence before waking the other side,
// and try_add_tx is a CAS loop that refuses to resurrect from 0 (WeakTx upgrade).
// `large` is a heuristic (capacity >= 10) that disables async backoff in get_async_backoff.
senders, recvs, backoff_limit: inner.backoff_limit(), large, may_direct_copy: inner.may_direct_copy(), inner, }) } #[inline(always)] pub(crate) fn try_recv(&self) -> Result { if let Some(item) = self.inner.try_recv_final() { self.on_recv(); Ok(item) } else { if self.is_tx_closed() { return Err(TryRecvError::Disconnected); } Err(TryRecvError::Empty) } } #[inline(always)] pub(crate) fn read_with_token(&self, token: Token) -> Result where F: FlavorSelect, { if token.pos.is_null() { Err(RecvError) } else { let item = self.inner.read_with_token(token); self.on_recv(); Ok(item) } } /// The number of messages in the channel. #[inline(always)] pub fn len(&self) -> usize { self.inner.len() } /// The capacity of the channel. Returns `None` for unbounded channels. #[inline(always)] pub fn capacity(&self) -> Option { self.inner.capacity() } /// Returns `true` if the channel is empty. #[inline(always)] pub fn is_empty(&self) -> bool { self.inner.is_empty() } /// Returns `true` if the channel is full. pub fn is_full(&self) -> bool { self.inner.is_full() } /// Returns the number of senders for the channel. #[inline(always)] pub fn get_tx_count(&self) -> usize { self.tx_count.load(Ordering::SeqCst) } /// Returns the number of receivers for the channel. #[inline(always)] pub fn get_rx_count(&self) -> usize { self.rx_count.load(Ordering::SeqCst) } #[inline(always)] pub(crate) fn sender_direct_copy(&self) -> bool { self.may_direct_copy && self.senders.use_direct_copy() } /// Returns the number of wakers for senders and receivers. For debugging purposes.
// NOTE(review): remainder of ChannelShared — waker bookkeeping (sender_double_check,
// sender_snooze, on_send/on_recv, abandon_*_waker) and the SelectHandle impl. The chunk
// ends mid-`check_timeout`; do not edit past this point without the full file.
pub fn get_wakers_count(&self) -> (usize, usize) { (self.senders.len(), self.recvs.len()) } #[inline(always)] pub(crate) fn is_tx_closed(&self) -> bool { self.tx_count.load(Ordering::SeqCst) == 0 } #[inline(always)] pub(crate) fn is_rx_closed(&self) -> bool { self.rx_count.load(Ordering::SeqCst) == 0 } #[inline(always)] pub(crate) fn add_tx(&self) { // The drop will close_tx, which has release fence let _ = self.tx_count.fetch_add(1, Ordering::Relaxed); } /// for Upgrade of WeakTx #[inline(always)] pub(crate) fn try_add_tx(&self) -> bool { let mut count = self.tx_count.load(Ordering::Relaxed); loop { if count == 0 { return false; } match self.tx_count.compare_exchange( count, count + 1, Ordering::SeqCst, Ordering::Acquire, ) { Ok(_) => { return true; } Err(_count) => { count = _count; std::hint::spin_loop(); } } } } #[inline(always)] pub(crate) fn add_rx(&self) { // The drop will close_rx, which has release fence let _ = self.rx_count.fetch_add(1, Ordering::Relaxed); } /// This method is called when a sender is dropped. #[inline(always)] pub(crate) fn close_tx(&self) { let old = self.tx_count.fetch_sub(1, Ordering::Release); if old <= 1 { trace_log!("closing from tx"); fence(Ordering::SeqCst); self.recvs.close(); } else { trace_log!("drop tx {}", old - 1); } } /// This method is called when a receiver is dropped. #[inline(always)] pub(crate) fn close_rx(&self) { let old = self.rx_count.fetch_sub(1, Ordering::Release); if old <= 1 { trace_log!("closing from rx"); fence(Ordering::SeqCst); // There's SeqCst fence inside RegistrySender::close self.senders.close(); } else { trace_log!("drop rx {}", old - 1); } } /// if need_wake == true, called from on_recv(), when return None indicates try to wake up next. /// when need_wake == false, will always return Some(state).
/// /// NOTE: when return state=Done, the waker is not set to Done #[inline] pub(crate) fn sender_double_check( &self, item: &MaybeUninit, o_waker: &mut Option<::Waker>, ) -> u8 { // Not allow Spurious wake and enter this function again; if let Some(res) = self.inner.try_send_oneshot(item.as_ptr()) { if res { self.on_send(); self.senders.cancel_reuse_waker(o_waker, WakerState::Done) } else { let state = if SINK { WakerState::Init as u8 } else { self.senders.commit_waiting(o_waker) }; if self.is_rx_closed() { return WakerState::Closed as u8; } state } } else { // Unlikely to be disconnected, self.senders.cancel_reuse_waker(o_waker, WakerState::Woken) } } /// Wait a little more for the waker state change, /// NOTE: it's important to yield when you have more sender than receiver #[inline(always)] pub(crate) fn sender_snooze( &self, o_waker: &Option<::Waker>, backoff: &mut Backoff, ) -> u8 { backoff.reset(); loop { let state = self.senders.get_waker_state(o_waker, Ordering::Relaxed); compiler_fence(Ordering::AcqRel); if state >= WakerState::Woken as u8 { return state; } if backoff.snooze() { return state; } } } /// Wake up one rx #[inline(always)] pub(crate) fn on_send(&self) { self.recvs.fire(); } /// Wake up one tx #[inline(always)] pub(crate) fn on_recv(&self) { if WakeResult::Sent == self.senders.fire(&self.inner) { self.on_send(); } } /// Call on cancellation, return true to indicate drop temporary message /// return false to indicate already Done.
#[inline(always)] pub(crate) fn abandon_send_waker(&self, waker: &::Waker) -> bool { match self.senders.abandon_waker(waker) { Ok(_) => true, Err(state) => { trace_log!("tx: abandon err {:?} {}", waker, state); if state == WakerState::Woken as u8 { // We are awake, but give up sending, should notify another sender for safety self.on_recv(); } else if state == WakerState::Closed as u8 { } else { debug_assert_eq!(state, WakerState::Done as u8); // Unused code for direct_copy return false; } true } } } /// Call on cancellation, return true to indicate drop temporary message #[inline(always)] pub(crate) fn abandon_recv_waker(&self, waker: &::Waker) { if let Err(state) = self.recvs.abandon_waker(waker) { trace_log!("rx: abandon err {:?} {}", waker, state); if state == WakerState::Woken as u8 { // We are awake, but give up receiving, should notify another receiver for safety self.on_send(); } else if state == WakerState::Closed as u8 { // Closed } else { debug_assert_eq!(state, WakerState::Done as u8); // Unused code for direct_copy } } } #[inline(always)] pub(crate) fn get_async_backoff(&self) -> Option { if self.large { return None; } let cfg = BackoffConfig::detect(); if cfg.spin_limit == 0 { // 1 core don't backoff return None; } // It's effective to yield for size=1 Some(Backoff::from(cfg.limit(self.backoff_limit))) } } impl SelectHandle for ChannelShared { #[inline(always)] fn try_select(&self, final_check: bool) -> Option { if let Some(token) = self.inner.try_select(final_check) { return Some(token); } if final_check && self.get_tx_count() == 0 { return Some(Token::default()); } None } #[inline(always)] fn reg_waker(&self, channel_id: usize, waker: &Arc) -> bool { self.recvs.reg_select_waker(channel_id, waker) } #[inline(always)] fn cancel_waker(&self, waker: &Arc) { self.recvs.cancel_select_waker(waker) } } /// On timed out, returns Err(()) #[inline(always)] pub fn check_timeout(deadline: Option) -> Result, ()> { if let Some(end) = deadline { let now =
Instant::now(); if now < end { return Ok(Some(end - now)); } else { return Err(()); } } Ok(None) } ================================================ FILE: src/sink.rs ================================================ use crate::shared::*; use crate::{flavor::FlavorMP, AsyncTx, MAsyncTx, TrySendError}; use std::fmt; use std::mem::MaybeUninit; use std::ops::Deref; use std::task::*; /// An async sink that allows you to write custom futures with `poll_send(ctx)`. pub struct AsyncSink { tx: AsyncTx, waker: Option<::Waker>, } impl fmt::Debug for AsyncSink { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "AsyncSink") } } impl fmt::Display for AsyncSink { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "AsyncSink") } } impl AsyncSink { #[inline] pub fn new(tx: AsyncTx) -> Self { Self { tx, waker: None } } } impl Deref for AsyncSink { type Target = AsyncTx; #[inline] fn deref(&self) -> &Self::Target { &self.tx } } impl From> for AsyncSink { #[inline] fn from(tx: AsyncTx) -> Self { tx.into_sink() } } impl From> for AsyncSink { #[inline] fn from(tx: MAsyncTx) -> Self { tx.into_sink() } } impl AsyncSink where F::Item: Unpin, { /// `poll_send()` will try to send a message. /// If the channel is full, it will register a notification for the next poll. /// /// # Behavior /// /// The polling behavior is different from [SendFuture](crate::SendFuture). /// Because the waker is not exposed to the user, you cannot perform delicate operations on /// the waker (compared to the `Drop` handler in `SendFuture`). /// To make sure no deadlock happens on cancellation, the `WakerState` will be `Init` /// after being registered (and will not be converted to `Waiting`). /// The receivers will wake up all `Init` state wakers until they find a normal /// pending sender in the `Waiting` state. /// /// # Return value: /// /// Returns `Ok(())` on message sent. /// /// Returns `Err([crate::TrySendError::Full])` for a `Poll::Pending` case. 
/// The next time the channel is not full, your future will be woken again. /// You should then continue calling `poll_send()` to send the message. /// If you want to cancel, just don't call `poll_send()` again. There are no side effects, /// and other senders will have a chance to send their messages. /// /// Returns `Err([crate::TrySendError::Disconnected])` when all `Rx` are dropped. #[inline] pub fn poll_send( &mut self, ctx: &mut Context, item: F::Item, ) -> Result<(), TrySendError> { let _item = MaybeUninit::new(item); let shared = &self.tx.shared; if shared.inner.try_send(&_item) { shared.on_send(); return Ok(()); } match self.tx.poll_send::(ctx, &_item, &mut self.waker) { Poll::Ready(Ok(())) => Ok(()), Poll::Ready(Err(())) => Err(TrySendError::Disconnected(unsafe { _item.assume_init() })), Poll::Pending => Err(TrySendError::Full(unsafe { _item.assume_init() })), } } } impl Drop for AsyncSink { fn drop(&mut self) { if let Some(waker) = self.waker.as_ref() { self.tx.shared.abandon_send_waker(waker); } } } ================================================ FILE: src/spsc.rs ================================================ //! Single producer, single consumer. //! //! The optimization assumes a single producer and consumer, so waker registration is completely lockless. //! //! **NOTE**: For the SP/SC version, [AsyncTx], [AsyncRx], [Tx], and [Rx] are not `Clone` and do not implement `Sync`. //! Although they can be moved to other threads, they are not allowed to be used with `send`/`recv` while in an `Arc`. //! //! The following code is OK: //! //! ``` rust //! use crossfire::*; //! async fn foo() { //! let (tx, rx) = spsc::bounded_async::(100); //! tokio::spawn(async move { //! let _ = tx.send(2).await; //! }); //! drop(rx); //! } //! ``` //! //! Because the `AsyncTx` does not have the `Sync` marker, using `Arc` will lose the `Send` marker. //! //! For your safety, the following code **should not compile**: //! //! ``` compile_fail //! use crossfire::*; //! 
use std::sync::Arc; //! async fn foo() { //! let (tx, rx) = spsc::bounded_async::(100); //! let tx = Arc::new(tx); //! tokio::spawn(async move { //! let _ = tx.send(2).await; //! }); //! drop(rx); //! } //! ``` use crate::async_rx::*; use crate::async_tx::*; use crate::blocking_rx::*; use crate::blocking_tx::*; use crate::flavor::{ flavor_dispatch, flavor_select_dispatch, queue_dispatch, Flavor, FlavorBounded, FlavorImpl, FlavorNew, FlavorWrap, Queue, }; use crate::shared::*; use crate::{NotCloneable, ReceiverType, SenderType}; use std::mem::MaybeUninit; /// Flavor Type for unbounded SPSC channel pub type List = FlavorWrap, RegistryDummy, RegistrySingle>; /// Flavor type for one-sized SPSC channel pub type One = FlavorWrap, RegistrySingle, RegistrySingle>; /// Flavor Type for bounded SPSC channel #[allow(clippy::large_enum_variant)] pub enum Array { Array(crate::flavor::ArraySpsc), One(crate::flavor::OneSpsc), } impl Array { #[inline] pub fn new(size: usize) -> Self { if size <= 1 { Self::One(crate::flavor::OneSpsc::new()) } else { Self::Array(crate::flavor::ArraySpsc::::new(size)) } } } macro_rules! wrap_array { ($self: expr, $method:ident $($arg:expr)*)=>{ match $self { Self::Array(inner) => inner.$method($($arg)*), Self::One(inner) => inner.$method($($arg)*), } }; } impl Queue for Array { type Item = T; queue_dispatch!(wrap_array); } impl FlavorImpl for Array { flavor_dispatch!(wrap_array); } impl FlavorSelect for Array { flavor_select_dispatch!(wrap_array); } impl FlavorBounded for Array { #[inline(always)] fn new_with_bound(size: usize) -> Self { Self::new(size) } } impl Flavor for Array { type Send = RegistrySingle; type Recv = RegistrySingle; } /// The generic builder for all spsc channel types with a new method (except Array). /// /// Initialize sender and receiver types from a flavor type, /// you can let the compiler to infer the type according to return type signature. /// (the falvor might have different new() method, but the rest is the same. 
/// # Examples /// /// ```rust /// use crossfire::*; /// let (tx, rx): (Tx<_>, Rx<_>) = spsc::new::, _, _>(); /// let (tx, rx): (AsyncTx>, Rx>) = spsc::new(); /// ``` #[inline(always)] pub fn new() -> (S, R) where F: Flavor + FlavorNew, S: SenderType + NotCloneable, R: ReceiverType + NotCloneable, { build::(F::new()) } /// The generic builder for all spsc channel types. /// /// Initialize sender and receiver types from a flavor type, /// you can let the compiler to infer the type according to return type signature. /// (the falvor might have different new() method, but the rest is the same. /// # Examples /// /// ```rust /// use crossfire::{*, spsc::*}; /// let (tx, rx): (Tx<_>, Rx<_>) = build::, _, _>(List::new()); /// let (tx, rx): (AsyncTx>, Rx>) = build(One::new()); /// ``` #[inline(always)] pub fn build(flavor: F) -> (S, R) where F: Flavor, S: SenderType + NotCloneable, R: ReceiverType + NotCloneable, { let shared = ChannelShared::new(flavor, F::Send::new(), F::Recv::new()); (S::new(shared.clone()), R::new(shared)) } #[inline] fn unbounded_new() -> (Tx>, R) where T: 'static, R: ReceiverType> + NotCloneable, { build::, Tx>, R>(List::::from_inner(crate::flavor::List::::new())) } #[inline] pub fn unbounded_blocking() -> (Tx>, Rx>) where T: 'static, { unbounded_new() } #[inline] pub fn unbounded_async() -> (Tx>, AsyncRx>) where T: 'static, { unbounded_new() } fn bounded_new(size: usize) -> (S, R) where T: 'static, S: SenderType> + NotCloneable, R: ReceiverType> + NotCloneable, { build::, S, R>(Array::::new(size)) } /// Creates a bounded channel with a pair of blocking sender and receiver. /// /// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1. #[inline] pub fn bounded_blocking(size: usize) -> (Tx>, Rx>) where T: 'static, { bounded_new(size) } /// Creates a bounded channel with a pair of async sender and receiver. 
/// /// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1. #[inline] pub fn bounded_async(size: usize) -> (AsyncTx>, AsyncRx>) where T: 'static, { bounded_new(size) } /// Creates a bounded channel with a pair of blocking sender and async receiver. /// /// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1. #[inline] pub fn bounded_blocking_async(size: usize) -> (Tx>, AsyncRx>) where T: 'static, { bounded_new(size) } /// Creates a bounded channel with a pair of async sender and blocking receiver. /// /// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1. #[inline] pub fn bounded_async_blocking(size: usize) -> (AsyncTx>, Rx>) where T: 'static, { bounded_new(size) } ================================================ FILE: src/stream.rs ================================================ use crate::shared::*; use crate::{AsyncRx, MAsyncRx}; use futures_core::stream; use std::fmt; use std::ops::Deref; use std::pin::Pin; use std::task::*; /// Constructed by [AsyncRx::into_stream()](crate::AsyncRx::into_stream()) /// /// Implements `futures_core::stream::Stream`. pub struct AsyncStream { rx: AsyncRx, waker: Option<::Waker>, ended: bool, } impl fmt::Debug for AsyncStream { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "AsyncStream") } } impl fmt::Display for AsyncStream { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "AsyncStream") } } impl AsyncStream { #[inline(always)] pub fn new(rx: AsyncRx) -> Self { Self { rx, waker: None, ended: false } } /// `poll_item()` will try to receive a message. /// If the channel is empty, it will register a notification for the next poll. /// /// # Behavior /// /// The polling behavior is different from [RecvFuture](crate::RecvFuture). 
/// Because the waker is not exposed to the user, you cannot perform delicate operations on /// the waker (compared to the `Drop` handler in `RecvFuture`). /// To make sure no deadlock happens on cancellation, the `WakerState` will be `Init` /// after being registered (and will not be converted to `Waiting`). /// The senders will wake up all `Init` state wakers until they find a normal /// pending receiver in the `Waiting` state. /// /// # Return Value: /// /// Returns `Ok(T)` on success. /// /// Returns Err([TryRecvError::Empty]) for a `Poll::Pending` case. /// The next time the channel is not empty, your future will be woken again. /// You should then continue calling `poll_item()` to receive the message. /// If you want to cancel, just don't call `poll_item()` again. Others will still have a chance /// to receive messages. /// /// Returns Err([TryRecvError::Disconnected]) if all `Tx` have been dropped and the channel is empty. #[inline] pub fn poll_item(&mut self, ctx: &mut Context) -> Poll> { match self.rx.poll_item::(ctx, &mut self.waker) { Ok(item) => Poll::Ready(Some(item)), Err(e) => { if e.is_empty() { return Poll::Pending; } self.ended = true; Poll::Ready(None) } } } } impl Deref for AsyncStream { type Target = AsyncRx; #[inline] fn deref(&self) -> &Self::Target { &self.rx } } impl stream::Stream for AsyncStream { type Item = F::Item; #[inline(always)] fn poll_next(self: Pin<&mut Self>, ctx: &mut Context) -> Poll> { let mut _self = self.get_mut(); if _self.ended { return Poll::Ready(None); } match _self.rx.poll_item::(ctx, &mut _self.waker) { Ok(item) => Poll::Ready(Some(item)), Err(e) => { if e.is_empty() { return Poll::Pending; } _self.ended = true; Poll::Ready(None) } } } } impl stream::FusedStream for AsyncStream { fn is_terminated(&self) -> bool { self.ended } } impl Drop for AsyncStream { fn drop(&mut self) { if let Some(waker) = self.waker.as_ref() { self.rx.shared.abandon_recv_waker(waker); } } } impl From> for AsyncStream { #[inline] fn from(rx: 
AsyncRx) -> Self { rx.into_stream() } } impl From> for AsyncStream { #[inline] fn from(rx: MAsyncRx) -> Self { rx.into_stream() } } ================================================ FILE: src/waitgroup.rs ================================================ //! This module provides two waitgroup implementation, works in blocking & async context. //! The implementation is low-cost ref-counting (counter and waker state is packed inside one atomic), the max value //! is (1 << (usize::BITS - 2) - 2) //! //! - [WaitGroupInline]: Which embedded inline with its parent structure (with no dereference cost) //! - (It requires its parent can be accessed by multi thread, for deep embedded scenario) //! - Threshold is const //! - Requires manual ref count manage, ([done()](WaitGroupInline::done) [done_many()](WaitGroupInline::done_many) is unsafe). //! - only one waiter thread is allowed. ([wait()](WaitGroupInline::wait), //! [wait_async()](WaitGroupInline::wait_async) is unsafe) //! //! - [WaitGroup]: which is a safe RAII guard API. //! - Its a referenced counted container, optional state inside may be shared between the threads of WaitGroup and its guards. //! - Only one waiter is allowed. (`WaitGroup` is `!Sync`) //! - Use [WaitGroup::add_guard()] to get [WaitGroupGuard]. //! - [WaitGroupGuard] has `Clone` (Although `WaitGroup` can not `Clone`) //! - [WaitGroupGuard] drop will decrease ref and protentially wake the main thread. //! - Can change threshold at any time. //! - **NOTE**: threshold is carried inside generated [WaitGroupGuard] to minimize the cost of atomic ops. //! When changing threshold to larger value, wait() might not wake up as soon as new threshold reached. //! //! # Safety //! //! [WaitGroup] does not have `Sync` marker, because it's not safe to concurrently wait, due to only one slot reserved for waker. //! If you know what you are doing when put it inside other struct, use unsafe impl on its parent //! struct. //! //! ``` //! 
use crossfire::waitgroup::WaitGroup; //! use std::sync::Arc; //! pub struct Parent { //! wg: WaitGroup<()>, //! } //! // allow parent to have Sync marker for Arc //! unsafe impl Sync for Parent {} //! //! let _parent = Arc::new(Parent{ //! wg: WaitGroup::new((), 0), //! }); //! ``` //! //! # Examples //! //! **Blocking Example: Concurrency Limiter** //! //! This example simulates a task scheduler that uses a `WaitGroup` to limit //! the number of concurrently running tasks to a specific watermark. //! It also uses the generic `T` to carry a shared state (e.g. `AtomicBool`) //! //! ``` //! use crossfire::waitgroup::WaitGroup; //! use std::thread; //! use std::time::Duration; //! use std::sync::atomic::{AtomicBool, Ordering}; //! //! const MAX_CONCURRENT_TASKS: usize = 4; //! const TOTAL_TASKS: usize = 10; //! //! // Initialize WaitGroup with a threshold of N-1. //! // `wait()` will block when the number of running tasks is >= N. //! // The `AtomicBool` is used to track if any task failed. //! let mut wg = WaitGroup::::new(AtomicBool::new(true), MAX_CONCURRENT_TASKS - 1); //! //! // Use a simple for loop to spawn a total of 10 tasks. //! for i in 0..TOTAL_TASKS { //! // `wait()` blocks until `wg.get_left() < MAX_CONCURRENT_TASKS`. //! // This effectively waits for a slot to become available. //! wg.wait(); //! // A slot is available, spawn a new task. //! let guard = wg.add_guard(); //! thread::spawn(move || { //! thread::sleep(Duration::from_millis(100)); //! // do some work //! if i == 5 { //! // Notify failure //! guard.store(false, Ordering::SeqCst); //! } //! drop(guard); //! }); //! } //! // After spawning all tasks, wait for the remaining running tasks to finish. //! // Set threshold to 0 to wait until all guards are dropped. //! wg.set_threshold(0); //! wg.wait(); //! //! assert_eq!(wg.get_left_seqcst(), 0); //! assert_eq!(wg.load(Ordering::SeqCst), false); //! ``` //! //! **Async Example** //! //! 
This example demonstrates task and sub-task, dynamically increasing the ref count by cloning WaitGroupGuard. //! //! ``` //! use crossfire::waitgroup::WaitGroup; //! use std::time::Duration; //! //! #[tokio::test] //! async fn wait_group_async_example() { //! let wg = WaitGroup::new((), 0); //! for _j in 0..4 { //! // Create a guard for the manager task. //! let parent_guard = wg.add_guard(); //! tokio::spawn(async move { //! // This manager task will spawn 2 workers. //! for i in 0..2 { //! let child_guard = parent_guard.clone(); //! tokio::spawn(async move { //! // Do some work... //! tokio::time::sleep(Duration::from_millis(50 * (i + 1))).await; //! // child_guard is dropped here. //! drop(child_guard); //! }); //! } //! // The manager's work is to spawn workers, //! // so it drops its own guard after the loop. //! drop(parent_guard); //! }); //! } //! // Wait until the manager guard and all its clones are dropped. //! wg.wait_async().await; //! assert_eq!(wg.get_left_seqcst(), 0); //! } //! ``` use crate::backoff::Backoff; use crate::shared::{check_timeout, ThinWaker}; #[allow(unused_imports)] use crate::{tokio_task_id, trace_log}; use std::cell::UnsafeCell; use std::future::Future; use std::mem::transmute; use std::ops::Deref; use std::pin::Pin; use std::ptr::NonNull; use std::sync::atomic::{ AtomicUsize, Ordering::{self, Acquire, Relaxed, Release, SeqCst}, }; use std::task::{Context, Poll, Waker}; use std::thread; use std::time::{Duration, Instant}; /// An unsafe WaitGroup variant which does not allocate and has no dereference cost; it must be embedded in a shared parent structure. /// /// # Limitation /// /// - THRESHOLD is const, default to zero /// - Only one thread / coroutine is allowed to wait; all wait_XXX() functions are unsafe. /// - done() is unsafe. /// - Also provides add_many() / done_many().
pub struct WaitGroupInline { inner: WaitGroupInner<()>, } impl WaitGroupInline { pub fn new() -> Self { // the inline version don't need its ref to represent ownership Self { inner: WaitGroupInner::new((), 0) } } /// load total reference count of `WaitGroupGuard` with SeqCst #[inline(always)] pub fn get_left_seqcst(&self) -> usize { self.inner.count(SeqCst) } /// Return total reference count of `WaitGroupGuard` with Acquire #[inline(always)] pub fn get_left(&self) -> usize { self.inner.count(Acquire) } /// Add one count to the WaitGroup #[inline(always)] pub fn add(&self) { self.inner.add(1); } /// Add multiple count to the WaitGroup #[inline(always)] pub fn add_many(&self, count: usize) { debug_assert!(count < COUNT_MASK - 2); self.inner.add(count); } /// Decrease one count, if it reduced to zero, will waking the waiter thread. /// /// Return true when zero has been reached /// /// # Safety /// /// You have to be careful for underflow, which will panic pub unsafe fn done(&self) -> bool { let p = &self.inner as *const WaitGroupInner<()>; WaitGroupInner::<()>::done::(p, 1, THRESHOLD) } /// Decrease multiple count, if it reduced to zero, will waking the waiter thread. /// /// Return true when zero has been reached /// /// # Safety /// /// You have to be careful for underflow, which will panic pub unsafe fn done_many(&self, count: usize) -> bool { debug_assert!(count < COUNT_MASK - 2); let p = &self.inner as *const WaitGroupInner<()>; WaitGroupInner::<()>::done::(p, count, THRESHOLD) } /// If the ref count reaches zero, return `Ok(())`, otherwise `Err(())` #[inline] pub fn try_wait(&self) -> Result<(), ()> { // one ref owned by mysql if self.inner.count(SeqCst) <= THRESHOLD { Ok(()) } else { Err(()) } } /// Block current coroutine until count drop below threshold. 
/// /// # Safety /// /// Only one thread is allow to wait #[inline] pub unsafe fn wait_async<'a>(&'a self) -> WaitGroupFuture<'a, ()> { WaitGroupFuture { inner: &self.inner, threshold: THRESHOLD, waker: None } } /// Block current coroutine until count drop below threshold, or until timeout happens /// /// # Safety /// /// Only one thread is allow to wait #[cfg(feature = "tokio")] #[cfg_attr(docsrs, doc(cfg(feature = "tokio")))] #[inline] pub unsafe fn wait_async_timeout<'a>( &'a self, timeout: Duration, ) -> WaitGroupTimeoutFuture<'a, (), tokio::time::Sleep, ()> { let sleep = tokio::time::sleep(timeout); self.wait_async_with_timer(sleep) } /// Block current coroutine until count drop below threshold, or until timeout happens /// /// # Safety /// /// Only one thread is allow to wait #[cfg(feature = "async_std")] #[cfg_attr(docsrs, doc(cfg(feature = "async_std")))] #[inline] pub unsafe fn wait_async_timeout<'a>( &'a self, timeout: Duration, ) -> WaitGroupTimeoutFuture<'a, (), impl Future, ()> { let sleep = async_std::task::sleep(timeout); self.wait_async_with_timer(sleep) } /// Block current coroutine until count drop below threshold, with a custom sleep / or cancel function /// /// # Safety /// /// Only one thread is allow to wait #[inline] pub unsafe fn wait_async_with_timer<'a, FR, R>( &'a self, fut: FR, ) -> WaitGroupTimeoutFuture<'a, (), FR, R> where FR: Future, { WaitGroupTimeoutFuture { inner: &self.inner, threshold: THRESHOLD, sleep: fut, waker: None } } /// Blocking current thread and Wait until count drop below threshold. 
/// /// # Safety /// /// Only one thread is allow to wait #[inline] pub unsafe fn wait(&self) { let _ = self.inner.wait_blocking(None, THRESHOLD); } /// Blocking current thread and Wait until count drop below threshold, or until timeout /// /// # Safety /// /// Only one thread is allow to wait #[inline] pub unsafe fn wait_timeout(&self, timeout: Duration) -> Result<(), ()> { self.inner.wait_blocking(Some(Instant::now() + timeout), THRESHOLD) } } /// A WaitGroup implementation allows custom threshold (>=0), works in blocking & async context. /// /// Features: /// - Only one waiter, concurrent ref count. /// - Carry optional state inside, shared between the main thread and WaitGroupGuard, just like Arc. /// - Change threshold at any time. /// - **NOTE**: /// threshold is carried inside generated [WaitGroupGuard] to minimize the cost of atomic ops. /// When changing threshold to larger value, wait() might not wake up as soon as new threshold reached. /// - Low-cost create and drop, because reference count and waker state is packed inside one atomic. /// - WaitGroupGuard dropping is wait-free, which decrease ref count with SeqCst CAS. /// - Max reference count to (1 << (usize::BITS - 2) - 2) /// /// You don't need to put WaitGroup into Arc, use [WaitGroup::add_guard()] to get `WaitGroupGuard`. /// It's ok to clone [WaitGroupGuard], which will increase internal ref count. /// /// # Safety /// /// It's not safe to concurrently wait, so it does not have `Sync` marker. /// If you know what you are doing when put it inside other struct, use unsafe impl. /// /// See module level [doc](crate::waitgroup) for example. 
pub struct WaitGroup { threshold: usize, inner: NonNull>, // Remove the Sync marker to prevent concurrent waiting } unsafe impl Send for WaitGroup {} impl WaitGroup { #[inline(always)] pub fn new(inner: T, threshold: usize) -> Self { // need one ref to represent ownership let inner = Box::new(WaitGroupInner::new(inner, 1)); Self { // one ref owned by myself threshold: threshold + 1, inner: unsafe { NonNull::new_unchecked(Box::into_raw(inner)) }, } } /// Threshold can be changed on the fly, which only affect the next `wait()`. /// /// # Safety /// /// Previous threshold is carried inside generated `WaitGroupGuard`. /// When changing threshold to larger value, wait() might not wake up as soon as new threshold reached. #[inline] pub fn set_threshold(&mut self, threshold: usize) { // one ref owned by myself self.threshold = threshold + 1; } #[inline(always)] fn get_inner(&self) -> &WaitGroupInner { unsafe { self.inner.as_ref() } } /// load total reference count of `WaitGroupGuard` with SeqCst #[inline(always)] pub fn get_left_seqcst(&self) -> usize { // minus my own ref self.get_inner().count(SeqCst) - 1 } /// Return total reference count of `WaitGroupGuard` with Acquire #[inline(always)] pub fn get_left(&self) -> usize { // minus my own ref self.get_inner().count(Acquire) - 1 } /// Add one ref count to the WaitGroup, return a guard to decrease the count on drop. #[inline(always)] pub fn add_guard(&self) -> WaitGroupGuard { self.get_inner().add(1); WaitGroupGuard { inner: self.inner, threshold: self.threshold } } /// If the ref count is below threshold, return `Ok(())`, otherwise `Err(())` #[inline] pub fn try_wait(&self) -> Result<(), ()> { // one ref owned by mysql if self.get_inner().count(SeqCst) <= self.threshold { Ok(()) } else { Err(()) } } /// Block current coroutine until count drop below threshold. 
/// /// # Safety /// /// Only one thread is allow to wait #[inline] pub fn wait_async<'a>(&'a self) -> WaitGroupFuture<'a, T> where T: Send + Unpin, { let inner = self.get_inner(); WaitGroupFuture { inner, threshold: self.threshold, waker: None } } /// Block current coroutine until count drop below threshold, or until timeout happens /// /// # Safety /// /// Only one thread is allow to wait #[cfg(feature = "tokio")] #[cfg_attr(docsrs, doc(cfg(feature = "tokio")))] #[inline] pub fn wait_async_timeout<'a>( &'a self, timeout: Duration, ) -> WaitGroupTimeoutFuture<'a, T, tokio::time::Sleep, ()> where T: Send + Unpin, { let sleep = tokio::time::sleep(timeout); self.wait_async_with_timer(sleep) } /// Block current coroutine until count drop below threshold, or until timeout happens /// /// # Safety /// /// Only one thread is allow to wait #[cfg(feature = "async_std")] #[cfg_attr(docsrs, doc(cfg(feature = "async_std")))] #[inline] pub fn wait_async_timeout<'a>( &'a self, timeout: Duration, ) -> WaitGroupTimeoutFuture<'a, T, impl Future, ()> where T: Send + Unpin, { let sleep = async_std::task::sleep(timeout); self.wait_async_with_timer(sleep) } /// Block current coroutine until count drop below threshold, with a custom sleep / or cancel function /// /// # Safety /// /// Only one thread is allow to wait #[inline] pub fn wait_async_with_timer<'a, FR, R>( &'a self, fut: FR, ) -> WaitGroupTimeoutFuture<'a, T, FR, R> where FR: Future, T: Send + Unpin, { let inner = self.get_inner(); WaitGroupTimeoutFuture { inner, threshold: self.threshold, sleep: fut, waker: None } } /// Blocking current thread and Wait until count drop below threshold. 
/// # Safety /// /// Only one thread is allowed to wait #[inline] pub fn wait(&self) { let _ = self.get_inner().wait_blocking(None, self.threshold); } /// Blocks the current thread and waits until the count drops below the threshold, or until timeout /// /// # Safety /// /// Only one thread is allowed to wait #[inline] pub fn wait_timeout(&self, timeout: Duration) -> Result<(), ()> { self.get_inner().wait_blocking(Some(Instant::now() + timeout), self.threshold) } } impl Drop for WaitGroup { #[inline] fn drop(&mut self) { unsafe { WaitGroupInner::destroy(self.inner); } } } impl Deref for WaitGroup { type Target = T; #[inline] fn deref(&self) -> &T { &unsafe { self.inner.as_ref() }.inner } } /// An RAII guard that represents one ref count in a WaitGroup. /// /// Cloning a WaitGroupGuard increases the ref count in the WaitGroup. /// /// Dropping a WaitGroupGuard is wait-free: it decreases the ref count with a SeqCst CAS and /// will wake up the waiter once the ref count decreases below the threshold. /// /// **NOTE**: The threshold is carried inside as non-atomic, not synced with the main thread for /// efficiency. But it's sufficient for most scenarios.
///
pub struct WaitGroupGuard {
    // NOTE(review): inner type of NonNull lost in extraction; presumably
    // NonNull<WaitGroupInner<T>> — confirm against the repository.
    inner: NonNull>,
    // Snapshot of the owning WaitGroup's threshold (non-atomic by design, see struct doc).
    threshold: usize,
}

unsafe impl Send for WaitGroupGuard {}
unsafe impl Sync for WaitGroupGuard {}

impl Drop for WaitGroupGuard {
    #[inline(always)]
    fn drop(&mut self) {
        // Decrease the ref count by 1; done_ptr frees the allocation if this
        // was the last reference.
        unsafe {
            WaitGroupInner::done_ptr(self.inner, 1, self.threshold);
        }
    }
}

impl Clone for WaitGroupGuard {
    #[inline]
    fn clone(&self) -> Self {
        let inner = unsafe { self.inner.as_ref() };
        // Each clone owns one reference.
        inner.add(1);
        Self { inner: self.inner, threshold: self.threshold }
    }
}

impl Deref for WaitGroupGuard {
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        &unsafe { self.inner.as_ref() }.inner
    }
}

/// Heap-allocated shared state of a WaitGroup; manually ref-counted via `state`
/// and freed by `destroy()` / `done()` when the count reaches 0.
struct WaitGroupInner {
    /// Refer to the doc of State
    state: AtomicUsize,
    // Waker slot guarded by the WAKER_FLAG_LOCK bit of `state`
    // (inner Option type parameter lost in extraction — presumably Option<ThinWaker>).
    o_waker: UnsafeCell>,
    inner: T,
}

unsafe impl Sync for WaitGroupInner {}

impl WaitGroupInner {
    /// Create the shared state with an initial ref count of `init_count`.
    #[inline(always)]
    fn new(inner: T, init_count: usize) -> Self {
        Self { state: AtomicUsize::new(init_count), o_waker: UnsafeCell::new(None), inner }
    }

    /// Current ref count (flag bits masked off), loaded with `order`.
    #[inline]
    fn count(&self, order: Ordering) -> usize {
        self.state.load(order) & COUNT_MASK
    }

    /// Mutable access to the waker slot.
    // NOTE(review): sound only while the WAKER_FLAG_LOCK protocol in
    // try_set_waker()/done() serializes access — TODO confirm.
    #[inline(always)]
    fn get_waker(&self) -> &mut Option {
        unsafe { transmute(self.o_waker.get()) }
    }

    /// Increase the ref count by `count`; panics on counter overflow
    /// (count approaching the flag bits).
    #[inline]
    fn add(&self, count: usize) {
        let old_state = self.state.fetch_add(count, Relaxed);
        if State::new(old_state).count() >= COUNT_MASK - 2 {
            panic!("WaitGroup count overflowed");
        }
    }

    /// Release one reference; frees the allocation and returns true only when
    /// this was the last reference and no thread holds the waker lock.
    /// Otherwise just decrements the count (CAS loop) and returns false.
    #[inline]
    unsafe fn destroy(p: NonNull) -> bool {
        let this = unsafe { p.as_ref() };
        let mut state = this.state.load(SeqCst);
        loop {
            let s = State::new(state);
            if s.is_locked() || s.count() > 1 {
                // Not last, or someone is reading the waker: just drop our ref.
                if let Err(_state) =
                    this.state.compare_exchange_weak(state, state - 1, SeqCst, Acquire)
                {
                    state = _state;
                    continue;
                }
                trace_log!("wg:({:?}) drop delay state={}", tokio_task_id!(), state - 1);
                return false;
            }
            {
                trace_log!("wg:({:?}) drop", tokio_task_id!());
                // Last reference and unlocked: reclaim the Box allocation.
                let _ = unsafe { Box::from_raw(p.as_ptr()) };
                return true;
            }
        }
    }

    /// Raw-pointer wrapper around done(); frees the allocation when done()
    /// reports this was the last reference. Returns true when freed.
    // NOTE(review): turbofish argument of done::() lost in extraction
    // (presumably a const bool such as done::<true>) — confirm.
    #[inline(always)]
    unsafe fn done_ptr(p: NonNull, count: usize, threshold: usize) -> bool {
        let _p = p.as_ptr();
        if Self::done::(_p, count, threshold) {
            let _ = unsafe {
Box::from_raw(_p)
            };
            return true;
        } else {
            false
        }
    }

    /// Decrease the ref count by `count`. If the new count is at or below
    /// `threshold` and a waker is set, take and wake it (guarded by the
    /// WAKER_FLAG_LOCK bit). return true to allow drop
    // NOTE(review): OWNER_SHIP is referenced but not declared in this view —
    // presumably a `const OWNER_SHIP: bool` generic parameter stripped by
    // extraction. Confirm against the repository.
    #[inline]
    fn done(this: *const Self, count: usize, threshold: usize) -> bool {
        trace_log!("wg:({:?}) enter done {count} {threshold}", tokio_task_id!());
        unsafe {
            let mut state = (*this).state.load(Relaxed);
            loop {
                let mut s = State::new(state);
                if OWNER_SHIP && s.is_last(count) {
                    // in case non SeqCst read old value, double check with SeqCst
                    let _state = (*this).state.load(SeqCst);
                    if _state == state {
                        trace_log!("wg:({:?}) done drop {count} {threshold}", tokio_task_id!());
                        return true;
                    }
                    state = _state;
                    continue;
                }
                // NOTE: When flag == WAKER_FLAG_LOCK, means one other thread is reading the
                // waker, we just try to decrease the count, but we should not drop it even
                // ref reach 0
                let try_lock = s.try_done(count, threshold);
                if try_lock {
                    debug_assert!(s.is_locked());
                }
                match (*this).state.compare_exchange_weak(state, s.to_usize(), SeqCst, Acquire) {
                    Ok(_) => {
                        if try_lock {
                            // We hold WAKER_FLAG_LOCK: safe to take the waker slot.
                            let o_waker = (*this).get_waker().take();
                            // Probably the last chance to check state, should use SeqCst to
                            // unlock. ref count may reach 0, means I'm the last one.
                            if OWNER_SHIP {
                                let old = (*this).state.fetch_and(!WAKER_FLAG_MASK, SeqCst);
                                if old & COUNT_MASK == 0 {
                                    trace_log!(
                                        "wg:({:?}) done locked drop cur {count} = 0",
                                        tokio_task_id!(),
                                    );
                                    // Safety: we had the lock, won't be others change the waker,
                                    // we are the last one, don't need to actually wake, just
                                    // destroy.
                                    return true;
                                }
                            } else {
                                (*this).state.fetch_and(!WAKER_FLAG_MASK, Release);
                            }
                            if let Some(waker) = o_waker {
                                trace_log!(
                                    "wg:({:?}) done waked {count} -> {} <= {threshold}",
                                    tokio_task_id!(),
                                    s.count()
                                );
                                waker.wake();
                            }
                        } else {
                            trace_log!("wg:({:?}) done {count} -> {}", tokio_task_id!(), s.count());
                        }
                        return false;
                    }
                    Err(cur) => {
                        state = cur;
                    }
                }
            }
        }
    }

    /// Install `waker` into the slot unless the count is already at or below
    /// `threshold` (then returns `Err(())` meaning "no need to wait").
    /// may_skip = true, for blocking context does not need to overwrite waker
    #[inline]
    fn try_set_waker(&self, waker: ThinWaker, threshold: usize, may_skip: bool) -> Result<(), ()> {
        let mut state = self.state.load(SeqCst);
        loop {
            let s = State::new(state);
            if s.count() <= threshold {
                // Safety: because of this, use SeqCst to prevent reading old value
                return Err(());
            } else if s.is_locked() {
                // done() is waking
                std::hint::spin_loop();
                state = self.state.load(Acquire);
                trace_log!("wg:({:?}) set_waker try again", tokio_task_id!());
                continue;
            }
            let old_state = if s.has_waker() {
                if may_skip {
                    trace_log!("wg:({:?}) set_waker skip", tokio_task_id!());
                    return Ok(());
                }
                // waker exist, first try lock, then replace
                if let Err(s) =
                    self.state.compare_exchange_weak(state, s.try_lock(), SeqCst, Acquire)
                {
                    state = s;
                    continue;
                }
                self.get_waker().replace(waker);
                trace_log!("wg:({:?}) set_waker replaced", tokio_task_id!());
                // clear WAKER_FLAG_LOCK and set WAKER_FLAG_SET
                self.state.fetch_xor(WAKER_FLAG_MASK, SeqCst)
            } else {
                self.get_waker().replace(waker);
                trace_log!("wg:({:?}) set_waker ok", tokio_task_id!());
                self.state.fetch_or(WAKER_FLAG_SET, SeqCst)
            };
            // Re-check: the count may have dropped while we were installing.
            if State::new(old_state).count() <= threshold {
                return Err(());
            }
            return Ok(());
        }
    }

    /// Park the current thread until the count drops to or below `threshold`,
    /// or `deadline` passes (then `Err(())`).
    // NOTE(review): deadline's Option type parameter lost in extraction —
    // presumably Option<Instant>, matching wait_timeout().
    #[inline]
    fn wait_blocking(&self, deadline: Option, threshold: usize) -> Result<(), ()> {
        macro_rules!
check {
            // Early-returns Ok(()) from wait_blocking when the count has dropped.
            ($order: expr) => {
                let cur = self.count($order);
                if cur <= threshold {
                    trace_log!("wg:({:?}) check {cur} <= {threshold}", tokio_task_id!());
                    return Ok(());
                }
                trace_log!("wg:({:?}) check {cur} > {threshold}", tokio_task_id!());
            };
        }
        check!(Acquire);
        let mut backoff = Backoff::new();
        let mut set_waker = false;
        loop {
            // Spin/yield first; backoff.snooze() returning true signals it is
            // time to actually park the thread.
            let r = backoff.snooze();
            check!(Acquire);
            if r {
                let waker = ThinWaker::Blocking(thread::current());
                // may_skip = set_waker: once registered, the blocking waker
                // does not need to be overwritten on later rounds.
                if self.try_set_waker(waker, threshold, set_waker).is_err() {
                    return Ok(());
                } else {
                    set_waker = true;
                }
                match check_timeout(deadline) {
                    Ok(None) => thread::park(),
                    Ok(Some(dur)) => thread::park_timeout(dur),
                    Err(_) => {
                        return Err(());
                    }
                }
                backoff.reset();
            }
        }
    }

    /// Future-style wait: Ready(()) once the count is at or below `threshold`,
    /// otherwise registers the task's waker and returns Pending.
    /// `o_waker` caches the previously registered waker across polls.
    #[inline]
    fn poll_async(
        &self,
        ctx: &mut Context,
        o_waker: &mut Option,
        threshold: usize,
    ) -> Poll<()> {
        macro_rules! check {
            // Evaluates to has_waker(); early-returns Ready when done.
            ($order: expr) => {{
                let s = State::new(self.state.load($order));
                let cur = s.count();
                if cur <= threshold {
                    trace_log!("wg:({:?}) READY check {cur} <= {threshold}", tokio_task_id!());
                    return Poll::Ready(());
                }
                trace_log!("wg:({:?}) check {cur} > {threshold}", tokio_task_id!());
                s.has_waker()
            }};
        }
        let has_waker = check!(Acquire);
        let new_waker = ctx.waker();
        if has_waker {
            #[allow(clippy::needless_else)]
            if let Some(old_waker) = o_waker {
                if old_waker.will_wake(new_waker) {
                    // Same waker already registered: re-check with SeqCst then stay Pending.
                    trace_log!("wg:({:?}) will_wake=true", tokio_task_id!());
                    check!(SeqCst);
                    trace_log!("wg:({:?}) PENDING", tokio_task_id!());
                    return Poll::Pending;
                } else {
                    trace_log!("wg:({:?}) waker will_wake=false", tokio_task_id!())
                }
            }
        }
        if self.try_set_waker(ThinWaker::Async(new_waker.clone()), threshold, false).is_err() {
            // Count dropped while registering: resolve immediately.
            trace_log!("wg:({:?}) READY during set_waker", tokio_task_id!());
            Poll::Ready(())
        } else {
            o_waker.replace(new_waker.clone());
            trace_log!("wg:({:?}) PENDING", tokio_task_id!());
            Poll::Pending
        }
    }
}

/// Future returned by WaitGroup::wait_async; resolves when the ref count
/// drops to or below `threshold`.
#[must_use]
pub struct WaitGroupFuture<'a, T> {
    inner: &'a WaitGroupInner,
    threshold: usize,
    // Cached copy of the last registered waker (type parameter lost in extraction).
    waker: Option,
}

impl<'a, T> Future for WaitGroupFuture<'a, T>
where
    T: Send + Unpin,
{
    type Output = ();
    fn poll(self: Pin<&mut Self>, ctx: &mut Context) -> Poll {
        // Safety-relevant: get_unchecked_mut is used for pin projection;
        // fields polled here are not structurally pinned (T: Unpin bound).
        let this = unsafe { self.get_unchecked_mut() };
        this.inner.poll_async(ctx, &mut this.waker, this.threshold)
    }
}

/// Wait until the ref count is below threshold, return `Ok(())`.
/// If timeout happens returns `Err(())`
#[must_use]
pub struct WaitGroupTimeoutFuture<'a, T, FR, R>
where
    FR: Future,
    T: Send + Unpin,
{
    inner: &'a WaitGroupInner,
    // Timeout/cancellation future; when it completes first, poll yields Err(()).
    sleep: FR,
    threshold: usize,
    waker: Option,
}

impl<'a, T, FR, R> Future for WaitGroupTimeoutFuture<'a, T, FR, R>
where
    FR: Future,
    T: Send + Unpin,
{
    type Output = Result<(), ()>;
    fn poll(self: Pin<&mut Self>, ctx: &mut Context) -> Poll {
        let this = unsafe { self.get_unchecked_mut() };
        if this.inner.poll_async(ctx, &mut this.waker, this.threshold).is_ready() {
            return Poll::Ready(Ok(()));
        }
        // Project the pin onto the sleep future and race it against the wait.
        let sleep = unsafe { Pin::new_unchecked(&mut this.sleep) };
        if sleep.poll(ctx).is_ready() {
            Poll::Ready(Err(()))
        } else {
            Poll::Pending
        }
    }
}

const WAKER_FLAG_SET: usize = 1 << (usize::BITS - 1);
const WAKER_FLAG_LOCK: usize = 1 << (usize::BITS - 2);
const WAKER_FLAG_MASK: usize = WAKER_FLAG_SET | WAKER_FLAG_LOCK;
const COUNT_MASK: usize = !WAKER_FLAG_MASK;

/// The 2 highest bits are WAKER_FLAG_SET | WAKER_FLAG_LOCK; they are exclusive, so there are 3
/// states:
/// - 0: waker is not set
/// - WAKER_FLAG_SET: there's a waker, someone might be waiting; it's possible to give up waiting
///   when threshold is reached
/// - WAKER_FLAG_LOCK: one thread is reading the waker; when it is done, it should reset the
///   state to 0.
///
/// ref count:
/// - the lower bits are the ref count, initialized to 1.
/// - The WaitGroup can be dropped early, leaving the WaitGroupGuard holders to drop the count.
/// - when the last holder drops the count to 0, it is responsible to free the memory, with the
///   following exception:
/// - NOTE that when WAKER_FLAG_LOCK is set, it is not allowed to free the memory even when the
///   count reaches 0; the last one to release the lock is responsible to free the memory
struct State(usize);

impl State {
    #[inline(always)]
    fn new(state: usize) -> Self {
        Self(state)
    }
    /// Ref count portion (flag bits masked off).
    #[inline(always)]
    fn count(&self) -> usize {
        self.0 & COUNT_MASK
    }
    /// The two flag bits only.
    #[inline(always)]
    fn waker_flag(&self) -> usize {
        self.0 & WAKER_FLAG_MASK
    }
    #[inline(always)]
    fn is_locked(&self) -> bool {
        self.0 & WAKER_FLAG_LOCK > 0
    }
    #[inline(always)]
    fn has_waker(&self) -> bool {
        self.0 & WAKER_FLAG_SET > 0
    }
    /// New state value with WAKER_FLAG_LOCK set (and WAKER_FLAG_SET cleared).
    #[inline(always)]
    fn try_lock(&self) -> usize {
        self.count() | WAKER_FLAG_LOCK
    }
    /// When no one locks and I'm the last one, can drop directly; returns true.
    #[inline]
    fn is_last(&self, delta: usize) -> bool {
        let waker_flag = self.waker_flag();
        waker_flag != WAKER_FLAG_LOCK && self.count() == delta
    }
    /// Compute the post-decrement state in place. Panics if `delta` exceeds the count.
    /// # Return value:
    /// - should_lock==true: when reach threshold, should dec count and try_lock.
    /// - should_lock==false: just decrease count.
    #[inline(always)]
    fn try_done(&mut self, delta: usize, threshold: usize) -> bool {
        let waker_flag = self.waker_flag();
        let old_count = self.count();
        let new_count = if old_count >= delta {
            old_count - delta
        } else {
            panic!("underflow detected {} < {}", old_count, delta);
        };
        // Lock only when crossing the threshold with a waker present.
        let try_lock = new_count <= threshold && waker_flag == WAKER_FLAG_SET;
        if try_lock {
            self.0 = WAKER_FLAG_LOCK | new_count;
            true
        } else {
            self.0 = waker_flag | new_count;
            false
        }
    }
    #[inline(always)]
    #[allow(clippy::wrong_self_convention)]
    fn to_usize(&self) -> usize {
        self.0
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use captains_log::{recipe, ConsoleTarget, Level};
    use std::thread;

    /// Guard clone/drop must move the visible count up and down by exactly one.
    #[test]
    fn test_waitgroup_inner_count() {
        let wg = WaitGroup::new((), 0);
        assert_eq!(wg.get_left_seqcst(), 0);
        let guard1 = wg.add_guard();
        assert_eq!(wg.get_left_seqcst(), 1);
        let guard2 = wg.add_guard();
        assert_eq!(wg.get_left_seqcst(), 2);
        drop(guard1);
        assert_eq!(wg.get_left_seqcst(), 1);
        drop(guard2);
        assert_eq!(wg.get_left_seqcst(), 0);
    }

    /// Exercises the State bit-packing transitions in isolation.
    #[test]
    fn test_waitgroup_state() {
        assert_eq!(State::new(2).count(), 2);
        assert!(State::new(2 | WAKER_FLAG_SET).has_waker());
        assert!(!State::new(2 | WAKER_FLAG_SET).is_locked());
        assert!(!State::new(2 | WAKER_FLAG_LOCK).has_waker());
        assert!(State::new(2 | WAKER_FLAG_LOCK).is_locked());
        let mut s = State::new(2);
        // no waker
        assert_eq!(s.try_done(1, 1), false);
        assert!(!s.is_locked());
        assert_eq!(s.count(), 1);
        // threshold is ignore, just drop
        assert!(s.is_last(1));
        // state don't need to change
        assert_eq!(s.count(), 1);
        // WAKER_FLAG_SET ( 3-1 <=2 )-> WAKER_FLAG_LOCK
        let mut s = State::new(3 | WAKER_FLAG_SET);
        assert!(!s.is_last(1));
        assert_eq!(s.try_done(1, 2), true);
        assert!(s.is_locked());
        assert!(!s.has_waker());
        assert_eq!(s.count(), 2);
        // WAKER_FLAG_LOCK -> dec
        assert_eq!(s.try_done(1, 0), false);
        assert!(s.is_locked());
        assert_eq!(s.count(), 1);
        // WAKER_FLAG_LOCK -> no waker
        let _s = s.0 & (!WAKER_FLAG_MASK);
        assert_eq!(_s, 1);
        // WAKER_FLAG_LOCK exist, don't drop,
just dec assert_eq!(s.try_done(1, 0), false); assert_eq!(s.count(), 0); } #[test] fn test_waitgroup_ptr() { recipe::console_logger(ConsoleTarget::Stdout, Level::Trace).test().build().expect("log"); let inner = Box::new(WaitGroupInner::new((), 1)); assert_eq!(inner.count(SeqCst), 1); assert_eq!(State::new(inner.state.load(Ordering::SeqCst)).waker_flag(), 0); println!("test try_set_waker met threshold reach"); assert_eq!(inner.try_set_waker(ThinWaker::Blocking(thread::current()), 1, false), Err(())); inner.add(1); assert_eq!(inner.count(SeqCst), 2); println!("test try_set_waker ok"); assert!(inner.try_set_waker(ThinWaker::Blocking(thread::current()), 1, false).is_ok()); let s = State::new(inner.state.load(Ordering::SeqCst)); assert_eq!(s.waker_flag(), WAKER_FLAG_SET, "s {}, {}", s.is_locked(), s.has_waker()); println!("test try_set_waker again skip"); assert!(inner.try_set_waker(ThinWaker::Blocking(thread::current()), 1, true).is_ok()); let s = State::new(inner.state.load(Ordering::SeqCst)); assert_eq!(s.waker_flag(), WAKER_FLAG_SET); println!("test try_set_waker again force"); assert!(inner.try_set_waker(ThinWaker::Blocking(thread::current()), 1, false).is_ok()); let s = State::new(inner.state.load(Ordering::SeqCst)); assert_eq!(s.waker_flag(), WAKER_FLAG_SET); assert_eq!(inner.count(SeqCst), 2); let p = unsafe { NonNull::new_unchecked(Box::into_raw(inner)) }; println!("test done triggering wakeup"); unsafe { assert!(!WaitGroupInner::done_ptr(p, 1, 1)); { let inner = p.as_ref(); assert_eq!(inner.count(SeqCst), 1); let s = State::new(inner.state.load(Ordering::SeqCst)); assert_eq!(s.waker_flag(), 0); } println!("test done triggering drop"); assert!(WaitGroupInner::done_ptr(p, 1, 0)); } } #[test] fn test_waitgroup_inner() { recipe::console_logger(ConsoleTarget::Stdout, Level::Trace).test().build().expect("log"); let inner = WaitGroupInner::new((), 1); assert_eq!(inner.count(SeqCst), 1); assert_eq!(State::new(inner.state.load(Ordering::SeqCst)).waker_flag(), 0); 
println!("test try_set_waker met threshold reach"); assert_eq!(inner.try_set_waker(ThinWaker::Blocking(thread::current()), 1, false), Err(())); inner.add(1); assert_eq!(inner.count(SeqCst), 2); println!("test try_set_waker ok"); assert!(inner.try_set_waker(ThinWaker::Blocking(thread::current()), 1, false).is_ok()); let s = State::new(inner.state.load(Ordering::SeqCst)); assert_eq!(s.waker_flag(), WAKER_FLAG_SET, "s {}, {}", s.is_locked(), s.has_waker()); println!("test try_set_waker again skip"); assert!(inner.try_set_waker(ThinWaker::Blocking(thread::current()), 1, true).is_ok()); let s = State::new(inner.state.load(Ordering::SeqCst)); assert_eq!(s.waker_flag(), WAKER_FLAG_SET); println!("test try_set_waker again force"); assert!(inner.try_set_waker(ThinWaker::Blocking(thread::current()), 1, false).is_ok()); let s = State::new(inner.state.load(Ordering::SeqCst)); assert_eq!(s.waker_flag(), WAKER_FLAG_SET); assert_eq!(inner.count(SeqCst), 2); let p = &inner as *const WaitGroupInner<()>; println!("test done triggering wakeup"); assert!(!WaitGroupInner::<()>::done::(p, 1, 1)); { assert_eq!(inner.count(SeqCst), 1); let s = State::new(inner.state.load(Ordering::SeqCst)); assert_eq!(s.waker_flag(), 0); } println!("test done last"); WaitGroupInner::<()>::done::(p, 1, 0); assert_eq!(inner.count(Ordering::SeqCst), 0) } } ================================================ FILE: src/waker.rs ================================================ use crate::collections::ArcCell; use crate::flavor::FlavorImpl; use std::cell::UnsafeCell; use std::fmt; use std::ops::Deref; use std::sync::{ atomic::{AtomicU32, AtomicU8, Ordering}, Arc, Weak, }; use std::task::*; use std::thread; #[derive(Debug, Clone, Copy, PartialEq)] #[repr(u8)] pub enum WakerState { Init = 0, // A temporary state, https://github.com/frostyplanet/crossfire-rs/issues/22 Waiting = 1, //Copy = 2, // Omit due to skipping direct copy on async or with deadline Woken = 3, Closed = 4, // Channel closed, or timeout cancellation 
Done = 5, } #[derive(PartialEq, Debug, Clone, Copy)] #[repr(u8)] pub enum WakeResult { Woken = 0x1, // Woken, stop iteration Sent = 0x3, // Woken with message direct copied Next = 0x2, // Woken, but have to continued for more iteration Skip = 0x4, // Waker Cancelled or Done } impl WakeResult { #[inline(always)] pub fn is_done(&self) -> bool { (*self as u8) & (WakeResult::Woken as u8) > 0 } } /// Although removing direct copy feature of the payload pointer is not used, /// leave it to unbuffer channel in the future pub struct ArcWaker

(Arc>); impl

fmt::Debug for ArcWaker

{ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.0.fmt(f) } } impl

fmt::Debug for WakerInner

{ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "waker({})", self.get_seq()) } } impl

Deref for ArcWaker

{ type Target = WakerInner

; #[inline] fn deref(&self) -> &Self::Target { self.0.as_ref() } } impl

ArcWaker

{ #[inline(always)] pub fn new_async(ctx: &Context, payload: P) -> Self { Self(Arc::new(WakerInner { seq: AtomicU32::new(0), state: AtomicU8::new(WakerState::Init as u8), waker: UnsafeCell::new(ThinWaker::Async(ctx.waker().clone())), payload: UnsafeCell::new(payload), })) } #[inline(always)] pub fn new_blocking(payload: P) -> Self { Self(Arc::new(WakerInner { seq: AtomicU32::new(0), state: AtomicU8::new(WakerState::Init as u8), waker: UnsafeCell::new(ThinWaker::Blocking(thread::current())), payload: UnsafeCell::new(payload), })) } } impl

ArcWaker

{
    /// Rebuild an ArcWaker from a previously extracted Arc (see to_arc()).
    // NOTE(review): Arc's type parameter lost in extraction — presumably
    // Arc<WakerInner<P>>. Confirm against the repository.
    #[inline(always)]
    pub fn from_arc(inner: Arc>) -> Self {
        Self(inner)
    }
    /// Unwrap into the underlying Arc, consuming self.
    #[allow(clippy::wrong_self_convention)]
    #[inline(always)]
    pub fn to_arc(self) -> Arc> {
        self.0
    }
    /// Downgrade to a Weak handle for registry storage.
    #[inline(always)]
    pub fn weak(&self) -> Weak> {
        Arc::downgrade(&self.0)
    }
}

/// A minimal waker that can wake either an async task (via std's `Waker`)
/// or a parked thread (via `Thread::unpark`).
#[derive(Debug)]
pub(crate) enum ThinWaker {
    Async(Waker),
    Blocking(thread::Thread),
}

impl ThinWaker {
    /// Wake without consuming the waker.
    #[inline(always)]
    pub fn wake_by_ref(&self) {
        match self {
            Self::Async(w) => w.wake_by_ref(),
            Self::Blocking(th) => th.unpark(),
        }
    }
    /// Wake, consuming the waker.
    #[allow(dead_code)]
    #[inline(always)]
    pub fn wake(self) {
        match self {
            Self::Async(w) => w.wake(),
            Self::Blocking(th) => th.unpark(),
        }
    }
    /// Whether the stored async waker would wake the task of `ctx`.
    /// Only valid on the Async variant; the Blocking arm hits unreachable!().
    #[inline(always)]
    pub fn will_wake(&self, ctx: &mut Context) -> bool {
        // ref: https://github.com/frostyplanet/crossfire-rs/issues/14
        // https://docs.rs/tokio/latest/tokio/runtime/index.html#:~:text=Normally%2C%20tasks%20are%20scheduled%20only,is%20called%20a%20spurious%20wakeup
        // There might be situation like spurious wakeup, poll() again under no waking up ever
        // happened, waker still exists in registry but cannot be used to wake the current future.
        if let Self::Async(_waker) = self {
            _waker.will_wake(ctx.waker())
        } else {
            unreachable!();
        }
    }
}

pub struct WakerInner

{ state: AtomicU8, seq: AtomicU32, waker: UnsafeCell, #[allow(dead_code)] payload: UnsafeCell

, } unsafe impl

Send for WakerInner

{} unsafe impl

Sync for WakerInner

{} impl

WakerInner

{ #[inline(always)] fn get_waker(&self) -> &ThinWaker { unsafe { &*self.waker.get() } } #[inline(always)] fn get_waker_mut(&self) -> &mut ThinWaker { unsafe { &mut *self.waker.get() } } #[inline(always)] fn get_payload_mut(&self) -> &mut P { unsafe { &mut *self.payload.get() } } #[inline(always)] pub fn reset(&self, payload: P) { // From the object pool to reset value, // we should use SeqCst fence to clear the cache of other cores *self.get_payload_mut() = payload; self.reset_init(); } #[inline(always)] pub fn get_seq(&self) -> u32 { self.seq.load(Ordering::Relaxed) } #[inline(always)] pub fn set_seq(&self, seq: u32) { self.seq.store(seq, Ordering::Relaxed); } #[inline(always)] fn update_thread_handle(&self) { let _waker = self.get_waker_mut(); *_waker = ThinWaker::Blocking(thread::current()); } #[inline(always)] pub fn commit_waiting(&self) -> u8 { if let Err(s) = self.try_change_state(WakerState::Init, WakerState::Waiting) { s } else { WakerState::Waiting as u8 } } #[inline(always)] pub fn try_change_state(&self, cur: WakerState, new_state: WakerState) -> Result<(), u8> { self.state.compare_exchange( cur as u8, new_state as u8, Ordering::SeqCst, Ordering::Acquire, )?; Ok(()) } #[inline(always)] pub fn reset_init(&self) { // this is before we put into registry (which will extablish happen-before relationship), // it safe to use Relaxed self.state.store(WakerState::Init as u8, Ordering::Relaxed); } /// Return current status, /// Closed: might be channel closed, or future successfully cancelled, the future should drop message; try to clear its waker. /// Done: the message actually sent, nothing to DO /// Woken: the future should drop message, and wake another counterpart. 
#[inline(always)] pub fn abandon(&self) -> Result<(), u8> { // it will content with close(), on_recv(), on_send() match self.change_state_smaller_eq(WakerState::Waiting, WakerState::Closed) { Ok(_) => Ok(()), Err(state) => Err(state), } // NOTE: there's no Copy state, so we do not loop } #[inline(always)] pub fn close_wake(&self) -> bool { // should have lock because it will content with abandon() if self.change_state_smaller_eq(WakerState::Waiting, WakerState::Closed).is_ok() { self.get_waker().wake_by_ref(); return true; } false } // Return Ok(pre_state), otherwise return Err(current_state) #[inline(always)] pub fn change_state_smaller_eq( &self, condition: WakerState, target: WakerState, ) -> Result { debug_assert!((condition as u8) < (target as u8)); // Save one load() let mut state = condition as u8; loop { match self.state.compare_exchange_weak( state, target as u8, Ordering::SeqCst, Ordering::Acquire, ) { Ok(_) => { return Ok(state); } Err(s) => { if s > condition as u8 { return Err(s); } state = s; } } } } #[inline(always)] pub fn _get_state(&self, order: Ordering) -> u8 { self.state.load(order) } #[inline(always)] pub fn get_state(&self) -> u8 { self.state.load(Ordering::SeqCst) } #[inline(always)] pub fn get_state_relaxed(&self) -> u8 { self.state.load(Ordering::Relaxed) } /// Assume no lock #[inline(always)] pub fn wake(&self) -> WakeResult { // This is after we get waker from waker_registry, which already happen before relationship. 
// both >= WakerState::Waiting is certain let mut state = self.get_state_relaxed(); loop { if state >= WakerState::Woken as u8 { return WakeResult::Skip; } else if state == WakerState::Waiting as u8 { self.state.store(WakerState::Woken as u8, Ordering::SeqCst); self.get_waker().wake_by_ref(); return WakeResult::Woken; } else { match self.state.compare_exchange_weak( WakerState::Init as u8, WakerState::Woken as u8, Ordering::SeqCst, Ordering::Acquire, ) { Ok(_) => { self.get_waker().wake_by_ref(); return WakeResult::Next; } Err(s) => { state = s; } } } } } #[inline(always)] pub fn will_wake(&self, ctx: &mut Context) -> bool { self.get_waker().will_wake(ctx) } } impl WakerInner<*const T> { #[inline(always)] fn get_payload(&self) -> *const T { *self.get_payload_mut() } #[inline(always)] pub fn wake_or_copy>(&self, flavor: &F) -> WakeResult { // This is after we get waker from waker_registry, which already happen before relationship. // both >= WakerState::Waiting is certain let mut state = self.get_state_relaxed(); loop { if state >= WakerState::Woken as u8 { return WakeResult::Skip; } else if state == WakerState::Waiting as u8 { let p = self.get_payload(); if p.is_null() { self.state.store(WakerState::Woken as u8, Ordering::SeqCst); self.get_waker().wake_by_ref(); return WakeResult::Woken; } state = if let Some(true) = flavor.try_send_oneshot(p) { WakerState::Done as u8 } else { WakerState::Woken as u8 }; self.state.store(state, Ordering::SeqCst); self.get_waker().wake_by_ref(); if state == WakerState::Done as u8 { return WakeResult::Sent; } else { return WakeResult::Woken; } } else { match self.state.compare_exchange_weak( WakerState::Init as u8, WakerState::Woken as u8, Ordering::SeqCst, Ordering::Acquire, ) { Ok(_) => { self.get_waker().wake_by_ref(); return WakeResult::Next; } Err(s) => { state = s; } } } } } } pub struct WakerCache(ArcCell>); impl WakerCache

{ #[inline(always)] pub(crate) fn new() -> Self { Self(ArcCell::new()) } #[inline(always)] pub fn new_blocking(&self, payload: P) -> ArcWaker

{ if let Some(inner) = self.0.pop() { inner.update_thread_handle(); inner.reset(payload); return ArcWaker::

::from_arc(inner); } ArcWaker::new_blocking(payload) } #[inline(always)] pub(crate) fn push(&self, waker: ArcWaker

) { debug_assert!(waker.get_state() >= WakerState::Woken as u8); let a = waker.to_arc(); if Arc::weak_count(&a) == 0 && Arc::strong_count(&a) == 1 { self.0.try_put(a); } } #[allow(dead_code)] #[inline(always)] pub(crate) fn is_empty(&self) -> bool { !self.0.exists() } } #[cfg(test)] mod tests { use super::*; #[test] fn test_waker_size() { use std::mem::size_of; println!("wakertype {}", size_of::()); println!("waker inner {}", size_of::>()); } } ================================================ FILE: src/waker_registry.rs ================================================ #[allow(unused_imports)] use crate::collections::WeakCell; #[allow(unused_imports)] use crate::flavor::{Flavor, FlavorImpl}; #[cfg(feature = "trace_log")] use crate::tokio_task_id; use crate::trace_log; use crate::waker::*; use parking_lot::Mutex; use std::cell::UnsafeCell; use std::collections::VecDeque; use std::fmt::Debug; use std::sync::{ atomic::{compiler_fence, AtomicU8, AtomicUsize, Ordering}, Arc, Weak, }; use std::task::{Context, Poll}; // pub(crate) on type alias does not matter, mpmc::List alias works because RegistryMulti is pub pub(crate) type RegistryMultiSend = RegistryMulti<*const T>; pub(crate) type RegistryMultiRecv = RegistryMulti<()>; pub(crate) trait Registry: Send + Sync + 'static { type Waker: Send + Unpin + 'static + Debug; fn get_waker_state(&self, o_waker: &Option, order: Ordering) -> u8; #[inline(always)] fn clear_wakers(&self, _waker: &Self::Waker) {} fn close(&self); #[inline(always)] fn len(&self) -> usize { 0 } #[inline(always)] fn commit_waiting(&self, _o_waker: &Option) -> u8 { WakerState::Init as u8 } #[inline(always)] fn cancel_waker(&self, o_waker: &mut Option) { let _ = o_waker.take(); } #[inline(always)] fn abandon_waker(&self, _waker: &Self::Waker) -> Result<(), u8> { Ok(()) } } pub(crate) trait RegistrySend: Registry { fn new() -> Self; #[inline(always)] fn use_direct_copy(&self) -> bool { false } #[inline(always)] fn reg_waker_blocking( &self, _o_waker: &mut 
Option<::Waker>, _cache: &WakerCache<*const T>, _payload: *const T, ) { unreachable!(); } #[inline(always)] fn reg_waker_async( &self, _ctx: &mut Context, _o_waker: &mut Option<::Waker>, ) -> Option> { unreachable!(); } /// remove outdated waker, make sure it does not accumulate. /// /// It's ok to set state with Relaxed here, two scenario: /// * set Done while the state is Init, does not matter other thread see it or not. /// * other thread might have wake it in the process, but we are dropping it anyway, and then /// reg_waker with a new one. #[inline(always)] fn cancel_reuse_waker( &self, o_waker: &mut Option<::Waker>, state: WakerState, ) -> u8 { let _ = o_waker.take(); state as u8 } #[inline(always)] fn fire(&self, _flavor: &F) -> WakeResult where F: FlavorImpl, { WakeResult::Next } #[inline(always)] fn cache_waker( &self, _o_waker: Option<::Waker>, _cache: &WakerCache<*const T>, ) { } } pub(crate) trait RegistryRecv: Registry { fn new() -> Self; #[inline(always)] fn fire(&self) {} #[inline(always)] fn reg_waker_blocking( &self, _o_waker: &mut Option<::Waker>, _cache: &WakerCache<()>, ) { unreachable!(); } #[inline(always)] fn reg_waker_async( &self, _ctx: &mut Context, _o_waker: &mut Option<::Waker>, ) -> Option> { unreachable!(); } #[inline(always)] fn cache_waker(&self, _o_waker: Option<::Waker>, _cache: &WakerCache<()>) {} fn reg_select_waker(&self, channel_id: usize, waker: &Arc) -> bool; #[inline(always)] fn cancel_select_waker(&self, _waker: &Arc) {} } #[derive(Debug)] pub struct RegistryDummy(); impl Registry for RegistryDummy { type Waker = (); #[inline(always)] fn get_waker_state(&self, _o_waker: &Option, _order: Ordering) -> u8 { unreachable!(); } #[inline(always)] fn close(&self) {} } impl RegistrySend for RegistryDummy { #[inline(always)] fn new() -> Self { Self() } } type SingleWaker = ArcWaker<()>; //type SingleWaker = ThinWaker; pub struct RegistrySingle { cell: WeakCell>, // OneSpmc has comparable speed as WeakCell and does not allocate on 
waker registration, // but since miri will report datarace issue, commented out for now. //cell: OneSpmc, _tag: &'static str, } impl RegistrySingle { #[inline(always)] fn _fire(&self) { if let Some(waker) = self.cell.pop() { waker.wake(); trace_log!("{} wake", self._tag); } } #[inline(always)] fn _reg_waker_async(&self, ctx: &mut Context, o_waker: &mut Option) { // XXX don't know what the waker was, always generate new let waker = ArcWaker::<()>::new_async(ctx, ()); //let waker = ThinWaker::Async(ctx.waker().clone()); trace_log!("{}{:?}: reg {:?}", self._tag, tokio_task_id!(), waker); self.cell.replace(waker.weak()); o_waker.replace(waker); //self.cell.replace(waker); // should store into o_waker, AsyncTx need to drop item when SendFuture drop } #[inline(always)] fn _reg_waker_blocking(&self, o_waker: &mut Option) { let waker = ArcWaker::<()>::new_blocking(()); // let waker = ThinWaker::Blocking(thread::current()); trace_log!("{}{:?}: reg {:?}", self._tag, tokio_task_id!(), waker); self.cell.replace(waker.weak()); o_waker.replace(waker); //self.cell.replace(waker); } } impl Registry for RegistrySingle { type Waker = SingleWaker; #[inline(always)] fn get_waker_state(&self, _o_waker: &Option, _order: Ordering) -> u8 { if self.cell.is_empty() { WakerState::Woken as u8 } else { WakerState::Init as u8 } } #[inline(always)] fn close(&self) { self._fire(); } } impl RegistrySend for RegistrySingle { #[inline(always)] fn new() -> Self { //Self { cell: _OneSpmc::new(), _tag: "tx" } Self { cell: WeakCell::new(), _tag: "tx" } } #[inline(always)] fn fire(&self, _flavor: &F) -> WakeResult where F: FlavorImpl, { self._fire(); WakeResult::Next } #[inline(always)] fn reg_waker_blocking( &self, o_waker: &mut Option, _cache: &WakerCache<*const T>, _payload: *const T, ) { self._reg_waker_blocking(o_waker); } #[inline(always)] fn reg_waker_async( &self, ctx: &mut Context, o_waker: &mut Option, ) -> Option> { self._reg_waker_async(ctx, o_waker); None } } impl RegistryRecv for 
RegistrySingle { #[inline(always)] fn new() -> Self { //Self { cell: OneSpmc::new(), _tag: "rx" } Self { cell: WeakCell::new(), _tag: "rx" } } #[inline(always)] fn fire(&self) { self._fire(); } #[inline(always)] fn reg_waker_blocking(&self, o_waker: &mut Option, _cache: &WakerCache<()>) { self._reg_waker_blocking(o_waker) } #[inline(always)] fn reg_waker_async( &self, ctx: &mut Context, o_waker: &mut Option, ) -> Option> { self._reg_waker_async(ctx, o_waker); None } #[inline(always)] fn reg_select_waker(&self, _channel_id: usize, waker: &Arc) -> bool { trace_log!("{}: reg for select", self._tag); self.cell.replace(waker.clone_weak()); false } } struct RegistryMultiInner

{ queue: VecDeque>>, selectors: Vec, seq: u32, } impl

RegistryMultiInner

{ #[inline(always)] fn new() -> Self { Self { queue: VecDeque::with_capacity(32), selectors: Vec::with_capacity(32), seq: 0 } } // it's better to use non-atomic than fetch_XXX #[inline(always)] fn check_select(&self) -> u8 { if self.selectors.is_empty() { 0 } else { MULTI_HAS_SELECT } } // it's better to use non-atomic than fetch_XXX #[inline(always)] fn check_waker(&self) -> u8 { if self.queue.is_empty() { 0 } else { MULTI_HAS_WAKER } } } const MULTI_EMPTY: u8 = 0; const MULTI_HAS_SELECT: u8 = 1; const MULTI_HAS_WAKER: u8 = 2; pub struct RegistryMulti

{ state: AtomicU8, inner: Mutex>, _tag: &'static str, } impl RegistryMulti

{ #[inline(always)] fn reg_waker(&self, waker: &ArcWaker

) { let weak = waker.weak(); { let mut guard = self.inner.lock(); let seq = guard.seq.wrapping_add(1); guard.seq = seq; waker.set_seq(seq); if guard.queue.is_empty() { self.state.store(guard.check_select() | MULTI_HAS_WAKER, Ordering::SeqCst); } guard.queue.push_back(weak); } } #[inline(always)] fn _reg_waker_async( &self, ctx: &mut Context, o_waker: &mut Option>, payload: P, ) -> Option> { if let Some(waker) = o_waker.as_ref() { match waker.try_change_state(WakerState::Woken, WakerState::Init) { Ok(_) => { if waker.will_wake(ctx) { self.reg_waker(waker); return None; } } Err(state) => { if state < WakerState::Woken as u8 { if waker.will_wake(ctx) { trace_log!( "{} {:?}: will_wake {:?}", self._tag, tokio_task_id!(), waker ); // Normally only selection or multiplex future will get here. // No need to reg again, since waker is not consumed. return Some(Poll::Pending); } else { // Spurious woken by runtime, waker can not be re-used (issue 38) // If we se Woken here, only possible otherside has woken it if waker.get_state_relaxed() < WakerState::Woken as u8 { self._clear_wakers(waker, true); } trace_log!( "{} {:?}: drop waker {:?}", self._tag, tokio_task_id!(), waker ); } } else if state == WakerState::Closed as u8 { return Some(Poll::Ready(())); } else { panic!("state: impossible for async {:?}", state); } } } } let waker = ArcWaker::

::new_async(ctx, payload); self.reg_waker(&waker); o_waker.replace(waker); None } #[inline(always)] fn _reg_waker_blocking( &self, o_waker: &mut Option>, _cache: &WakerCache

, payload: P, ) { if let Some(waker) = o_waker.as_ref() { waker.reset_init(); self.reg_waker(waker); trace_log!("{}{:?}: re-reg {:?}", self._tag, tokio_task_id!(), waker); } else { debug_assert!(o_waker.is_none()); //let waker = cache.new_blocking(payload); let waker = ArcWaker::

::new_blocking(payload); self.reg_waker(&waker); trace_log!("{}{:?}: reg {:?}", self._tag, tokio_task_id!(), waker); o_waker.replace(waker); } } /// If trigger all selector while not empty. /// return Some((waker, again)) /// if there's more waker after pop_first, again=true #[inline(always)] fn pop_first(&self) -> Option<(ArcWaker

, Option)> { // This is a snapshot, it's safe to ignore the new situation after acquire lock let flag = self.state.load(Ordering::SeqCst); if flag == MULTI_EMPTY { return None; } { let mut guard = self.inner.lock(); if flag & MULTI_HAS_SELECT > 0 { for select in &guard.selectors { select.wake(); } } if flag & MULTI_HAS_WAKER > 0 { let mut has_pop = false; loop { if let Some(weak) = guard.queue.pop_front() { has_pop = true; if let Some(inner) = weak.upgrade() { if guard.queue.is_empty() { self.state.store(guard.check_select(), Ordering::SeqCst); return Some((ArcWaker::from_arc(inner), None)); } else { return Some((ArcWaker::from_arc(inner), Some(guard.seq))); } } } else { if has_pop { // might upgrade encounter weak previous loop self.state.store(guard.check_select(), Ordering::SeqCst); } return None; } } } // nothing changed, don't need to touch the state None } } /// ignore the selectors (since triggered in pop_first()) /// return the flags #[inline(always)] fn pop_again(&self) -> Option> { // This is a snapshot, it's safe to ignore the new situation after acquire lock let flag = self.state.load(Ordering::Acquire); if flag == MULTI_EMPTY { return None; } { let mut guard = self.inner.lock(); let mut has_pop = false; loop { if let Some(weak) = guard.queue.pop_front() { has_pop = true; if let Some(inner) = weak.upgrade() { if guard.queue.is_empty() { self.state.store(guard.check_select(), Ordering::SeqCst); } return Some(ArcWaker::from_arc(inner)); } } else { if has_pop { // might upgrade encounter weak previous loop self.state.store(guard.check_select(), Ordering::SeqCst); } return None; } } } } /// Call when waker is cancelled #[inline(always)] fn _clear_wakers(&self, old_waker: &ArcWaker

, oneshot: bool) { // Don't need accurate, it's optional if self.state.load(Ordering::Acquire) & MULTI_HAS_WAKER == 0 { return; } let old_seq = old_waker.get_seq(); // the macro yield true to stop, false to continue macro_rules! process { ($guard: expr, $weak: expr) => {{ if let Some(waker) = $weak.upgrade() { let _seq = waker.get_seq(); if _seq == old_seq { trace_log!("{}: clear {:?} hit", self._tag, waker); // XXX, it's possible to reuse the waker, leave it for future review true } else if _seq > old_seq { $guard.queue.push_front($weak); true } else { // There might be later waker cancel due to success sending before commit_waiting. // While earlier waker is still waiting. let state = waker.get_state(); if state < WakerState::Woken as u8 { $guard.queue.push_front($weak); true } else { if oneshot { trace_log!("{}: cancel {:?} one {}", self._tag, waker, old_seq); true } else { trace_log!("{}: cancel {:?}<{}", self._tag, waker, old_seq); false } } } } else { false } }}; } let mut guard = self.inner.lock(); if let Some(weak) = guard.queue.pop_front() { if process!(guard, weak) { if guard.queue.is_empty() { self.state.store(guard.check_select(), Ordering::SeqCst); } return; } loop { if let Some(_weak) = guard.queue.pop_front() { if process!(guard, _weak) { if guard.queue.is_empty() { self.state.store(guard.check_select(), Ordering::SeqCst); } return; } } else { // might upgrade encounter weak previous loop self.state.store(guard.check_select(), Ordering::SeqCst); return; } } } } #[inline(always)] fn _cache_waker(_o_waker: Option>, _cache: &WakerCache

) { // XXX: skip cache for now, until we find out miri report of race //if let Some(waker) = o_waker { // if waker.get_state() >= WakerState::Woken as u8 { // cache.push(waker); // } //} } } impl Registry for RegistryMulti

{ type Waker = ArcWaker

; #[inline(always)] fn get_waker_state(&self, o_waker: &Option>, order: Ordering) -> u8 { if let Some(waker) = o_waker { waker._get_state(order) } else { unreachable!(); } } /// Cancel outdated wakers until me, make sure it does not accumulate #[inline(always)] fn clear_wakers(&self, waker: &ArcWaker

) { self._clear_wakers(waker, false); } #[inline(always)] fn close(&self) { let mut guard = self.inner.lock(); for selector in &guard.selectors { selector.wake(); } while let Some(weak) = guard.queue.pop_front() { if let Some(waker) = weak.upgrade() { let _r = waker.close_wake(); trace_log!("close {} wake {:?} {}", self._tag, waker, _r); } } self.state.store(0, Ordering::SeqCst); } /// return waker queue size #[inline] fn len(&self) -> usize { let guard = self.inner.lock(); guard.queue.len() } #[inline(always)] fn commit_waiting(&self, o_waker: &Option>) -> u8 { if let Some(waker) = &o_waker { waker.commit_waiting() } else { unreachable!(); } } /// return false when waker is none #[inline(always)] fn abandon_waker(&self, waker: &ArcWaker

) -> Result<(), u8> { // which changes Waiting/Init to Closed match waker.abandon() { Ok(()) => { trace_log!("{}: abandon cancel {:?}", self._tag, waker); self.clear_wakers(waker); Ok(()) } Err(state) => Err(state), } } /// cancel one outdated waker, make sure it does not accumulate #[inline(always)] fn cancel_waker(&self, o_waker: &mut Option>) { if let Some(waker) = o_waker.take() { // If we see Woken here, the only possibility is that the other side has woken it if waker.get_state_relaxed() >= WakerState::Woken as u8 { return; } self._clear_wakers(&waker, true); } } } impl RegistrySend for RegistryMultiSend { #[inline(always)] fn new() -> Self { Self { inner: Mutex::new(RegistryMultiInner::new()), state: AtomicU8::new(0), _tag: "tx" } } #[inline(always)] fn use_direct_copy(&self) -> bool { self.state.load(Ordering::Relaxed) != MULTI_EMPTY } #[inline(always)] fn reg_waker_blocking( &self, o_waker: &mut Option>, cache: &WakerCache<*const T>, payload: *const T, ) { self._reg_waker_blocking(o_waker, cache, payload) } #[inline(always)] fn reg_waker_async( &self, ctx: &mut Context, o_waker: &mut Option>, ) -> Option> { self._reg_waker_async(ctx, o_waker, std::ptr::null_mut()) } /// remove outdated waker, make sure it does not accumulate. /// /// It's ok to set state with Relaxed here, two scenarios: /// * set Done while the state is Init; it does not matter whether the other thread sees it or not. /// * the other thread might have woken it in the process, but we are dropping it anyway, and then /// reg_waker with a new one.
#[inline(always)] fn cancel_reuse_waker( &self, o_waker: &mut Option>, state: WakerState, ) -> u8 { if let Some(waker) = o_waker.as_ref() { let cur_state = waker.get_state(); // If we see Woken here, the only possibility is that the other side has woken it if cur_state >= WakerState::Woken as u8 { trace_log!("{}: cancel_reuse {:?} {}", self._tag, waker, cur_state); if cur_state < state as u8 { state as u8 } else { cur_state } } else { self._clear_wakers(waker, true); let _ = o_waker.take(); state as u8 } } else { unreachable!(); } } #[inline(always)] fn fire(&self, _flavor: &F) -> WakeResult where F: FlavorImpl, { if let Some((waker, _last_seq)) = self.pop_first() { let r = waker.wake(); trace_log!("wake {} {:?} {:?}", self._tag, waker, r); if r.is_done() { return r; } drop(waker); if let Some(mut last_seq) = _last_seq { last_seq = last_seq.wrapping_sub(1); while let Some(_waker) = self.pop_again() { let r = _waker.wake(); trace_log!("wake {} {:?} {:?}", self._tag, _waker, r); if r.is_done() { return r; } // The latest seq in RegistryMulti is always last_waker.get_seq() +1 // Because some waker (issued by sink / stream) might be INIT all the time, // this prevents a dead-loop situation when they are woken up and re-registered again.
if _waker.get_seq() >= last_seq { trace_log!("wake {} stop at {}", self._tag, last_seq); return WakeResult::Next; } } } } WakeResult::Next } #[inline(always)] fn cache_waker(&self, o_waker: Option>, cache: &WakerCache<*const T>) { Self::_cache_waker(o_waker, cache); } } impl RegistryRecv for RegistryMultiRecv { #[inline(always)] fn new() -> Self { Self { inner: Mutex::new(RegistryMultiInner::new()), state: AtomicU8::new(0), _tag: "rx" } } #[inline(always)] fn reg_waker_blocking(&self, o_waker: &mut Option>, cache: &WakerCache<()>) { self._reg_waker_blocking(o_waker, cache, ()) } #[inline(always)] fn reg_waker_async( &self, ctx: &mut Context, o_waker: &mut Option>, ) -> Option> { self._reg_waker_async(ctx, o_waker, ()) } #[inline(always)] fn fire(&self) { if let Some((waker, _last_seq)) = self.pop_first() { let r = waker.wake(); trace_log!("wake {} {:?} {:?}", self._tag, waker, r); if r.is_done() { return; } drop(waker); if let Some(mut last_seq) = _last_seq { last_seq = last_seq.wrapping_sub(1); while let Some(_waker) = self.pop_again() { let r = _waker.wake(); trace_log!("wake {} {:?} {:?}", self._tag, _waker, r); if r.is_done() { return; } // The latest seq in RegistryMulti is always last_waker.get_seq() +1 // Because some waker (issued by sink / stream) might be INIT all the time, // this prevents a dead-loop situation when they are woken up and re-registered again.
if _waker.get_seq() >= last_seq { trace_log!("wake {} stop at {}", self._tag, last_seq); return; } } } } } #[inline(always)] fn cache_waker(&self, o_waker: Option>, cache: &WakerCache<()>) { Self::_cache_waker(o_waker, cache); } #[inline(always)] fn reg_select_waker(&self, channel_id: usize, waker: &Arc) -> bool { trace_log!("{}: reg for select", self._tag); let mut guard = self.inner.lock(); if guard.selectors.is_empty() { self.state.store(guard.check_waker() | MULTI_HAS_SELECT, Ordering::SeqCst); } guard.selectors.push(SelectWaker::to_wrapper(waker.clone(), channel_id)); true } #[inline(always)] fn cancel_select_waker(&self, waker: &Arc) { let mut guard = self.inner.lock(); if let Some((i, _)) = guard.selectors.iter().enumerate().find(|&(_, entry)| entry.eq(waker)) { guard.selectors.remove(i); } if guard.selectors.is_empty() { self.state.store(guard.check_waker(), Ordering::SeqCst); } } } // Due to it's type alias in crate::select::Mux, should be pub pub struct SelectWakerWrapper(Arc, usize); impl SelectWakerWrapper { #[inline(always)] pub(crate) fn wake(&self) { if let Some(waker) = self.0.cell.pop() { trace_log!("rx: wake select"); self.0.hint.store(self.1, Ordering::Release); waker.wake(); } } #[inline(always)] pub(crate) fn eq(&self, waker: &Arc) -> bool { Arc::ptr_eq(&self.0, waker) } } // For multiplex impl Registry for SelectWakerWrapper { type Waker = ArcWaker<()>; #[inline(always)] fn get_waker_state(&self, _o_waker: &Option>, _order: Ordering) -> u8 { unreachable!(); } #[inline(always)] fn close(&self) { // decrease the opened_channels count to hint Multiplex self.0.close(); self.wake(); } } // For multiplex impl RegistryRecv for SelectWakerWrapper { fn new() -> Self { unreachable!(); } #[inline(always)] fn fire(&self) { self.wake(); } fn reg_select_waker(&self, _channel_id: usize, _waker: &Arc) -> bool { unreachable!(); } } pub(crate) struct SelectWaker { cell: WeakCell>, // does not need to be correct, just a hint for the try_select hint: AtomicUsize, 
o_waker: UnsafeCell>>, // For multiplex, not for select opened_channels: AtomicUsize, } unsafe impl Send for SelectWaker {} unsafe impl Sync for SelectWaker {} impl SelectWaker { #[inline(always)] pub fn new() -> Self { Self { cell: WeakCell::new(), hint: AtomicUsize::new(0), o_waker: UnsafeCell::new(None), opened_channels: AtomicUsize::new(0), } } #[inline(always)] pub fn init_blocking(&self) { let weak = if let Some(waker) = self.get_waker().as_ref() { waker.reset_init(); waker.weak() } else { let waker = ArcWaker::new_blocking(()); let weak = waker.weak(); self.get_waker().replace(waker); weak }; self.cell.replace(weak); self.hint.store(0, Ordering::Release) } #[allow(dead_code)] #[inline(always)] pub fn init_async(&self, ctx: &mut Context) { let waker = ArcWaker::new_async(ctx, ()); let weak = waker.weak(); self.get_waker().replace(waker); self.cell.replace(weak); self.hint.store(0, Ordering::Release) } #[inline(always)] fn get_waker(&self) -> &mut Option> { unsafe { &mut *self.o_waker.get() } } #[inline(always)] fn clone_weak(&self) -> Weak> { self.get_waker().as_ref().unwrap().weak() } #[inline(always)] pub fn add_opened(&self) { self.opened_channels.fetch_add(1, Ordering::SeqCst); } #[inline(always)] pub fn get_opened_count(&self) -> usize { self.opened_channels.load(Ordering::SeqCst) } #[inline(always)] pub fn to_wrapper(self: Arc, idx: usize) -> SelectWakerWrapper { SelectWakerWrapper(self, idx) } #[inline(always)] pub fn get_hint(&self) -> usize { compiler_fence(Ordering::AcqRel); self.hint.load(Ordering::Relaxed) } #[inline(always)] pub fn close(&self) { self.opened_channels.fetch_sub(1, Ordering::SeqCst); } #[inline(always)] pub fn get_waker_state(&self, order: Ordering) -> u8 { self.get_waker().as_ref().unwrap()._get_state(order) } } #[cfg(test)] mod tests { use super::*; use crate::waker::ArcWaker; #[test] fn print_waker_registry_size() { use std::mem::size_of; println!("RegistryMultiSend size {}", size_of::>()); println!("RegistryMultiRecv size {}", 
size_of::()); println!("RegistrySingle size {}", size_of::()); println!("RegistryMulti<()> size {}", size_of::()); } #[test] fn test_registry_multi_pop() { let reg = RegistryMultiRecv::new(); // test push let waker1 = ArcWaker::new_blocking(()); assert_eq!(reg.len(), 0); reg.reg_waker(&waker1); assert_eq!(waker1.get_state(), WakerState::Init as u8); assert_eq!(waker1.get_seq(), 1); assert_eq!(reg.len(), 1); let waker2 = ArcWaker::new_blocking(()); reg.reg_waker(&waker2); waker2.commit_waiting(); assert_eq!(waker2.get_seq(), 2); assert_eq!(reg.len(), 2); assert_eq!(waker2.get_seq(), waker1.get_seq() + 1); assert_eq!(waker2.get_state(), WakerState::Waiting as u8); if let Some((w, seq)) = reg.pop_first() { assert!(w.wake() == WakeResult::Next); assert!(seq.is_some()); } assert_eq!(waker1.get_state(), WakerState::Woken as u8); assert_eq!(reg.len(), 1); if let Some(w) = reg.pop_again() { assert!(w.wake() == WakeResult::Woken); } assert_eq!(waker2.get_state(), WakerState::Woken as u8); assert_eq!(reg.len(), 0); } #[test] fn test_registry_multi_clear_waiting() { let reg = RegistryMultiRecv::new(); // test seq let waker3 = ArcWaker::new_blocking(()); reg.reg_waker(&waker3); waker3.commit_waiting(); assert_eq!(waker3.get_state(), WakerState::Waiting as u8); let waker4 = ArcWaker::new_blocking(()); reg.reg_waker(&waker4); // Init assert_eq!(waker4.get_state(), WakerState::Init as u8); let num_workers = reg.len(); // Because waker3 not woken up, waker4 is not clear reg.clear_wakers(&waker4); assert_eq!(reg.len(), num_workers); for _ in 0..10 { let _waker = ArcWaker::new_blocking(()); reg.reg_waker(&_waker); } let num_workers = reg.len(); assert_eq!(reg.len(), num_workers); } #[test] fn test_registry_multi_clear_oneshot() { let reg = RegistryMultiRecv::new(); // test seq let waker1 = ArcWaker::new_blocking(()); reg.reg_waker(&waker1); assert_eq!(waker1.get_state(), WakerState::Init as u8); let waker2 = ArcWaker::new_blocking(()); reg.reg_waker(&waker2); // Init 
waker2.commit_waiting(); assert_eq!(waker2.get_state(), WakerState::Waiting as u8); for _ in 0..10 { let _waker = ArcWaker::new_blocking(()); reg.reg_waker(&_waker); } let num_workers = reg.len(); println!("clear waker2 oneshot seq {}", waker2.get_seq()); reg.cancel_waker(&mut Some(waker2)); assert_eq!(reg.len(), num_workers); // Only nothing happen. reg.cancel_waker(&mut Some(waker1)); assert_eq!(reg.len(), num_workers - 1); // Only waker1 is removed. } #[test] fn test_registry_multi_clear() { let reg = RegistryMultiRecv::new(); // test seq let waker1 = ArcWaker::new_blocking(()); reg.reg_waker(&waker1); assert_eq!(waker1.get_state(), WakerState::Init as u8); let waker2 = ArcWaker::new_blocking(()); reg.reg_waker(&waker2); // Init drop(waker2); // waker4 is dropped, weak is left for _ in 0..10 { let _waker = ArcWaker::new_blocking(()); reg.reg_waker(&_waker); } let waker3 = ArcWaker::new_blocking(()); reg.reg_waker(&waker3); let _num_workers = reg.len(); // Keep for debugging context, though not used in assertion println!("clear waker3 seq={}", waker3.get_seq()); reg.clear_wakers(&waker3); // nothing happen, because waker3 is there assert_eq!(reg.len(), 13); reg.clear_wakers(&waker1); assert_eq!(reg.len(), 12); reg.clear_wakers(&waker3); assert_eq!(reg.len(), 0); } #[test] fn test_registry_multi_close() { let reg = RegistryMultiRecv::new(); println!("test close"); for _ in 0..10 { let _waker = ArcWaker::new_blocking(()); reg.reg_waker(&_waker); } assert!(reg.len() > 0); reg.close(); assert_eq!(reg.len(), 0); } } ================================================ FILE: src/weak.rs ================================================ use crate::flavor::FlavorMP; use crate::{shared::*, SenderType}; use std::sync::Arc; /// A weak reference of SenderType /// /// Can be obtain from [MTx::downgrade](crate::MTx::downgrade) or [MAsyncTx::downgrade](crate::MAsyncTx::downgrade). 
/// When the number of valid senders is non-zero, one can try [upgrade](WeakTx::upgrade) to a [MTx](crate::MTx) or [MAsyncTx](crate::MAsyncTx). pub struct WeakTx(pub(crate) Arc>); impl WeakTx { /// Upgrade to MTx or MAsyncTx (Only allowed for mpsc or mpmc) /// /// # Example /// /// ``` /// use crossfire::*; /// let (tx, rx) = mpsc::bounded_blocking::(100); /// let weak_tx = tx.downgrade(); /// let tx_clone = weak_tx.upgrade::>().unwrap(); /// drop(tx); /// drop(tx_clone); /// assert!(weak_tx.upgrade::>().is_none()); /// assert_eq!(weak_tx.get_tx_count(), 0); /// drop(rx); /// ``` #[inline] pub fn upgrade>(&self) -> Option { if self.0.try_add_tx() { Some(S::new(self.0.clone())) } else { None } } #[inline(always)] pub fn get_tx_count(&self) -> usize { self.0.get_tx_count() } #[inline(always)] pub fn get_rx_count(&self) -> usize { self.0.get_rx_count() } } ================================================ FILE: test-suite/Cargo.toml ================================================ [package] name = "crossfire-test" version = "0.0.1" authors = ["plan "] edition = "2021" license = "Apache-2.0" readme = "README.md" [dependencies] crossfire = {path="../"} async-std = {version = "1", optional=true} log = { version="0"} smol = {version = "2", optional=true } compio = { version = "0.17", optional = true, features = ["runtime", "dispatcher", "polling"], default-features = false} tokio = { version = "1", optional = true, features = ["sync", "rt-multi-thread", "rt", "macros"] } fastrand = "2.3" rstest = "0" captains-log = {version="0.13", features = ["ringfile", "tracing"] } criterion2 = { version="3.0.2", features = ["async"]} crossbeam-channel = "0.5" crossbeam-utils = "0.8" flume = {version="0.11", features= ["async"] } kanal = {version="0.1"} async-channel = {version="2.5.0"} futures-util = {version="0.3", default-features = false} async-oneshot = "0.5" oneshot = "0.1" [features] default = [] tokio = ["crossfire/tokio", "dep:tokio"] async_std = ["dep:async-std",
"crossfire/async_std"] smol = ["dep:smol"] compio = ["dep:compio"] # This switch on multi thread test for compio compio_dispatcher = ["dep:compio"] # for test workflow debugging trace_log = ["crossfire/trace_log"] # test invoking timer function in async runtime, try to opt-out time driver for miri time = ["compio?/time", "tokio?/time"] [[bench]] name = "crossfire" harness = false [[bench]] name = "crossfire_select" harness = false [[bench]] name = "crossbeam" harness = false [[bench]] name = "flume" harness = false [[bench]] name = "kanal" harness = false [[bench]] name = "tokio" harness = false [[bench]] name = "async_channel" harness = false [[bench]] name = "extra" harness = false ================================================ FILE: test-suite/benches/async_channel.rs ================================================ use criterion::*; use std::time::Duration; #[allow(unused_imports)] mod common; use common::*; async fn _async_channel_unbounded_async(tx_count: usize, rx_count: usize, msg_count: usize) { let (tx, rx) = async_channel::unbounded(); let mut th_tx = Vec::new(); let mut th_rx = Vec::new(); let mut send_counter: usize = 0; let _send_counter = msg_count / tx_count; for _tx_i in 0..tx_count { send_counter += _send_counter; let _tx = tx.clone(); th_tx.push(async_spawn!(async move { for i in 0.._send_counter { if let Err(e) = _tx.send(i).await { panic!("send error: {:?}", e); } } })); } drop(tx); let mut recv_counter = 0; for _ in 0..(rx_count - 1) { let _rx = rx.clone(); th_rx.push(async_spawn!(async move { let mut i = 0; loop { match _rx.recv().await { Ok(_) => { i += 1; } Err(_) => { break; } } } i })); } loop { match rx.recv().await { Ok(_) => { recv_counter += 1; } Err(_) => { break; } } } for th in th_tx { let _ = th.await; } for th in th_rx { recv_counter += async_join_result!(th); } assert_eq!(send_counter, recv_counter); } async fn _async_channel_bounded_async( bound: usize, tx_count: usize, rx_count: usize, msg_count: usize, ) { let (tx, rx) = 
async_channel::bounded(bound); let mut th_tx = Vec::new(); let mut th_rx = Vec::new(); let mut send_counter: usize = 0; let _send_counter = msg_count / tx_count; for _tx_i in 0..tx_count { send_counter += _send_counter; let _tx = tx.clone(); th_tx.push(async_spawn!(async move { for i in 0.._send_counter { if let Err(e) = _tx.send(i).await { panic!("send error: {:?}", e); } } })); } drop(tx); let mut recv_counter = 0; for _ in 0..(rx_count - 1) { let _rx = rx.clone(); th_rx.push(async_spawn!(async move { let mut i = 0; loop { match _rx.recv().await { Ok(_) => { i += 1; } Err(_) => { break; } } } i })); } loop { match rx.recv().await { Ok(_) => { recv_counter += 1; } Err(_) => { break; } } } for th in th_tx { let _ = th.await; } for th in th_rx { recv_counter += async_join_result!(th); } assert_eq!(send_counter, recv_counter); } fn bench_async_channel_unbounded_async(c: &mut Criterion) { let mut group = c.benchmark_group("async_channel_unbounded_async"); group.significance_level(0.1).sample_size(50); group.measurement_time(Duration::from_secs(20)); for input in n_1() { let param = Concurrency { tx_count: input, rx_count: 1 }; group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_with_input(BenchmarkId::new("mpsc", ¶m), ¶m, |b, i| { b.to_async(BenchExecutor()) .iter(|| _async_channel_unbounded_async(i.tx_count, i.rx_count, ONE_MILLION)) }); } for input in n_n() { let param = Concurrency { tx_count: input.0, rx_count: input.1 }; group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_with_input(BenchmarkId::new("mpmc", ¶m), ¶m, |b, i| { b.to_async(BenchExecutor()) .iter(|| _async_channel_unbounded_async(i.tx_count, i.rx_count, ONE_MILLION)) }); } } fn bench_async_channel_bounded_async(c: &mut Criterion) { let mut group = c.benchmark_group("async_channel_bounded_async"); group.significance_level(0.1).sample_size(50); group.measurement_time(Duration::from_secs(20)); for input in n_1() { let param = Concurrency { tx_count: input, 
rx_count: 1 }; group.throughput(Throughput::Elements(TEN_THOUSAND as u64)); group.bench_with_input(BenchmarkId::new("mpsc size 1", ¶m), ¶m, |b, i| { b.to_async(BenchExecutor()) .iter(|| _async_channel_bounded_async(1, i.tx_count, i.rx_count, TEN_THOUSAND)) }); } for input in n_1() { let param = Concurrency { tx_count: input, rx_count: 1 }; group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_with_input(BenchmarkId::new("mpsc size 100", ¶m), ¶m, |b, i| { b.to_async(BenchExecutor()) .iter(|| _async_channel_bounded_async(100, i.tx_count, i.rx_count, ONE_MILLION)) }); } for input in n_n() { let param = Concurrency { tx_count: input.0, rx_count: input.1 }; group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_with_input(BenchmarkId::new("mpmc size 100", ¶m), ¶m, |b, i| { b.to_async(BenchExecutor()) .iter(|| _async_channel_bounded_async(100, i.tx_count, i.rx_count, ONE_MILLION)) }); } } criterion_group!(benches, bench_async_channel_bounded_async, bench_async_channel_unbounded_async,); criterion_main!(benches); ================================================ FILE: test-suite/benches/common.rs ================================================ use std::fmt; use std::future::Future; use criterion::async_executor::AsyncExecutor; #[allow(dead_code)] pub const ONE_MILLION: usize = 1000000; #[allow(dead_code)] pub const TEN_THOUSAND: usize = 10000; #[allow(dead_code)] pub struct Concurrency { pub tx_count: usize, pub rx_count: usize, } impl fmt::Display for Concurrency { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}x{}", self.tx_count, self.rx_count) } } pub struct BenchExecutor(); impl AsyncExecutor for BenchExecutor { fn block_on(&self, future: impl Future) -> T { #[cfg(feature = "smol")] { use std::num::NonZero; use std::thread; let num_threads = thread::available_parallelism().unwrap_or(NonZero::new(1).unwrap()); unsafe { std::env::set_var("SMOL_THREADS", num_threads.to_string()) }; smol::block_on(future) } 
#[cfg(not(feature = "smol"))] { #[cfg(feature = "async_std")] { async_std::task::block_on(future) } #[cfg(not(feature = "async_std"))] { tokio::runtime::Builder::new_multi_thread() .enable_all() .build() .unwrap() .block_on(future) } } } } #[allow(unused_macros)] macro_rules! async_spawn { ($f: expr) => {{ #[cfg(feature = "smol")] { smol::spawn($f) } #[cfg(not(feature = "smol"))] { #[cfg(feature = "async_std")] { async_std::task::spawn($f) } #[cfg(any(feature = "tokio", not(feature = "async_std")))] { tokio::spawn($f) } } }}; } pub(super) use async_spawn; #[allow(unused_macros)] macro_rules! async_join_result { ($th: expr) => {{ #[cfg(feature = "smol")] { $th.await } #[cfg(not(feature = "smol"))] { #[cfg(feature = "async_std")] { $th.await } #[cfg(not(feature = "async_std"))] { $th.await.expect("join") } } }}; } pub(super) use async_join_result; #[allow(dead_code)] #[inline(always)] pub fn n_n() -> Vec<(usize, usize)> { vec![(2, 2), (4, 4), (8, 8), (16, 16)] } #[inline(always)] pub fn n_1() -> Vec { vec![1, 2, 4, 8, 16] } ================================================ FILE: test-suite/benches/crossbeam.rs ================================================ use criterion::*; use crossbeam_utils::sync::WaitGroup; use std::thread; use std::time::Duration; #[allow(unused_imports)] mod common; use common::*; fn _crossbeam_bounded_sync(bound: usize, tx_count: usize, rx_count: usize, msg_count: usize) { let (tx, rx) = crossbeam_channel::bounded::(bound); let mut th_tx = Vec::new(); let mut th_rx = Vec::new(); let mut send_counter: usize = 0; let _send_counter = msg_count / tx_count; for _ in 0..tx_count { send_counter += _send_counter; let _tx = tx.clone(); th_tx.push(thread::spawn(move || { for i in 0.._send_counter { _tx.send(i).expect("send"); } })); } drop(tx); let mut recv_counter = 0; for _ in 0..(rx_count - 1) { let _rx = rx.clone(); th_rx.push(thread::spawn(move || -> usize { let mut i = 0; loop { match _rx.recv() { Ok(_) => { i += 1; } Err(_) => { break; } } } i 
})); } loop { match rx.recv() { Ok(_) => { recv_counter += 1; } Err(_) => { break; } } } for th in th_tx { let _ = th.join(); } for th in th_rx { if let Ok(count) = th.join() { recv_counter += count; } } assert_eq!(send_counter, recv_counter); } fn _crossbeam_unbounded_sync(tx_count: usize, rx_count: usize, msg_count: usize) { let (tx, rx) = crossbeam_channel::unbounded::(); let mut th_tx = Vec::new(); let mut th_rx = Vec::new(); let mut send_counter: usize = 0; let _send_counter = msg_count / tx_count; for _ in 0..tx_count { send_counter += _send_counter; let _tx = tx.clone(); th_tx.push(thread::spawn(move || { for i in 0.._send_counter { _tx.send(i).expect("send"); } })); } drop(tx); let mut recv_counter = 0; for _ in 0..(rx_count - 1) { let _rx = rx.clone(); th_rx.push(thread::spawn(move || -> usize { let mut i = 0; loop { match _rx.recv() { Ok(_) => { i += 1; } Err(_) => { break; } } } i })); } loop { match rx.recv() { Ok(_) => { recv_counter += 1; } Err(_) => { break; } } } for th in th_tx { let _ = th.join(); } for th in th_rx { if let Ok(count) = th.join() { recv_counter += count; } } assert_eq!(send_counter, recv_counter); } fn _crossbeam_select_mpsc(num_channels: usize, bound: usize, total_msgs: usize, is_bias: bool) { let msg_count_per_channel = total_msgs / num_channels; let mut rxs = Vec::new(); let mut th_tx = Vec::new(); for _ in 0..num_channels { let (tx, rx) = crossbeam_channel::bounded::(bound); rxs.push(rx); th_tx.push(thread::spawn(move || { for i in 0..msg_count_per_channel { tx.send(i).expect("send"); } })); } // Receive all messages using select - reuse Select instance let mut recv_counter = 0; let mut select = if is_bias { crossbeam_channel::Select::new_biased() } else { crossbeam_channel::Select::new() }; let mut handles = Vec::with_capacity(num_channels); for rx in &rxs { let op = select.recv(rx); handles.push(op); } while recv_counter < total_msgs { // Perform the selection let oper = select.select(); let i = oper.index(); match 
oper.recv(&rxs[i]) { Ok(_) => recv_counter += 1, Err(_) => { // https://docs.rs/crossbeam-channel/latest/crossbeam_channel/struct.Select.html#method.remove // If new operations are added after removing some, the indices of removed operations will not be reused select.remove(i); } } } assert_eq!(total_msgs, recv_counter); // Wait for all senders to finish before receiving for th in th_tx { let _ = th.join(); } } fn bench_crossbeam_bounded_sync(c: &mut Criterion) { let mut group = c.benchmark_group("crossbeam_bounded"); group.significance_level(0.1).sample_size(50); group.measurement_time(Duration::from_secs(20)); for input in n_1() { let param = Concurrency { tx_count: input, rx_count: 1 }; group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_with_input(BenchmarkId::new("mpsc size 1", input), ¶m, |b, i| { b.iter(|| _crossbeam_bounded_sync(1, i.tx_count, i.rx_count, ONE_MILLION)) }); } for input in n_1() { let param = Concurrency { tx_count: input, rx_count: 1 }; group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_with_input(BenchmarkId::new("mpsc size 100", input), ¶m, |b, i| { b.iter(|| _crossbeam_bounded_sync(100, i.tx_count, i.rx_count, ONE_MILLION)) }); } for input in n_n() { let param = Concurrency { tx_count: input.0, rx_count: input.1 }; group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_with_input( BenchmarkId::new("mpmc size 100", param.to_string()), ¶m, |b, i| b.iter(|| _crossbeam_bounded_sync(100, i.tx_count, i.rx_count, ONE_MILLION)), ); } group.finish(); } fn bench_crossbeam_unbounded_sync(c: &mut Criterion) { let mut group = c.benchmark_group("crossbeam_unbounded"); group.significance_level(0.1).sample_size(50); group.measurement_time(Duration::from_secs(20)); for input in n_1() { let param = Concurrency { tx_count: input, rx_count: 1 }; group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_with_input(BenchmarkId::new("mpsc", input), ¶m, |b, i| { b.iter(|| 
_crossbeam_unbounded_sync(i.tx_count, i.rx_count, ONE_MILLION)) }); } for input in n_n() { let param = Concurrency { tx_count: input.0, rx_count: input.1 }; group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_with_input(BenchmarkId::new("mpmc", param.to_string()), ¶m, |b, i| { b.iter(|| _crossbeam_unbounded_sync(i.tx_count, i.rx_count, ONE_MILLION)) }); } group.finish(); } fn bench_crossbeam_select_mpsc(c: &mut Criterion) { let mut group = c.benchmark_group("crossbeam_select"); group.significance_level(0.1).sample_size(50); group.measurement_time(Duration::from_secs(20)); let param = (4, 100, ONE_MILLION); // 3 channels, bound=100, 1M/3 messages per channel group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_with_input( BenchmarkId::new("select_mpsc_4_channels_bias", "4"), ¶m, |b, &(num_channels, bound, msg_count_per_channel)| { b.iter(|| _crossbeam_select_mpsc(num_channels, bound, msg_count_per_channel, true)) }, ); group.bench_with_input( BenchmarkId::new("select_mpsc_4_channels_fair", "4"), ¶m, |b, &(num_channels, bound, msg_count_per_channel)| { b.iter(|| _crossbeam_select_mpsc(num_channels, bound, msg_count_per_channel, false)) }, ); group.finish(); } fn bench_crossbeam_wait_group(c: &mut Criterion) { let mut group = c.benchmark_group("crossbeam_wait_group"); let count = TEN_THOUSAND; group.throughput(Throughput::Elements(count as u64)); group.bench_function("add_guard", |b| { let wg = WaitGroup::new(); b.iter(|| { let mut guards: Vec = Vec::with_capacity(count); for _i in 0..count { guards.push(wg.clone()); } // guards are dropped here }); }); group.finish(); } criterion_group!( benches, bench_crossbeam_bounded_sync, bench_crossbeam_unbounded_sync, bench_crossbeam_select_mpsc, bench_crossbeam_wait_group ); criterion_main!(benches); ================================================ FILE: test-suite/benches/crossfire.rs ================================================ use criterion::*; use crossfire::waitgroup::{WaitGroup, 
WaitGroupGuard}; use crossfire::*; use std::thread; use std::time::Duration; #[allow(unused_imports)] mod common; use common::*; // Initialize logger for benchmarks fn init_logger() { #[cfg(feature = "trace_log")] { use captains_log::*; use std::sync::Once; static INIT: Once = Once::new(); INIT.call_once(|| { let format = recipe::LOG_FORMAT_THREADED_DEBUG; let ring = ringfile::LogRingFile::new( "/tmp/crossfire_ring.log", 500 * 1024 * 1024, Level::Debug, format, ); let mut config = Builder::default() .signal(signal_consts::SIGINT) .signal(signal_consts::SIGTERM) .add_sink(ring) .add_sink(LogConsole::new( ConsoleTarget::Stdout, Level::Info, recipe::LOG_FORMAT_DEBUG, )); config.dynamic = true; config.panic = true; config.build().expect("log_setup"); }); } } macro_rules! bench_bounded_blocking { ($group: expr, $name: expr, $tx: expr, $rx: expr, $new: expr, $size: expr, $count: expr) => { bench_bounded_blocking!($group, $name, $tx, $rx, $new, $size, $count, 20, 100); }; ($group: expr, $name: expr, $tx: expr, $rx: expr, $new: expr, $size: expr, $count: expr, $time: expr, $sample: expr) => { $group.throughput(Throughput::Elements($count as u64)); $group.significance_level(0.1).sample_size($sample); $group.measurement_time(Duration::from_secs($time)); let param = Concurrency { tx_count: $tx, rx_count: $rx }; $group.bench_with_input( BenchmarkId::new(format!("{}_{}", $name, $size).to_string(), ¶m), ¶m, |b, i| { b.iter(move || { let (tx, rx) = $new($size); _crossfire_blocking( tx.clone_to_vec(i.tx_count), rx.clone_to_vec(i.rx_count), $count, ); }) }, ); }; } macro_rules! 
bench_unbounded_blocking { ($group: expr, $name: expr, $tx: expr, $rx: expr, $new: expr, $count: expr) => { bench_unbounded_blocking!($group, $name, $tx, $rx, $new, $count, 20, 100); }; ($group: expr, $name: expr, $tx: expr, $rx: expr, $new: expr, $count: expr, $time: expr, $sample: expr) => { $group.throughput(Throughput::Elements($count as u64)); $group.significance_level(0.1).sample_size($sample); $group.measurement_time(Duration::from_secs($time)); let param = Concurrency { tx_count: $tx, rx_count: $rx }; $group.bench_with_input( BenchmarkId::new(format!("{}", $name).to_string(), ¶m), ¶m, |b, i| { b.iter(move || { let (tx, rx) = $new(); _crossfire_blocking( tx.clone_to_vec(i.tx_count), rx.clone_to_vec(i.rx_count), $count, ); }) }, ); }; } macro_rules! bench_bounded_async { ($group: expr, $name: expr, $tx: expr, $rx: expr, $new: expr, $size: expr, $count: expr) => { bench_bounded_async!($group, $name, $tx, $rx, $new, $size, $count, 20, 100); }; ($group: expr, $name: expr, $tx: expr, $rx: expr, $new: expr, $size: expr, $count: expr, $time: expr, $sample: expr) => { $group.throughput(Throughput::Elements($count as u64)); $group.significance_level(0.1).sample_size($sample); $group.measurement_time(Duration::from_secs($time)); let param = Concurrency { tx_count: $tx, rx_count: $rx }; $group.bench_with_input( BenchmarkId::new(format!("{}_{}", $name, $size).to_string(), ¶m), ¶m, |b, i| { b.to_async(BenchExecutor()).iter(async || { let (tx, rx) = $new($size); _crossfire_bounded_async( tx.clone_to_vec(i.tx_count), rx.clone_to_vec(i.rx_count), $count, ) .await; }) }, ); }; } macro_rules! 
bench_unbounded_async { ($group: expr, $name: expr, $tx: expr, $rx: expr, $new: expr, $count: expr) => { bench_unbounded_async!($group, $name, $tx, $rx, $new, $count, 20, 100); }; ($group: expr, $name: expr, $tx: expr, $rx: expr, $new: expr, $count: expr, $time: expr, $sample: expr) => { $group.throughput(Throughput::Elements($count as u64)); $group.significance_level(0.1).sample_size($sample); $group.measurement_time(Duration::from_secs($time)); let param = Concurrency { tx_count: $tx, rx_count: $rx }; $group.bench_with_input( BenchmarkId::new(format!("{}", $name).to_string(), ¶m), ¶m, |b, i| { b.to_async(BenchExecutor()).iter(async || { let (tx, rx) = $new(); _crossfire_blocking_async( tx.clone_to_vec(i.tx_count), rx.clone_to_vec(i.rx_count), $count, ) .await; }) }, ); }; } fn _crossfire_blocking, R: BlockingRxTrait>( txs: Vec, mut rxs: Vec, msg_count: usize, ) { let mut th_tx = Vec::new(); let mut th_rx = Vec::new(); let mut send_counter: usize = 0; let _send_counter = msg_count / txs.len(); for (i, _tx) in txs.into_iter().enumerate() { send_counter += _send_counter; let th_builder = thread::Builder::new().name(format!("sender{}", i)); th_tx.push( th_builder .spawn(move || { for i in 0.._send_counter { _tx.send(i).expect("send"); } crossfire::trace_log!("sender exit {:?}", _tx); }) .expect("spawn"), ); } let rx_count = rxs.len(); for i in 0..(rx_count - 1) { let _rx = rxs.pop().unwrap(); let th_builder = thread::Builder::new().name(format!("receiver{}", i)); th_rx.push( th_builder .spawn(move || -> usize { let mut i = 0; loop { match _rx.recv() { Ok(_) => { i += 1; } Err(_) => { break; } } } i }) .expect("spawn"), ); } let rx = rxs.pop().unwrap(); let mut recv_counter = 0; loop { match rx.recv() { Ok(_) => { recv_counter += 1; } Err(_) => { break; } } } for th in th_tx { let _ = th.join().unwrap(); } for th in th_rx { recv_counter += th.join().unwrap(); } assert_eq!(send_counter, recv_counter); crossfire::trace_log!("---"); } async fn _crossfire_blocking_async, 
R: AsyncRxTrait>( txs: Vec, mut rxs: Vec, msg_count: usize, ) { let mut send_counter: usize = 0; let _send_counter = msg_count / txs.len(); let mut th_tx = Vec::new(); for tx in txs { send_counter += _send_counter; th_tx.push(thread::spawn(move || { for i in 0.._send_counter { if let Err(e) = tx.send(i) { panic!("send error: {:?}", e); } } })); } let mut recv_counter = 0; let rx_count = rxs.len(); let mut th_rx = Vec::new(); for _ in 0..(rx_count - 1) { let _rx = rxs.pop().unwrap(); th_rx.push(async_spawn!(async move { let mut i = 0; loop { match _rx.recv().await { Ok(_) => { i += 1; } Err(_) => { break; } } } i })); } let rx = rxs.pop().unwrap(); loop { match rx.recv().await { Ok(_) => { recv_counter += 1; } Err(_) => { break; } } } assert_eq!(rxs.len(), 0); for th in th_tx { let _ = th.join().unwrap(); } for th in th_rx { recv_counter += async_join_result!(th); } assert_eq!(send_counter, recv_counter); } async fn _crossfire_bounded_async, R: AsyncRxTrait>( txs: Vec, mut rxs: Vec, msg_count: usize, ) { let mut send_counter: usize = 0; let _send_counter = msg_count / txs.len(); let mut th_tx = Vec::new(); let mut th_rx = Vec::new(); for tx in txs { send_counter += _send_counter; th_tx.push(async_spawn!(async move { for i in 0.._send_counter { if let Err(e) = tx.send(i).await { panic!("send error: {:?}", e); } } })); } let mut recv_counter = 0; let rx_count = rxs.len(); for _ in 0..(rx_count - 1) { let _rx = rxs.pop().unwrap(); th_rx.push(async_spawn!(async move { let mut i = 0; loop { match _rx.recv().await { Ok(_) => { i += 1; } Err(_) => { break; } } } i })); } let rx = rxs.pop().unwrap(); loop { match rx.recv().await { Ok(_) => { recv_counter += 1; } Err(_) => { break; } } } for th in th_tx { let _ = th.await; } for th in th_rx { recv_counter += async_join_result!(th); } assert_eq!(send_counter, recv_counter); } fn crossfire_bounded_1_blocking_1_1(c: &mut Criterion) { detect_backoff_cfg(); init_logger(); let mut group = 
c.benchmark_group("crossfire_bounded_1_blocking_1_1"); bench_bounded_blocking!(group, "spsc", 1, 1, spsc::bounded_blocking, 1, TEN_THOUSAND, 10, 100); bench_bounded_blocking!(group, "mpsc", 1, 1, mpsc::bounded_blocking, 1, TEN_THOUSAND, 10, 100); bench_bounded_blocking!(group, "mpmc", 1, 1, mpmc::bounded_blocking, 1, TEN_THOUSAND, 10, 100); group.finish(); } fn crossfire_bounded_1_blocking_n_1(c: &mut Criterion) { detect_backoff_cfg(); init_logger(); let mut group = c.benchmark_group("crossfire_bounded_1_blocking_n_1"); for input in n_1() { bench_bounded_blocking!( group, "mpsc", input, 1, mpsc::bounded_blocking, 1, TEN_THOUSAND, 10, 100 ); } for input in n_1() { bench_bounded_blocking!( group, "mpmc", input, 1, mpmc::bounded_blocking, 1, TEN_THOUSAND, 10, 100 ); } group.finish(); } fn crossfire_bounded_1_blocking_n_n(c: &mut Criterion) { detect_backoff_cfg(); init_logger(); let mut group = c.benchmark_group("crossfire_bounded_1_blocking_n_n"); for input in n_n() { bench_bounded_blocking!( group, "mpmc", input.0, input.1, mpmc::bounded_blocking, 1, TEN_THOUSAND, 10, 100 ); } group.finish(); } fn crossfire_bounded_100_blocking_1_1(c: &mut Criterion) { detect_backoff_cfg(); init_logger(); let mut group = c.benchmark_group("crossfire_bounded_100_blocking_1_1"); bench_bounded_blocking!(group, "spsc", 1, 1, spsc::bounded_blocking, 100, ONE_MILLION); bench_bounded_blocking!(group, "mpsc", 1, 1, mpsc::bounded_blocking, 100, ONE_MILLION); bench_bounded_blocking!(group, "mpmc", 1, 1, mpmc::bounded_blocking, 100, ONE_MILLION); group.finish(); } fn crossfire_bounded_100_blocking_n_1(c: &mut Criterion) { detect_backoff_cfg(); init_logger(); let mut group = c.benchmark_group("crossfire_bounded_100_blocking_n_1"); for input in n_1() { bench_bounded_blocking!(group, "mpsc", input, 1, mpsc::bounded_blocking, 100, ONE_MILLION); } for input in n_1() { bench_bounded_blocking!(group, "mpmc", input, 1, mpmc::bounded_blocking, 100, ONE_MILLION); } group.finish(); } fn 
crossfire_bounded_100_blocking_n_n(c: &mut Criterion) { detect_backoff_cfg(); init_logger(); let mut group = c.benchmark_group("crossfire_bounded_100_blocking_n_n"); for input in n_n() { bench_bounded_blocking!( group, "mpmc", input.0, input.1, mpmc::bounded_blocking, 100, ONE_MILLION ); } group.finish(); } fn crossfire_bounded_1_async_1_1(c: &mut Criterion) { detect_backoff_cfg(); init_logger(); let mut group = c.benchmark_group("crossfire_bounded_1_async_1_1"); bench_bounded_async!(group, "spsc", 1, 1, spsc::bounded_async, 1, TEN_THOUSAND, 10, 100); bench_bounded_async!(group, "mpsc", 1, 1, mpsc::bounded_async, 1, TEN_THOUSAND, 10, 100); bench_bounded_async!(group, "mpmc", 1, 1, mpmc::bounded_async, 1, TEN_THOUSAND, 10, 100); group.finish(); } fn crossfire_bounded_1_async_n_1(c: &mut Criterion) { detect_backoff_cfg(); init_logger(); let mut group = c.benchmark_group("crossfire_bounded_1_async_n_1"); for input in n_1() { bench_bounded_async!( group, "mpsc", input, 1, mpsc::bounded_async, 1, TEN_THOUSAND, 10, 100 ); } for input in n_1() { bench_bounded_async!( group, "mpmc", input, 1, mpmc::bounded_async, 1, TEN_THOUSAND, 10, 100 ); } group.finish(); } fn crossfire_bounded_1_async_n_n(c: &mut Criterion) { detect_backoff_cfg(); init_logger(); let mut group = c.benchmark_group("crossfire_bounded_1_async_n_n"); for input in n_n() { bench_bounded_async!( group, "mpmc", input.0, input.1, mpmc::bounded_async, 1, TEN_THOUSAND, 10, 100 ); } group.finish(); } fn crossfire_bounded_100_async_1_1(c: &mut Criterion) { detect_backoff_cfg(); init_logger(); let mut group = c.benchmark_group("crossfire_bounded_100_async_1_1"); bench_bounded_async!(group, "spsc", 1, 1, spsc::bounded_async, 100, ONE_MILLION); bench_bounded_async!(group, "mpsc", 1, 1, mpsc::bounded_async, 100, ONE_MILLION); bench_bounded_async!(group, "mpmc", 1, 1, mpmc::bounded_async, 100, ONE_MILLION); group.finish(); } fn crossfire_bounded_100_async_n_1(c: &mut Criterion) { detect_backoff_cfg(); init_logger(); let 
mut group = c.benchmark_group("crossfire_bounded_100_async_n_1"); for input in n_1() { bench_bounded_async!(group, "mpsc", input, 1, mpsc::bounded_async, 100, ONE_MILLION); } for input in n_1() { bench_bounded_async!(group, "mpmc", input, 1, mpmc::bounded_async, 100, ONE_MILLION); } group.finish(); } fn crossfire_bounded_100_async_n_n(c: &mut Criterion) { detect_backoff_cfg(); init_logger(); let mut group = c.benchmark_group("crossfire_bounded_100_async_n_n"); for input in n_n() { bench_bounded_async!( group, "mpmc", input.0, input.1, mpmc::bounded_async, 100, ONE_MILLION ); } group.finish(); } fn crossfire_unbounded_blocking_1_1(c: &mut Criterion) { detect_backoff_cfg(); init_logger(); let mut group = c.benchmark_group("crossfire_unbounded_blocking_1_1"); bench_unbounded_blocking!(group, "spsc", 1, 1, spsc::unbounded_blocking, ONE_MILLION); bench_unbounded_blocking!(group, "mpsc", 1, 1, mpsc::unbounded_blocking, ONE_MILLION); bench_unbounded_blocking!(group, "mpmc", 1, 1, mpmc::unbounded_blocking, ONE_MILLION); group.finish(); } fn crossfire_unbounded_blocking_n_1(c: &mut Criterion) { detect_backoff_cfg(); init_logger(); let mut group = c.benchmark_group("crossfire_unbounded_blocking_n_1"); for input in n_1() { bench_unbounded_blocking!(group, "mpsc", input, 1, mpsc::unbounded_blocking, ONE_MILLION); } for input in n_1() { bench_unbounded_blocking!(group, "mpmc", input, 1, mpmc::unbounded_blocking, ONE_MILLION); } group.finish(); } fn crossfire_unbounded_blocking_n_n(c: &mut Criterion) { detect_backoff_cfg(); init_logger(); let mut group = c.benchmark_group("crossfire_unbounded_blocking_n_n"); for input in n_n() { bench_unbounded_blocking!( group, "mpmc", input.0, input.1, mpmc::unbounded_blocking, ONE_MILLION ); } group.finish(); } fn crossfire_unbounded_async_1_1(c: &mut Criterion) { detect_backoff_cfg(); init_logger(); let mut group = c.benchmark_group("crossfire_unbounded_async_1_1"); bench_unbounded_async!(group, "spsc", 1, 1, spsc::unbounded_async, 
ONE_MILLION); bench_unbounded_async!(group, "mpsc", 1, 1, mpsc::unbounded_async, ONE_MILLION); bench_unbounded_async!(group, "mpmc", 1, 1, mpmc::unbounded_async, ONE_MILLION); group.finish(); } fn crossfire_unbounded_async_mpsc(c: &mut Criterion) { detect_backoff_cfg(); init_logger(); let mut group = c.benchmark_group("crossfire_unbounded_async_n_1"); for input in n_1() { bench_unbounded_async!(group, "mpsc", input, 1, mpsc::unbounded_async, ONE_MILLION); } for input in n_1() { bench_unbounded_async!(group, "mpmc", input, 1, mpmc::unbounded_async, ONE_MILLION); } group.finish(); } fn crossfire_unbounded_async_mpmc(c: &mut Criterion) { detect_backoff_cfg(); init_logger(); let mut group = c.benchmark_group("crossfire_unbounded_async_n_n"); for input in n_n() { bench_unbounded_async!(group, "mpmc", input.0, input.1, mpmc::unbounded_async, ONE_MILLION); } group.finish(); } fn crossfire_oneshot_blocking(c: &mut Criterion) { detect_backoff_cfg(); init_logger(); let mut group = c.benchmark_group("crossfire_oneshot_blocking"); let count = TEN_THOUSAND; group.throughput(Throughput::Elements(count as u64)); group.bench_function("spawn", |b| { b.iter(|| { let mut txs = Vec::with_capacity(count); let mut rxs = Vec::with_capacity(count); for _i in 0..count { let (tx, rx) = crossfire::oneshot::oneshot(); txs.push(tx); rxs.push(rx); } thread::spawn(move || { for tx in txs { let _ = tx.send(0); } }); for rx in rxs { let _ = rx.recv(); } }) }); group.finish(); } fn crossfire_oneshot_async(c: &mut Criterion) { detect_backoff_cfg(); init_logger(); let mut group = c.benchmark_group("crossfire_oneshot_async"); let count = TEN_THOUSAND; group.throughput(Throughput::Elements(count as u64)); group.bench_function("spawn", |b| { b.to_async(BenchExecutor()).iter(|| async move { let mut txs = Vec::with_capacity(count); let mut rxs = Vec::with_capacity(count); for _i in 0..count { let (tx, rx) = crossfire::oneshot::oneshot(); txs.push(tx); rxs.push(rx); } let th = async_spawn!(async move { for 
tx in txs { tx.send(0); } }); for rx in rxs { let _ = rx.await; } let _ = async_join_result!(th); }) }); group.finish(); } fn bench_crossfire_wait_group(c: &mut Criterion) { let mut group = c.benchmark_group("crossfire_wait_group"); let count = TEN_THOUSAND; // Or some appropriate number for throughput group.throughput(Throughput::Elements(count as u64)); group.bench_function("add_guard", |b| { let wg = WaitGroup::new(0); b.iter(|| { let mut guards: Vec = Vec::with_capacity(count); for _i in 0..count { guards.push(wg.add_guard()); } // guards are dropped here }); }); group.finish(); } criterion_group!( benches, crossfire_bounded_1_blocking_1_1, crossfire_bounded_1_blocking_n_1, crossfire_bounded_1_blocking_n_n, crossfire_bounded_100_blocking_1_1, crossfire_bounded_100_blocking_n_1, crossfire_bounded_100_blocking_n_n, crossfire_unbounded_blocking_1_1, crossfire_unbounded_blocking_n_1, crossfire_unbounded_blocking_n_n, crossfire_bounded_1_async_1_1, crossfire_bounded_1_async_n_1, crossfire_bounded_1_async_n_n, crossfire_bounded_100_async_1_1, crossfire_bounded_100_async_n_1, crossfire_bounded_100_async_n_n, crossfire_unbounded_async_1_1, crossfire_unbounded_async_mpsc, crossfire_unbounded_async_mpmc, crossfire_oneshot_blocking, crossfire_oneshot_async, bench_crossfire_wait_group, ); criterion_main!(benches); ================================================ FILE: test-suite/benches/crossfire_select.rs ================================================ use criterion::*; use crossfire::{ mpsc::Array, select::{Multiplex, Mux, Select, SelectMode}, *, }; use std::thread; use std::time::Duration; #[allow(unused_imports, dead_code)] mod common; use common::*; // Initialize logger for benchmarks fn init_logger() { #[cfg(feature = "trace_log")] { use captains_log::*; use std::sync::Once; static INIT: Once = Once::new(); INIT.call_once(|| { let format = recipe::LOG_FORMAT_THREADED_DEBUG; let ring = ringfile::LogRingFile::new( "/tmp/crossfire_ring.log", 500 * 1024 * 1024, 
Level::Debug, format, ); let mut config = Builder::default() .signal(signal_consts::SIGINT) .signal(signal_consts::SIGTERM) .add_sink(ring) .add_sink(LogConsole::new( ConsoleTarget::Stdout, Level::Info, recipe::LOG_FORMAT_DEBUG, )); config.dynamic = true; config.build().expect("log_setup"); }); } }
/* Benchmark topology: NUM_CHANNELS producer channels, each bounded to BOUND slots. */
const NUM_CHANNELS: usize = 4; const BOUND: usize = 100;
/* Spawns one producer thread per sender; each thread pushes total_msgs / txs.len() sequential
 * usize messages and panics ("send") on failure. Returns the join handles so callers can wait
 * for all producers.
 * NOTE(review): the generic parameter list appears stripped by extraction (presumably
 * `fn spawn_senders<T>(txs: Vec<T>, ...) -> Vec<thread::JoinHandle<()>>`) — confirm upstream. */
fn spawn_senders(txs: Vec, total_msgs: usize) -> Vec> where T: BlockingTxTrait + Send + Clone + 'static, { let msgs_per_channel = total_msgs / txs.len(); txs.into_iter() .map(|tx| { thread::spawn(move || { for i in 0..msgs_per_channel { tx.send(i).expect("send"); } }) }) .collect() }
/* Drives a Select over NUM_CHANNELS bounded mpsc receivers in the given polling mode until
 * total_msgs messages have been read. A receiver yielding RecvError (all its senders dropped)
 * is removed from the select set; the final assert checks message conservation.
 * NOTE(review): `mpsc::bounded_blocking::(BOUND)` is missing its turbofish type argument
 * (extraction artifact); the element type is usize, matching spawn_senders. */
fn run_select(mode: SelectMode, total_msgs: usize) { let mut receivers = Vec::with_capacity(NUM_CHANNELS); let mut senders = Vec::with_capacity(NUM_CHANNELS); for _ in 0..NUM_CHANNELS { let (tx, rx) = mpsc::bounded_blocking::(BOUND); receivers.push(rx); senders.push(tx); } let mut select = Select::new_with(mode); for rx in &receivers { select.add(rx); } let handles = spawn_senders(senders, total_msgs); let mut recv_counter = 0; while recv_counter < total_msgs { match select.select() { /* select() returns a token identifying the ready receiver; find it by equality. */ Ok(res) => { for rx in &receivers { if res == *rx { match rx.read_select(res) { Ok(_) => { recv_counter += 1; } Err(RecvError) => { select.remove(rx); } } break; } } } Err(RecvError) => break, } } assert_eq!(total_msgs, recv_counter); for h in handles { h.join().unwrap(); } }
/* Same workload through the Multiplex API: all producers feed one multiplexer, the main
 * thread drains it via mp.recv() until total_msgs are counted.
 * NOTE(review): the generic arguments after `Multiplex::` and inside the `senders` Vec type
 * were stripped by extraction — confirm the concrete types against the upstream repo. */
fn run_multiplex(total_msgs: usize) { let mut mp = Multiplex::>::new(); let mut senders: Vec>>> = Vec::with_capacity(NUM_CHANNELS); for _ in 0..NUM_CHANNELS { let tx = mp.bounded_tx(BOUND); senders.push(tx); } let handles = spawn_senders(senders, total_msgs); let mut recv_counter = 0; while recv_counter < total_msgs { match mp.recv() { Ok(_) => { recv_counter += 1; } Err(RecvError) => break, } } assert_eq!(total_msgs, recv_counter); for h in handles { h.join().unwrap(); } }
/* Criterion entry: measures Select throughput (1M msgs) under RR / Rand / Bias polling modes. */
fn bench_select(c: &mut Criterion) { init_logger(); let mut group = c.benchmark_group("select");
group.significance_level(0.1).sample_size(50); group.measurement_time(Duration::from_secs(20)); group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_function("select_rr", |b| b.iter(|| run_select(SelectMode::RR, ONE_MILLION))); group.bench_function("select_rand", |b| b.iter(|| run_select(SelectMode::Rand, ONE_MILLION))); group.bench_function("select_bias", |b| b.iter(|| run_select(SelectMode::Bias, ONE_MILLION))); group.finish(); }
/* Criterion entry: measures Multiplex throughput for the same 1M-message workload,
 * using the same sample-size/measurement-time settings as bench_select for comparability. */
fn bench_multiplex(c: &mut Criterion) { init_logger(); let mut group = c.benchmark_group("multiplex"); group.significance_level(0.1).sample_size(50); group.measurement_time(Duration::from_secs(20)); group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_function("multiplex", |b| b.iter(|| run_multiplex(ONE_MILLION))); group.finish(); } criterion_group!(benches, bench_select, bench_multiplex); criterion_main!(benches); ================================================ FILE: test-suite/benches/extra.rs ================================================ use criterion::*; use std::thread; mod common; use common::*;
/* Benchmarks the third-party `async_oneshot` crate: creates TEN_THOUSAND oneshot pairs per
 * iteration, fires all sends from one spawned task, then awaits every receiver.
 * `async_spawn!` / `BenchExecutor` come from the shared `common` module.
 * NOTE(review): the spawned send task's handle is not awaited here (unlike the crossfire
 * variant) — presumably intentional fire-and-forget; confirm against the other benches. */
fn bench_async_oneshot_async(c: &mut Criterion) { let mut group = c.benchmark_group("async_oneshot_async"); let count = TEN_THOUSAND; group.throughput(Throughput::Elements(count as u64)); group.bench_function("spawn", |b| { b.to_async(BenchExecutor()).iter(|| async move { let mut txs = Vec::with_capacity(count); let mut rxs = Vec::with_capacity(count); for _i in 0..count { let (tx, rx) = async_oneshot::oneshot(); txs.push(tx); rxs.push(rx); } async_spawn!(async move { for mut tx in txs { let _ = tx.send(0); } }); for rx in rxs { let _ = rx.await; } }) }); group.finish(); }
/* Benchmarks the `oneshot` crate in an async context with the same spawn-then-await-all shape. */
fn bench_oneshot_async(c: &mut Criterion) { let mut group = c.benchmark_group("oneshot_async"); let count = TEN_THOUSAND; group.throughput(Throughput::Elements(count as u64)); group.bench_function("spawn", |b| { b.to_async(BenchExecutor()).iter(|| async move { let mut txs = Vec::with_capacity(count); let mut rxs =
Vec::with_capacity(count); for _i in 0..count { let (tx, rx) = oneshot::channel(); txs.push(tx); rxs.push(rx); } async_spawn!(async move { for tx in txs { let _ = tx.send(0); } }); for rx in rxs { let _ = rx.await; } }) }); group.finish(); }
/* Blocking counterpart: same TEN_THOUSAND oneshot pairs, but sends happen on a std thread
 * and receivers use blocking recv(); the sender thread is joined each iteration. */
fn bench_oneshot_thread(c: &mut Criterion) { let mut group = c.benchmark_group("oneshot_thread"); let count = TEN_THOUSAND; group.throughput(Throughput::Elements(count as u64)); group.bench_function("thread", |b| { b.iter(|| { let mut txs = Vec::with_capacity(count); let mut rxs = Vec::with_capacity(count); for _i in 0..count { let (tx, rx) = oneshot::channel(); txs.push(tx); rxs.push(rx); } let t = thread::spawn(move || { for tx in txs { let _ = tx.send(0); } }); for rx in rxs { let _ = rx.recv(); } t.join().unwrap(); }) }); group.finish(); } criterion_group!( extra_benches, bench_async_oneshot_async, bench_oneshot_async, bench_oneshot_thread ); criterion_main!(extra_benches); ================================================ FILE: test-suite/benches/flume.rs ================================================ use criterion::*; use std::thread; use std::time::Duration; #[allow(unused_imports)] mod common; use common::*;
/* Blocking flume workload: one bounded channel (capacity `bound`), tx_count producer threads
 * each sending msg_count / tx_count messages, rx_count consumers (rx_count - 1 spawned
 * threads plus the calling thread). The original tx clone is dropped so receivers observe
 * channel closure once all producers finish; the final assert checks message conservation.
 * Note: integer division means msg_count not divisible by tx_count sends slightly fewer
 * messages — send_counter is accumulated from the per-thread quota, so the assert stays
 * consistent either way. */
fn _flume_bounded_sync(bound: usize, tx_count: usize, rx_count: usize, msg_count: usize) { let (tx, rx) = flume::bounded(bound); let mut th_tx = Vec::new(); let mut th_rx = Vec::new(); let mut send_counter: usize = 0; let _send_counter = msg_count / tx_count; for _tx_i in 0..tx_count { send_counter += _send_counter; let _tx = tx.clone(); th_tx.push(thread::spawn(move || { for i in 0.._send_counter { if let Err(e) = _tx.send(i) { panic!("send error: {:?}", e); } } })); } drop(tx); let mut recv_counter = 0; for _ in 0..(rx_count - 1) { let _rx = rx.clone(); th_rx.push(thread::spawn(move || -> usize { let mut i = 0; loop { match _rx.recv() { Ok(_) => { i += 1; } Err(_) => { break; } } } i })); } loop { match rx.recv() { Ok(_) => { recv_counter += 1; } Err(_) => { break; } } } /* NOTE(review): `let _ = th.join()` swallows a producer panic; the conservation assert below would then fail with a less direct message. */ for th in th_tx { let _ = th.join(); } for
th in th_rx { if let Ok(count) = th.join() { recv_counter += count; } } assert_eq!(send_counter, recv_counter); }
/* Same blocking producer/consumer workload as _flume_bounded_sync, but over an unbounded
 * flume channel (no backpressure on senders). */
fn _flume_unbounded_sync(tx_count: usize, rx_count: usize, msg_count: usize) { let (tx, rx) = flume::unbounded(); let mut th_tx = Vec::new(); let mut th_rx = Vec::new(); let mut send_counter: usize = 0; let _send_counter = msg_count / tx_count; for _tx_i in 0..tx_count { send_counter += _send_counter; let _tx = tx.clone(); th_tx.push(thread::spawn(move || { for i in 0.._send_counter { if let Err(e) = _tx.send(i) { panic!("send error: {:?}", e); } } })); } drop(tx); let mut recv_counter = 0; for _ in 0..(rx_count - 1) { let _rx = rx.clone(); th_rx.push(thread::spawn(move || -> usize { let mut i = 0; loop { match _rx.recv() { Ok(_) => { i += 1; } Err(_) => { break; } } } i })); } loop { match rx.recv() { Ok(_) => { recv_counter += 1; } Err(_) => { break; } } } for th in th_tx { let _ = th.join(); } for th in th_rx { if let Ok(count) = th.join() { recv_counter += count; } } assert_eq!(send_counter, recv_counter); }
/* Async variant on the unbounded flume channel: producers use the synchronous send() (never
 * blocks on an unbounded channel), consumers use recv_async(); tasks are spawned/joined via
 * the runtime-agnostic async_spawn!/async_join_result! macros from `common`. */
async fn _flume_unbounded_async(tx_count: usize, rx_count: usize, msg_count: usize) { let (tx, rx) = flume::unbounded(); let mut th_tx = Vec::new(); let mut th_rx = Vec::new(); let mut send_counter: usize = 0; let _send_counter = msg_count / tx_count; for _tx_i in 0..tx_count { send_counter += _send_counter; let _tx = tx.clone(); th_tx.push(async_spawn!(async move { for i in 0.._send_counter { if let Err(e) = _tx.send(i) { panic!("send error: {:?}", e); } } })); } drop(tx); let mut recv_counter = 0; for _ in 0..(rx_count - 1) { let _rx = rx.clone(); th_rx.push(async_spawn!(async move { let mut i = 0; loop { match _rx.recv_async().await { Ok(_) => { i += 1; } Err(_) => { break; } } } i })); } loop { match rx.recv_async().await { Ok(_) => { recv_counter += 1; } Err(_) => { break; } } } for th in th_tx { let _ = th.await; } for th in th_rx { recv_counter += async_join_result!(th); } assert_eq!(send_counter, recv_counter); } async fn
_flume_bounded_async(bound: usize, tx_count: usize, rx_count: usize, msg_count: usize) { let (tx, rx) = flume::bounded(bound); let mut th_tx = Vec::new(); let mut th_rx = Vec::new(); let mut send_counter: usize = 0; let _send_counter = msg_count / tx_count; for _tx_i in 0..tx_count { send_counter += _send_counter; let _tx = tx.clone(); th_tx.push(async_spawn!(async move { for i in 0.._send_counter { if let Err(e) = _tx.send_async(i).await { panic!("send error: {:?}", e); } } })); } drop(tx); let mut recv_counter = 0; for _ in 0..(rx_count - 1) { let _rx = rx.clone(); th_rx.push(async_spawn!(async move { let mut i = 0; loop { match _rx.recv_async().await { Ok(_) => { i += 1; } Err(_) => { break; } } } i })); } loop { match rx.recv_async().await { Ok(_) => { recv_counter += 1; } Err(_) => { break; } } } for th in th_tx { let _ = th.await; } for th in th_rx { recv_counter += async_join_result!(th); } assert_eq!(send_counter, recv_counter); } fn bench_flume_bounded_sync(c: &mut Criterion) { let mut group = c.benchmark_group("flume_bounded_blocking"); group.significance_level(0.1).sample_size(50); group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.measurement_time(Duration::from_secs(15)); for input in n_1() { let param = Concurrency { tx_count: input, rx_count: 1 }; group.throughput(Throughput::Elements(TEN_THOUSAND as u64)); group.bench_with_input(BenchmarkId::new("mpsc size 1", input), ¶m, |b, i| { b.iter(|| _flume_bounded_sync(1, i.tx_count, i.rx_count, TEN_THOUSAND)) }); } for input in n_1() { let param = Concurrency { tx_count: input, rx_count: 1 }; group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_with_input(BenchmarkId::new("mpsc size 100", input), ¶m, |b, i| { b.iter(|| _flume_bounded_sync(100, i.tx_count, i.rx_count, ONE_MILLION)) }); } for input in n_n() { let param = Concurrency { tx_count: input.0, rx_count: input.1 }; group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_with_input( 
BenchmarkId::new("mpmc size 100", param.to_string()), ¶m, |b, i| b.iter(|| _flume_bounded_sync(100, i.tx_count, i.rx_count, ONE_MILLION)), ); } group.finish(); } fn bench_flume_unbounded_async(c: &mut Criterion) { let mut group = c.benchmark_group("flume_unbounded_async"); group.significance_level(0.1).sample_size(50); group.measurement_time(Duration::from_secs(20)); for input in n_1() { let param = Concurrency { tx_count: input, rx_count: 1 }; group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_with_input(BenchmarkId::new("mpsc", ¶m), ¶m, |b, i| { b.to_async(BenchExecutor()) .iter(|| _flume_unbounded_async(i.tx_count, i.rx_count, ONE_MILLION)) }); } for input in n_n() { let param = Concurrency { tx_count: input.0, rx_count: input.1 }; group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_with_input(BenchmarkId::new("mpmc", ¶m), ¶m, |b, i| { b.to_async(BenchExecutor()) .iter(|| _flume_unbounded_async(i.tx_count, i.rx_count, ONE_MILLION)) }); } } fn bench_flume_bounded_async(c: &mut Criterion) { let mut group = c.benchmark_group("flume_bounded_async"); group.significance_level(0.1).sample_size(50); group.measurement_time(Duration::from_secs(20)); for input in n_1() { let param = Concurrency { tx_count: input, rx_count: 1 }; group.throughput(Throughput::Elements(TEN_THOUSAND as u64)); group.bench_with_input(BenchmarkId::new("mpsc size 1", ¶m), ¶m, |b, i| { b.to_async(BenchExecutor()) .iter(|| _flume_bounded_async(1, i.tx_count, i.rx_count, TEN_THOUSAND)) }); } for input in n_1() { let param = Concurrency { tx_count: input, rx_count: 1 }; group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_with_input(BenchmarkId::new("mpsc size 100", ¶m), ¶m, |b, i| { b.to_async(BenchExecutor()) .iter(|| _flume_bounded_async(100, i.tx_count, i.rx_count, ONE_MILLION)) }); } for input in n_n() { let param = Concurrency { tx_count: input.0, rx_count: input.1 }; group.throughput(Throughput::Elements(ONE_MILLION as u64)); 
group.bench_with_input(BenchmarkId::new("mpmc size 100", ¶m), ¶m, |b, i| { b.to_async(BenchExecutor()) .iter(|| _flume_bounded_async(100, i.tx_count, i.rx_count, ONE_MILLION)) }); } } fn bench_flume_unbounded_sync(c: &mut Criterion) { let mut group = c.benchmark_group("flume_unbounded_blocking"); group.significance_level(0.1).sample_size(50); group.measurement_time(Duration::from_secs(20)); for input in n_1() { let param = Concurrency { tx_count: input, rx_count: 1 }; group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_with_input(BenchmarkId::new("mpsc", ¶m), ¶m, |b, i| { b.iter(|| _flume_unbounded_sync(i.tx_count, i.rx_count, ONE_MILLION)) }); } for input in n_n() { let param = Concurrency { tx_count: input.0, rx_count: input.1 }; group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_with_input(BenchmarkId::new("mpmc", ¶m), ¶m, |b, i| { b.iter(|| _flume_unbounded_sync(i.tx_count, i.rx_count, ONE_MILLION)) }); } } criterion_group!( benches, bench_flume_bounded_sync, bench_flume_bounded_async, bench_flume_unbounded_async, bench_flume_unbounded_sync, ); criterion_main!(benches); ================================================ FILE: test-suite/benches/kanal.rs ================================================ use criterion::*; use std::thread; use std::time::Duration; #[allow(unused_imports)] mod common; use common::*; fn _kanal_bounded_blocking(bound: usize, tx_count: usize, rx_count: usize, msg_count: usize) { let (tx, rx) = kanal::bounded::(bound); let mut th_tx = Vec::new(); let mut th_rx = Vec::new(); let mut send_counter: usize = 0; let _send_counter = msg_count / tx_count; for _ in 0..tx_count { send_counter += _send_counter; let _tx = tx.clone(); th_tx.push(thread::spawn(move || { for i in 0.._send_counter { _tx.send(i).expect("send"); } })); } drop(tx); let mut recv_counter = 0; for _ in 0..(rx_count - 1) { let _rx = rx.clone(); th_rx.push(thread::spawn(move || -> usize { let mut i = 0; loop { match _rx.recv() { Ok(_) => 
{ i += 1; } Err(_) => { break; } } } i })); } loop { match rx.recv() { Ok(_) => { recv_counter += 1; } Err(_) => { break; } } } for th in th_tx { let _ = th.join(); } for th in th_rx { if let Ok(count) = th.join() { recv_counter += count; } } assert_eq!(send_counter, recv_counter); } fn _kanal_unbounded_blocking(tx_count: usize, rx_count: usize, msg_count: usize) { let (tx, rx) = kanal::unbounded::(); let mut th_tx = Vec::new(); let mut th_rx = Vec::new(); let mut send_counter: usize = 0; let _send_counter = msg_count / tx_count; for _ in 0..tx_count { send_counter += _send_counter; let _tx = tx.clone(); th_tx.push(thread::spawn(move || { for i in 0.._send_counter { _tx.send(i).expect("send"); } })); } drop(tx); let mut recv_counter = 0; for _ in 0..(rx_count - 1) { let _rx = rx.clone(); th_rx.push(thread::spawn(move || -> usize { let mut i = 0; loop { match _rx.recv() { Ok(_) => { i += 1; } Err(_) => { break; } } } i })); } loop { match rx.recv() { Ok(_) => { recv_counter += 1; } Err(_) => { break; } } } for th in th_tx { let _ = th.join(); } for th in th_rx { if let Ok(count) = th.join() { recv_counter += count; } } assert_eq!(send_counter, recv_counter); } async fn _kanal_bounded_async(bound: usize, tx_count: usize, rx_count: usize, msg_count: usize) { let (tx, rx) = kanal::bounded_async(bound); let mut th_tx = Vec::new(); let mut th_rx = Vec::new(); let mut send_counter: usize = 0; let _send_counter = msg_count / tx_count; for _tx_i in 0..tx_count { send_counter += _send_counter; let _tx = tx.clone(); th_tx.push(async_spawn!(async move { for i in 0.._send_counter { if let Err(e) = _tx.send(i).await { panic!("send error: {:?}", e); } } })); } drop(tx); let mut recv_counter = 0; for _ in 0..(rx_count - 1) { let _rx = rx.clone(); th_rx.push(async_spawn!(async move { let mut i = 0; loop { match _rx.recv().await { Ok(_) => { i += 1; } Err(_) => { break; } } } i })); } loop { match rx.recv().await { Ok(_) => { recv_counter += 1; } Err(_) => { break; } } } for th in 
th_tx { let _ = async_join_result!(th); } for th in th_rx { recv_counter += async_join_result!(th); } assert_eq!(send_counter, recv_counter); } async fn _kanal_unbounded_async(tx_count: usize, rx_count: usize, msg_count: usize) { let (tx, rx) = kanal::unbounded_async(); let mut th_tx = Vec::new(); let mut th_rx = Vec::new(); let mut send_counter: usize = 0; let _send_counter = msg_count / tx_count; for _tx_i in 0..tx_count { send_counter += _send_counter; let _tx = tx.clone(); th_tx.push(async_spawn!(async move { for i in 0.._send_counter { if let Err(e) = _tx.send(i).await { panic!("send error: {:?}", e); } } })); } drop(tx); let mut recv_counter = 0; for _ in 0..(rx_count - 1) { let _rx = rx.clone(); th_rx.push(async_spawn!(async move { let mut i = 0; loop { match _rx.recv().await { Ok(_) => { i += 1; } Err(_) => { break; } } } i })); } loop { match rx.recv().await { Ok(_) => { recv_counter += 1; } Err(_) => { break; } } } for th in th_tx { let _ = async_join_result!(th); } for th in th_rx { recv_counter += async_join_result!(th); } assert_eq!(send_counter, recv_counter); } fn bench_kanal_bounded_blocking(c: &mut Criterion) { let mut group = c.benchmark_group("kanal_bounded_blocking"); group.significance_level(0.1).sample_size(50); group.measurement_time(Duration::from_secs(20)); for input in n_1() { let param = Concurrency { tx_count: input, rx_count: 1 }; group.throughput(Throughput::Elements(TEN_THOUSAND as u64)); group.bench_with_input(BenchmarkId::new("mpsc size 1", ¶m), ¶m, |b, i| { b.iter(|| _kanal_bounded_blocking(1, i.tx_count, i.rx_count, TEN_THOUSAND)) }); } for input in n_1() { let param = Concurrency { tx_count: input, rx_count: 1 }; group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_with_input(BenchmarkId::new("mpsc size 100", ¶m), ¶m, |b, i| { b.iter(|| _kanal_bounded_blocking(100, i.tx_count, i.rx_count, ONE_MILLION)) }); } for input in n_n() { let param = Concurrency { tx_count: input.0, rx_count: input.1 }; 
group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_with_input(BenchmarkId::new("mpmc size 100", ¶m), ¶m, |b, i| { b.iter(|| _kanal_bounded_blocking(100, i.tx_count, i.rx_count, ONE_MILLION)) }); } } fn bench_kanal_unbounded_blocking(c: &mut Criterion) { let mut group = c.benchmark_group("kanal_unbounded_blocking"); group.significance_level(0.1).sample_size(50); group.measurement_time(Duration::from_secs(20)); for input in n_1() { let param = Concurrency { tx_count: input, rx_count: 1 }; group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_with_input(BenchmarkId::new("mpsc", ¶m), ¶m, |b, i| { b.iter(|| _kanal_unbounded_blocking(i.tx_count, i.rx_count, ONE_MILLION)) }); } for input in n_n() { let param = Concurrency { tx_count: input.0, rx_count: input.1 }; group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_with_input(BenchmarkId::new("mpmc", ¶m), ¶m, |b, i| { b.iter(|| _kanal_unbounded_blocking(i.tx_count, i.rx_count, ONE_MILLION)) }); } } fn bench_kanal_bounded_async(c: &mut Criterion) { let mut group = c.benchmark_group("kanal_bounded_async"); group.significance_level(0.1).sample_size(50); group.measurement_time(Duration::from_secs(20)); for input in n_1() { let param = Concurrency { tx_count: input, rx_count: 1 }; group.throughput(Throughput::Elements(TEN_THOUSAND as u64)); group.bench_with_input(BenchmarkId::new("mpsc size 1", ¶m), ¶m, |b, i| { b.to_async(BenchExecutor()) .iter(|| _kanal_bounded_async(1, i.tx_count, i.rx_count, TEN_THOUSAND)) }); } for input in n_1() { let param = Concurrency { tx_count: input, rx_count: 1 }; group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_with_input(BenchmarkId::new("mpsc size 100", ¶m), ¶m, |b, i| { b.to_async(BenchExecutor()) .iter(|| _kanal_bounded_async(100, i.tx_count, i.rx_count, ONE_MILLION)) }); } for input in n_n() { let param = Concurrency { tx_count: input.0, rx_count: input.1 }; group.throughput(Throughput::Elements(ONE_MILLION as 
u64)); group.bench_with_input(BenchmarkId::new("mpmc size 100", ¶m), ¶m, |b, i| { b.to_async(BenchExecutor()) .iter(|| _kanal_bounded_async(100, i.tx_count, i.rx_count, ONE_MILLION)) }); } } fn bench_kanal_unbounded_async(c: &mut Criterion) { let mut group = c.benchmark_group("kanal_unbounded_async"); group.significance_level(0.1).sample_size(50); group.measurement_time(Duration::from_secs(20)); for input in n_1() { let param = Concurrency { tx_count: input, rx_count: 1 }; group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_with_input(BenchmarkId::new("mpsc", ¶m), ¶m, |b, i| { b.to_async(BenchExecutor()) .iter(|| _kanal_unbounded_async(i.tx_count, i.rx_count, ONE_MILLION)) }); } for input in n_n() { let param = Concurrency { tx_count: input.0, rx_count: input.1 }; group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_with_input(BenchmarkId::new("mpmc", ¶m), ¶m, |b, i| { b.to_async(BenchExecutor()) .iter(|| _kanal_unbounded_async(i.tx_count, i.rx_count, ONE_MILLION)) }); } } criterion_group!( benches, bench_kanal_bounded_async, bench_kanal_unbounded_async, bench_kanal_bounded_blocking, bench_kanal_unbounded_blocking ); criterion_main!(benches); ================================================ FILE: test-suite/benches/tokio.rs ================================================ use criterion::*; use std::time::Duration; #[allow(unused_imports)] mod common; use common::*; async fn _tokio_bounded_mpsc(bound: usize, tx_count: usize, msg_count: usize) { let (tx, mut rx) = tokio::sync::mpsc::channel::(bound); let _send_counter = msg_count / tx_count; for _tx_i in 0..tx_count { let _tx = tx.clone(); async_spawn!(async move { for i in 0.._send_counter { let _ = _tx.send(i).await; } }); } drop(tx); for _ in 0..(tx_count * _send_counter) { if let Some(_msg) = rx.recv().await { // println!("recv {}", _msg); } else { panic!("recv error"); } } } async fn _tokio_unbounded_mpsc(tx_count: usize, msg_count: usize) { let (tx, mut rx) = 
tokio::sync::mpsc::unbounded_channel::(); let _send_counter = msg_count / tx_count; for _tx_i in 0..tx_count { let _tx = tx.clone(); async_spawn!(async move { for i in 0.._send_counter { let _ = _tx.send(i); } }); } drop(tx); for _ in 0..(tx_count * _send_counter) { if let Some(_msg) = rx.recv().await { // println!("recv {}", _msg); } else { panic!("recv error"); } } } fn bench_tokio_bounded(c: &mut Criterion) { let mut group = c.benchmark_group("tokio_bounded_100"); group.significance_level(0.1).sample_size(50); group.measurement_time(Duration::from_secs(10)); for input in n_1() { let param = Concurrency { tx_count: input, rx_count: 1 }; group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_with_input(BenchmarkId::new("mpsc", input), ¶m, |b, i| { b.to_async(BenchExecutor()).iter(|| _tokio_bounded_mpsc(100, i.tx_count, ONE_MILLION)) }); } group.finish(); } fn bench_tokio_unbounded(c: &mut Criterion) { let mut group = c.benchmark_group("tokio_unbounded"); group.significance_level(0.1).sample_size(50); group.measurement_time(Duration::from_secs(10)); for input in n_1() { let param = Concurrency { tx_count: input, rx_count: 1 }; group.throughput(Throughput::Elements(ONE_MILLION as u64)); group.bench_with_input(BenchmarkId::new("mpsc", input), ¶m, |b, i| { b.to_async(BenchExecutor()).iter(|| _tokio_unbounded_mpsc(i.tx_count, ONE_MILLION)) }); } group.finish(); } fn bench_tokio_oneshot(c: &mut Criterion) { let mut group = c.benchmark_group("tokio_oneshot"); let count = TEN_THOUSAND; group.throughput(Throughput::Elements(count as u64)); group.bench_function("oneshot", |b| { b.to_async(BenchExecutor()).iter(|| async move { let mut txs = Vec::with_capacity(count); let mut rxs = Vec::with_capacity(count); for _i in 0..count { let (tx, rx) = tokio::sync::oneshot::channel(); txs.push(tx); rxs.push(rx); } async_spawn!(async move { for tx in txs { let _ = tx.send(0); } }); for rx in rxs { let _ = rx.await; } }) }); group.finish(); } criterion_group!(benches, 
bench_tokio_bounded, bench_tokio_unbounded, bench_tokio_oneshot); criterion_main!(benches); ================================================ FILE: test-suite/scripts/miri.sh ================================================ #!/bin/bash # -Zmiri-no-short-fd-operations is to prevent short write perform by miri, which breaks to atomic appending in log # -Zmiri-permissive-provenance is to disable warning about parking_lot # By default log is off, if you need to enable, pass the option with the script: --features trace_log if [ -z "$MIRI_SEED" ]; then MIRI_SEED="$(shuf -i 1-1000 -n 1)" fi echo "MIRI_SEED" $MIRI_SEED MIRIFLAGS="$MIRIFLAGS -Zmiri-seed=$MIRI_SEED -Zmiri-disable-isolation -Zmiri-no-short-fd-operations -Zmiri-backtrace=full -Zmiri-permissive-provenance" export MIRIFLAGS echo $MIRIFLAGS # --lib: to skip doctest RUSTFLAGS="--cfg tokio_unstable" RUST_BACKTRACE=1 cargo +${NIGHTLY_VERSION:-nightly} miri test --lib $@ -- --no-capture --test-threads=1 ================================================ FILE: test-suite/src/lib.rs ================================================ #[cfg(test)] mod test_async; #[cfg(test)] mod test_async_blocking; #[cfg(test)] mod test_blocking_async; #[cfg(test)] mod test_blocking_context; #[cfg(test)] mod test_oneshot; #[cfg(test)] mod test_select_async; #[cfg(test)] mod test_select_blocking; #[cfg(test)] mod test_waitgroup; // we don't want to import smol-timeout #[cfg(test)] #[cfg(all(feature = "time", not(feature = "smol")))] mod test_type_switch; use captains_log::*; use std::sync::atomic::{AtomicUsize, Ordering}; #[cfg(not(miri))] pub const ROUND: usize = 10000; #[cfg(miri)] pub const ROUND: usize = 20; #[cfg(feature = "compio_dispatcher")] use std::sync::OnceLock; #[cfg(feature = "compio_dispatcher")] use compio::dispatcher::Dispatcher; #[cfg(feature = "compio_dispatcher")] pub static COMPIO_DISPATCHER: OnceLock = OnceLock::new(); pub fn _setup_log() { #[cfg(feature = "trace_log")] { let format = recipe::LOG_FORMAT_THREADED_DEBUG; 
#[cfg(miri)] { let _ = std::fs::remove_file("/tmp/crossfire_miri.log"); let file = LogRawFile::new("/tmp", "crossfire_miri.log", Level::Debug, format); captains_log::Builder::default() // .tracing_global() .add_sink(file) .test() .build() .expect("log setup"); } #[cfg(not(miri))] { let ring = ringfile::LogRingFile::new( "/tmp/crossfire_ring.log", 500 * 1024 * 1024, Level::Debug, format, ); let mut config = Builder::default() .signal(signal_consts::SIGINT) .signal(signal_consts::SIGTERM) // .tracing_global() .add_sink(ring) .add_sink(LogConsole::new( ConsoleTarget::Stdout, Level::Info, recipe::LOG_FORMAT_DEBUG, )); config.dynamic = true; config.build().expect("log_setup"); } } #[cfg(not(feature = "trace_log"))] { let _ = recipe::env_logger("LOG_FILE", "LOG_LEVEL").build().expect("log setup"); } } #[macro_export] macro_rules! runtime_block_on { ($f: expr) => {{ #[cfg(feature = "smol")] { log::info!("run with smol"); smol::block_on($f) } #[cfg(feature = "async_std")] { log::info!("run with async_std"); async_std::task::block_on($f) } #[cfg(any(feature = "compio", feature = "compio_dispatcher"))] { log::info!("run with compio"); let rt = compio::runtime::Runtime::new().unwrap(); rt.block_on($f) } #[cfg(not(any( feature = "compio", feature = "compio_dispatcher", feature = "async_std", feature = "smol" )))] { let runtime_flag = std::env::var("SINGLE_THREAD_RUNTIME").unwrap_or("".to_string()); let mut rt = if runtime_flag.len() > 0 { log::info!("run with tokio current thread"); tokio::runtime::Builder::new_current_thread() } else { log::info!("run with tokio multi thread"); tokio::runtime::Builder::new_multi_thread() }; rt.enable_all().build().unwrap().block_on($f) } }}; } #[macro_export] macro_rules! 
async_spawn { ($f: expr) => {{ #[cfg(feature = "smol")] { smol::spawn($f) } #[cfg(feature = "async_std")] { async_std::task::spawn($f) } #[cfg(feature = "compio")] { compio::runtime::spawn($f) } #[cfg(feature = "compio_dispatcher")] { let disp = COMPIO_DISPATCHER.get_or_init(|| { compio::dispatcher::DispatcherBuilder::new() .worker_threads(std::num::NonZero::new(8).unwrap()) .build() .expect("create dispatcher") }); disp.dispatch(move || $f).expect("dispatch") } #[cfg(not(any( feature = "compio", feature = "compio_dispatcher", feature = "async_std", feature = "smol" )))] { tokio::spawn($f) } }}; } #[macro_export] macro_rules! async_join_result { ($th: expr) => {{ #[cfg(any(feature = "async_std", feature = "smol"))] { $th.await } #[cfg(not(any(feature = "async_std", feature = "smol")))] { // compio and tokio are the same $th.await.expect("join") } }}; } static DROP_COUNTER: AtomicUsize = AtomicUsize::new(0); pub trait TestDropMsg: Unpin + Send + 'static { fn new(v: usize) -> Self; fn get_value(&self) -> usize; } pub struct SmallMsg(pub usize); impl Drop for SmallMsg { fn drop(&mut self) { DROP_COUNTER.fetch_add(1, Ordering::SeqCst); } } impl TestDropMsg for SmallMsg { fn new(v: usize) -> Self { Self(v) } fn get_value(&self) -> usize { self.0 } } pub struct LargeMsg([usize; 4]); impl TestDropMsg for LargeMsg { fn new(v: usize) -> Self { Self([v, v, v, v]) } fn get_value(&self) -> usize { self.0[0] } } impl Drop for LargeMsg { fn drop(&mut self) { DROP_COUNTER.fetch_add(1, Ordering::SeqCst); } } pub fn get_drop_counter() -> usize { DROP_COUNTER.load(Ordering::SeqCst) } pub fn reset_drop_counter() { DROP_COUNTER.store(0, Ordering::SeqCst); } #[cfg(feature = "time")] pub async fn sleep(duration: std::time::Duration) { #[cfg(feature = "smol")] { smol::Timer::after(duration).await; } #[cfg(feature = "async_std")] { async_std::task::sleep(duration).await; } #[cfg(any(feature = "compio", feature = "compio_dispatcher"))] { compio::time::sleep(duration).await; } 
#[cfg(not(any( feature = "compio", feature = "compio_dispatcher", feature = "async_std", feature = "smol" )))] { tokio::time::sleep(duration).await; } } #[cfg(all(feature = "time", not(feature = "smol")))] pub async fn timeout(duration: std::time::Duration, future: F) -> Result where F: std::future::Future, { #[cfg(feature = "async_std")] { return async_std::future::timeout(duration, future) .await .map_err(|_| format!("Test timed out after {:?}", duration)); } #[cfg(any(feature = "compio", feature = "compio_dispatcher"))] { return compio::time::timeout(duration, future) .await .map_err(|_| format!("Test timed out after {:?}", duration)); } #[cfg(not(any( feature = "compio", feature = "compio_dispatcher", feature = "async_std", feature = "smol" )))] { return tokio::time::timeout(duration, future) .await .map_err(|_| format!("Test timed out after {:?}", duration)); } } pub fn spawn_named_thread(name: &str, f: F) -> std::thread::JoinHandle where F: FnOnce() -> T + Send + 'static, T: Send + 'static, { std::thread::Builder::new() .name(name.to_string()) .spawn(f) .unwrap_or_else(|e| panic!("Failed to spawn thread '{}': {:?}", name, e)) } ================================================ FILE: test-suite/src/test_async.rs ================================================ use crate::*; use captains_log::{logfn, *}; use crossfire::flavor::Flavor; use crossfire::tokio_task_id; use crossfire::{sink::*, stream::*, *}; use futures_util::{ pin_mut, select, stream::{Stream, StreamExt}, FutureExt, }; use rstest::*; use std::future::Future; use std::pin::Pin; use std::sync::Arc; use std::sync::Mutex; use std::task::*; use std::thread; use std::time::Duration; #[fixture] fn setup_log() { _setup_log(); } #[logfn] #[rstest] fn test_basic_weak(setup_log: ()) { runtime_block_on!(async move { let (tx, rx) = mpsc::bounded_async::(100); assert_eq!(tx.get_tx_count(), 1); let weak_tx = tx.downgrade(); let tx_clone = weak_tx.upgrade::>().unwrap(); tx_clone.send(1).await.expect("ok"); 
assert_eq!(tx.get_tx_count(), 2); drop(tx); drop(tx_clone); assert!(weak_tx.upgrade::>().is_none()); assert_eq!(weak_tx.get_tx_count(), 0); drop(rx); }); } #[logfn] #[rstest] #[case(spsc::bounded_async(1))] #[case(mpsc::bounded_async(1))] #[case(mpmc::bounded_async(1))] fn test_basic_bounded_empty_full_drop_rx, R: AsyncRxTrait>( setup_log: (), #[case] channel: (T, R), ) { let (tx, rx) = channel; assert!(tx.is_empty()); assert!(rx.is_empty()); assert_eq!(tx.capacity(), Some(1)); assert_eq!(rx.capacity(), Some(1)); tx.try_send(1).expect("Ok"); assert!(tx.is_full()); assert!(rx.is_full()); assert!(!tx.is_empty()); assert_eq!(tx.is_disconnected(), false); assert_eq!(rx.is_disconnected(), false); drop(rx); assert_eq!(tx.is_disconnected(), true); assert_eq!(tx.get_rx_count(), 0); assert_eq!(tx.get_tx_count(), 1); } #[logfn] #[rstest] #[case(spsc::bounded_async(1))] #[case(mpsc::bounded_async(1))] #[case(mpmc::bounded_async(1))] fn test_basic_bounded_empty_full_drop_tx, R: AsyncRxTrait>( setup_log: (), #[case] channel: (T, R), ) { let (tx, rx) = channel; assert!(tx.is_empty()); assert!(rx.is_empty()); assert_eq!(tx.capacity(), Some(1)); assert_eq!(rx.capacity(), Some(1)); tx.try_send(1).expect("Ok"); assert!(tx.is_full()); assert!(rx.is_full()); assert!(!tx.is_empty()); assert_eq!(tx.is_disconnected(), false); assert_eq!(rx.is_disconnected(), false); drop(tx); assert_eq!(rx.is_disconnected(), true); assert_eq!(rx.get_tx_count(), 0); assert_eq!(rx.get_rx_count(), 1); } #[logfn] #[rstest] fn test_basic_compile_bounded_empty_full() { let (tx, rx) = mpmc::bounded_async(1); assert!(tx.is_empty()); assert!(rx.is_empty()); tx.try_send(1).expect("ok"); assert!(tx.is_full()); assert!(!tx.is_empty()); assert!(rx.is_full()); assert_eq!(tx.get_tx_count(), 1); assert_eq!(rx.get_tx_count(), 1); assert_eq!(tx.is_disconnected(), false); assert_eq!(rx.is_disconnected(), false); drop(rx); assert_eq!(tx.is_disconnected(), true); } #[cfg(feature = "time")] #[logfn] #[rstest] fn test_sync() { 
runtime_block_on!(async move { let (tx, rx) = spsc::bounded_async::(100); // Example1: should fail to compile with Arc // let tx = Arc::new(tx); let _task = async_spawn!(async move { let _ = tx.send(2).await; }); drop(rx); let (tx, rx) = mpsc::bounded_async::(100); // example2: should fail to compile with Arc // let rx = Arc::new(rx); let _task = async_spawn!(async move { let _ = rx.recv().await; }); drop(tx); let (tx, rx) = mpsc::bounded_blocking::(100); //// example3: should fail to compile with Arc // let rx = Arc::new(rx); let _task = std::thread::spawn(move || { let _ = rx.recv(); }); drop(tx); let (tx, rx) = spsc::bounded_blocking::(100); //// example4: should fail to compile after Arc // let tx = Arc::new(tx); std::thread::spawn(move || { let _ = tx.send(1); }); drop(rx); let (tx, rx) = mpmc::bounded_blocking::(100); // MRx can put in Arc let rx = Arc::new(rx); std::thread::spawn(move || { let _ = rx.try_recv(); }); // MTx can put in Arc let tx = Arc::new(tx); std::thread::spawn(move || { let _ = tx.try_send(1); }); let (tx, rx) = spsc::bounded_async::(100); let th = async_spawn!(async move { let mut i = 0; loop { sleep(Duration::from_secs(1)).await; i += 1; if let Err(_) = tx.send(i).await { println!("rx dropped"); return; } } }); 'LOOP: for _ in 0..10 { select! 
{ _ = sleep(Duration::from_millis(500)).fuse() =>{ println!("tick"); }, r = rx.recv().fuse() => { match r { Ok(item)=>{ println!("recv {}", item); } Err(e)=>{ println!("tx dropped {:?}", e); break 'LOOP; } } } } } drop(rx); let _ = async_join_result!(th); }); } #[logfn] #[rstest] #[case(spsc::bounded_async(100))] #[case(mpsc::bounded_async(100))] #[case(mpmc::bounded_async(100))] fn test_basic_bounded_rx_drop, R: AsyncRxTrait>( setup_log: (), #[case] channel: (T, R), ) { runtime_block_on!(async move { let tx = { let (tx, _rx) = channel; tx.send(1).await.expect("ok"); tx.send(2).await.expect("ok"); tx.send(3).await.expect("ok"); tx }; { info!("try to send after rx dropped"); assert_eq!(tx.send(4).await.unwrap_err(), SendError(4)); drop(tx); info!("dropped tx"); } }); } #[logfn] #[rstest] #[case(spsc::unbounded_async())] #[case(mpsc::unbounded_async())] #[case(mpmc::unbounded_async())] fn test_basic_unbounded_rx_drop, R: AsyncRxTrait>( setup_log: (), #[case] channel: (T, R), ) { runtime_block_on!(async move { let tx = { let (tx, _rx) = channel; tx.send(1).expect("ok"); tx.send(2).expect("ok"); tx.send(3).expect("ok"); tx }; { info!("try to send after rx dropped"); assert_eq!(tx.send(4).unwrap_err(), SendError(4)); drop(tx); info!("dropped tx"); } }); } #[cfg(feature = "time")] #[logfn] #[rstest] #[case(spsc::bounded_async(10))] #[case(mpsc::bounded_async(10))] #[case(mpmc::bounded_async(10))] fn test_basic_bounded_1_thread, R: AsyncRxTrait>( setup_log: (), #[case] channel: (T, R), ) { let (tx, rx) = channel; runtime_block_on!(async move { let rx_res = rx.try_recv(); assert!(rx_res.is_err()); assert!(rx_res.unwrap_err().is_empty()); for i in 0usize..10 { let tx_res = tx.try_send(i); assert!(tx_res.is_ok()); } let tx_res = tx.try_send(11); assert!(tx_res.is_err()); assert!(tx_res.unwrap_err().is_full()); let th = async_spawn!(async move { for i in 0usize..12 { match rx.recv().await { Ok(j) => { trace!("recv {}", i); assert_eq!(i, j); } Err(e) => { panic!("error {}", 
e); } } } let res = rx.recv().await; assert!(res.is_err()); debug!("rx close"); }); assert!(tx.send(10).await.is_ok()); sleep(Duration::from_secs(1)).await; assert!(tx.send(11).await.is_ok()); drop(tx); let _ = async_join_result!(th); }); } #[cfg(feature = "time")] #[logfn] #[rstest] #[case(spsc::unbounded_async())] #[case(mpsc::unbounded_async())] #[case(mpmc::unbounded_async())] fn test_basic_unbounded_1_thread, R: AsyncRxTrait>( setup_log: (), #[case] channel: (T, R), ) { let (tx, rx) = channel; assert_eq!(tx.capacity(), None); assert_eq!(rx.capacity(), None); runtime_block_on!(async move { let rx_res = rx.try_recv(); assert!(rx_res.is_err()); assert!(rx_res.unwrap_err().is_empty()); for i in 0usize..10 { let tx_res = tx.try_send(i); assert!(tx_res.is_ok()); } let th = async_spawn!(async move { for i in 0usize..12 { match rx.recv().await { Ok(j) => { trace!("recv {}", i); assert_eq!(i, j); } Err(e) => { panic!("error {}", e); } } } let res = rx.recv().await; assert!(res.is_err()); debug!("rx close"); }); assert!(tx.send(10).is_ok()); sleep(Duration::from_secs(1)).await; assert!(tx.send(11).is_ok()); drop(tx); let _ = async_join_result!(th); }); } #[cfg(feature = "time")] #[logfn] #[rstest] #[case(spsc::unbounded_async())] #[case(mpsc::unbounded_async())] #[case(mpmc::unbounded_async())] fn test_basic_unbounded_idle_select, R: AsyncRxTrait>( setup_log: (), #[case] channel: (T, R), ) { let (_tx, rx) = channel; let round = { #[cfg(miri)] { 10 } #[cfg(not(miri))] { 200 } }; runtime_block_on!(async move { let c = rx.recv().fuse(); pin_mut!(c); for _ in 0..round { { let f = sleep(Duration::from_millis(1)).fuse(); pin_mut!(f); select! 
{ _ = f => { let (_tx_wakers, _rx_wakers) = rx.get_wakers_count(); trace!("waker tx {} rx {}", _tx_wakers, _rx_wakers); }, _ = c => { unreachable!() }, } } } let (tx_wakers, rx_wakers) = rx.get_wakers_count(); assert_eq!(tx_wakers, 0); info!("waker rx {}", rx_wakers); }); } #[logfn] #[rstest] #[case(spsc::bounded_async(10))] #[case(mpsc::bounded_async(10))] #[case(mpmc::bounded_async(10))] fn test_basic_bounded_recv_after_sender_close, R: AsyncRxTrait>( setup_log: (), #[case] channel: (T, R), ) { let (tx, rx) = channel; let total_msg_count = 5; for i in 0..total_msg_count { let _ = tx.try_send(i).expect("send ok"); } drop(tx); runtime_block_on!(async move { // NOTE: 5 < 10 let mut recv_msg_count = 0; loop { match rx.recv().await { Ok(_) => { recv_msg_count += 1; } Err(_) => { break; } } } assert_eq!(recv_msg_count, total_msg_count); }); } #[logfn] #[rstest] #[case(spsc::unbounded_async())] #[case(mpsc::unbounded_async())] #[case(mpmc::unbounded_async())] fn test_basic_unbounded_recv_after_sender_close< T: BlockingTxTrait, R: AsyncRxTrait, >( setup_log: (), #[case] channel: (T, R), ) { let (tx, rx) = channel; let total_msg_count = 500; for i in 0..total_msg_count { let _ = tx.send(i).expect("send ok"); } drop(tx); runtime_block_on!(async move { let mut recv_msg_count = 0; loop { match rx.recv().await { Ok(_) => { recv_msg_count += 1; } Err(_) => { break; } } } assert_eq!(recv_msg_count, total_msg_count); }); } #[cfg(feature = "time")] #[logfn] #[rstest] #[case(spsc::bounded_async(100))] #[case(mpsc::bounded_async(100))] #[case(mpmc::bounded_async(100))] fn test_basic_timeout_recv_async_waker, R: AsyncRxTrait>( setup_log: (), #[case] channel: (T, R), ) { let (tx, rx) = channel; let rounds = { #[cfg(miri)] { 10 } #[cfg(not(miri))] { 1000 } }; runtime_block_on!(async move { for _ in 0..rounds { assert!(rx.recv_with_timer(sleep(Duration::from_millis(1))).await.is_err()); } let (tx_wakers, rx_wakers) = rx.get_wakers_count(); println!("wakers: {}, {}", tx_wakers, 
rx_wakers); assert!(tx_wakers <= 1); assert!(rx_wakers <= 1); sleep(Duration::from_secs(1)).await; let _ = tx.send(1).await; assert_eq!(rx.recv().await.unwrap(), 1); let (tx_wakers, rx_wakers) = rx.get_wakers_count(); println!("wakers: {}, {}", tx_wakers, rx_wakers); assert!(tx_wakers <= 1); assert!(rx_wakers <= 1); }); } #[cfg(feature = "time")] #[logfn] #[rstest] #[case(spsc::unbounded_async())] #[case(mpsc::unbounded_async())] #[case(mpmc::unbounded_async())] fn test_basic_unbounded_recv_timeout_async, R: AsyncRxTrait>( setup_log: (), #[case] _channel: (T, R), ) { let (tx, rx) = _channel; runtime_block_on!(async move { let th = async_spawn!(async move { sleep(Duration::from_millis(50)).await; let _ = tx.send(1); }); let _r = rx.recv_with_timer(sleep(Duration::from_millis(1))).await; #[cfg(not(miri))] { assert_eq!(_r.unwrap_err(), RecvTimeoutError::Timeout); } let _ = async_join_result!(th); let (tx_wakers, rx_wakers) = rx.get_wakers_count(); println!("wakers: {}, {}", tx_wakers, rx_wakers); assert_eq!(tx_wakers, 0); assert_eq!(rx_wakers, 0); let _r = rx.recv_with_timer(sleep(Duration::from_millis(200))).await; #[cfg(not(miri))] { assert_eq!(_r.unwrap(), 1); } }); } #[cfg(feature = "time")] #[logfn] #[rstest] #[case(spsc::bounded_async(10))] #[case(mpsc::bounded_async(10))] #[case(mpmc::bounded_async(10))] fn test_basic_send_timeout_async, R: AsyncRxTrait>( setup_log: (), #[case] _channel: (T, R), ) { let (tx, rx) = _channel; for i in 0..10 { assert!(tx.try_send(i).is_ok()); } runtime_block_on!(async move { assert_eq!( tx.send_with_timer(11, sleep(Duration::from_millis(1))).await.unwrap_err(), SendTimeoutError::Timeout(11) ); let th = async_spawn!(async move { loop { sleep(Duration::from_millis(2)).await; if let Err(_) = rx.recv().await { println!("tx dropped"); break; } } }); let mut try_times = 0; loop { try_times += 1; match tx.send_with_timer(11, sleep(Duration::from_millis(1))).await { Ok(_) => { println!("send ok after {} tries", try_times); break; } 
Err(SendTimeoutError::Timeout(msg)) => { println!("timeout"); assert_eq!(msg, 11); } Err(SendTimeoutError::Disconnected(_)) => { unreachable!(); } } } let (tx_wakers, rx_wakers) = tx.get_wakers_count(); println!("wakers: {}, {}", tx_wakers, rx_wakers); assert!(tx_wakers <= 1, "{:?}", tx_wakers); assert!(rx_wakers <= 1, "{:?}", rx_wakers); drop(tx); let _ = async_join_result!(th); }); } #[cfg(feature = "time")] #[logfn] #[rstest] #[case(mpmc::bounded_async(1))] fn test_pressure_bounded_timeout_async + 'static>( setup_log: (), #[case] _channel: (MAsyncTx, MAsyncRx), ) { use std::collections::HashMap; let (tx, rx) = _channel; let tx_count: usize = 3; let rx_count: usize = 2; runtime_block_on!(async move { assert_eq!( rx.recv_with_timer(sleep(Duration::from_millis(1))).await.unwrap_err(), RecvTimeoutError::Timeout ); let (tx_wakers, rx_wakers) = rx.get_wakers_count(); println!("wakers: {}, {}", tx_wakers, rx_wakers); assert_eq!(tx_wakers, 0); assert_eq!(rx_wakers, 0); let recv_map = Arc::new(Mutex::new(HashMap::new())); let mut th_tx = Vec::new(); let mut th_rx = Vec::new(); for thread_id in 0..tx_count { let _recv_map = recv_map.clone(); let _tx = tx.clone(); th_tx.push(async_spawn!(async move { let mut local_send_timeout_count = 0; let mut i = 0; // randomize start up sleep(Duration::from_millis((thread_id & 3) as u64)).await; loop { if i >= ROUND { return local_send_timeout_count; } { let mut guard = _recv_map.lock().unwrap(); guard.insert(i, ()); } if i & 2 == 0 { sleep(Duration::from_millis(3)).await; } else { sleep(Duration::from_millis(1)).await; } loop { match _tx.send_with_timer(i, sleep(Duration::from_millis(1))).await { Ok(_) => { i += 1; break; } Err(SendTimeoutError::Timeout(_i)) => { local_send_timeout_count += 1; assert_eq!(_i, i); } Err(SendTimeoutError::Disconnected(_)) => { unreachable!(); } } } } })); } for _thread_id in 0..rx_count { let _rx = rx.clone(); let _recv_map = recv_map.clone(); th_rx.push(async_spawn!(async move { let mut step: usize = 0; 
let mut local_recv_count: usize = 0; let mut local_recv_timeout_count: usize = 0; loop { step += 1; let timeout = if step & 2 == 0 { 1 } else { 2 }; if step & 2 > 0 { sleep(Duration::from_millis(1)).await; } match _rx.recv_with_timer(sleep(Duration::from_millis(timeout))).await { Ok(item) => { local_recv_count += 1; { let mut guard = _recv_map.lock().unwrap(); guard.remove(&item); } } Err(RecvTimeoutError::Timeout) => { local_recv_timeout_count += 1; } Err(RecvTimeoutError::Disconnected) => { return (local_recv_count, local_recv_timeout_count); } } } })); } drop(tx); drop(rx); let mut total_send_timeout_count = 0; for th in th_tx { total_send_timeout_count += async_join_result!(th); } let mut total_recv_count = 0; let mut total_recv_timeout_count = 0; for th in th_rx { let (recv_count, recv_timeout_count) = async_join_result!(th); total_recv_count += recv_count; total_recv_timeout_count += recv_timeout_count; } { let guard = recv_map.lock().unwrap(); assert!(guard.is_empty()); } assert_eq!(ROUND * tx_count, total_recv_count); println!("send timeout count: {}", total_send_timeout_count); println!("recv timeout count: {}", total_recv_timeout_count); }); } #[logfn] #[rstest] #[case(spsc::bounded_async(1))] #[case(spsc::bounded_async(10))] #[case(spsc::bounded_async(100))] #[case(mpmc::bounded_async(1))] #[case(mpmc::bounded_async(10))] #[case(mpmc::bounded_async(100))] #[case(mpmc::bounded_async(300))] fn test_pressure_bounded_async_1_1, R: AsyncRxTrait>( setup_log: (), #[case] channel: (T, R), ) { let (tx, rx) = channel; runtime_block_on!(async move { let mut counter: usize = 0; let th = async_spawn!(async move { for i in 0..ROUND { if let Err(e) = tx.send(i).await { panic!("{:?}", e); } } debug!("tx{:?} exit", tokio_task_id!()); }); 'A: loop { match rx.recv().await { Ok(_i) => { assert_eq!(_i, counter); trace!("recv {}", _i); counter += 1; } Err(_) => break 'A, } } drop(rx); let _ = async_join_result!(th); assert_eq!(counter, ROUND); }); } #[logfn] #[rstest] 
#[case(mpsc::bounded_async(1), 5)] #[case(mpsc::bounded_async(1), 100)] #[case(mpsc::bounded_async(1), 300)] #[case(mpsc::bounded_async(10), 5)] #[case(mpsc::bounded_async(10), 100)] #[case(mpsc::bounded_async(10), 300)] #[case(mpsc::bounded_async(100), 10)] #[case(mpsc::bounded_async(100), 100)] #[case(mpsc::bounded_async(100), 300)] #[case(mpmc::bounded_async(1), 5)] #[case(mpmc::bounded_async(1), 100)] #[case(mpmc::bounded_async(1), 300)] #[case(mpmc::bounded_async(10), 5)] #[case(mpmc::bounded_async(10), 100)] #[case(mpmc::bounded_async(10), 300)] #[case(mpmc::bounded_async(100), 5)] #[case(mpmc::bounded_async(100), 100)] #[case(mpmc::bounded_async(100), 300)] fn test_pressure_bounded_async_multi_1< F: Flavor + 'static, R: AsyncRxTrait, >( setup_log: (), #[case] channel: (MAsyncTx, R), #[case] tx_count: usize, ) { let (tx, rx) = channel; #[cfg(miri)] { if tx_count > 10 { println!("skip"); return; } } runtime_block_on!(async move { let mut counter = 0; let mut th_s = Vec::new(); for _tx_i in 0..tx_count { let _tx = tx.clone(); th_s.push(async_spawn!(async move { debug!("tx{:?} {} spawn", tokio_task_id!(), _tx_i); for i in 0..ROUND { match _tx.send(i).await { Err(e) => panic!("{:?}", e), _ => {} } } debug!("tx{:?} {} exit", tokio_task_id!(), _tx_i); })); } drop(tx); 'A: loop { match rx.recv().await { Ok(_i) => { counter += 1; trace!("recv {}", _i); } Err(_) => break 'A, } } drop(rx); for th in th_s { let _ = async_join_result!(th); } assert_eq!(counter, ROUND * tx_count); }); } #[logfn] #[rstest] #[case(mpmc::bounded_async(1), 5, 5)] #[case(mpmc::bounded_async(1), 100, 10)] #[case(mpmc::bounded_async(1), 10, 100)] #[case(mpmc::bounded_async(1), 300, 300)] #[case(mpmc::bounded_async(10), 5, 5)] #[case(mpmc::bounded_async(10), 100, 10)] #[case(mpmc::bounded_async(10), 10, 100)] #[case(mpmc::bounded_async(10), 300, 300)] #[case(mpmc::bounded_async(100), 5, 5)] #[case(mpmc::bounded_async(100), 100, 10)] #[case(mpmc::bounded_async(100), 10, 100)] 
#[case(mpmc::bounded_async(100), 300, 300)] fn test_pressure_bounded_async_multi + 'static>( setup_log: (), #[case] channel: (MAsyncTx, MAsyncRx), #[case] tx_count: usize, #[case] rx_count: usize, ) { #[cfg(miri)] { if rx_count > 5 || tx_count > 5 { println!("skip"); return; } } let (tx, rx) = channel; runtime_block_on!(async move { let mut th_tx = Vec::new(); let mut th_rx = Vec::new(); for _tx_i in 0..tx_count { let _tx = tx.clone(); th_tx.push(async_spawn!(async move { debug!("tx{:?} {} spawn", tokio_task_id!(), _tx_i); for i in 0..ROUND { match _tx.send(i).await { Err(e) => panic!("{:?}", e), _ => {} } } debug!("tx{:?} {} exit", tokio_task_id!(), _tx_i); })); } for _rx_i in 0..rx_count { let _rx = rx.clone(); th_rx.push(async_spawn!(async move { debug!("rx{:?} {} spawn", tokio_task_id!(), _rx_i); let mut count = 0; 'A: loop { match _rx.recv().await { Ok(_i) => { count += 1; trace!("recv {} {}", _rx_i, _i); } Err(_) => break 'A, } } debug!("rx{:?} {} exit", tokio_task_id!(), _rx_i); count })); } drop(tx); drop(rx); for th in th_tx { let _ = async_join_result!(th); } let mut recv_count = 0; for th in th_rx { recv_count += async_join_result!(th); } assert_eq!(recv_count, ROUND * tx_count); }); } #[logfn] #[rstest] #[case(mpmc::bounded_async(1))] #[case(mpmc::bounded_async(10))] #[case(mpmc::bounded_async(100))] fn test_pressure_bounded_mixed_async_blocking_conversion + 'static>( setup_log: (), #[case] channel: (MAsyncTx, MAsyncRx), ) { let (tx, rx) = channel; runtime_block_on!(async move { let mut recv_counter = 0; let mut th_tx = Vec::new(); let mut th_rx = Vec::new(); let mut co_tx = Vec::new(); let mut co_rx = Vec::new(); let _tx: MTx = tx.clone().into(); th_tx.push(thread::spawn(move || { for i in 0..ROUND { match _tx.send(i) { Err(e) => panic!("{:?}", e), _ => {} } } debug!("tx blocking exit"); })); co_tx.push(async_spawn!(async move { for i in 0..ROUND { match tx.send(i).await { Err(e) => panic!("{:?}", e), _ => {} } } debug!("tx{:?} async exit", 
tokio_task_id!()); })); let _rx: MRx = rx.clone().into(); th_rx.push(thread::spawn(move || { let mut count: usize = 0; 'A: loop { match _rx.recv() { Ok(_i) => { count += 1; trace!("recv blocking {}", _i); } Err(_) => break 'A, } } debug!("rx blocking exit"); count })); co_rx.push(async_spawn!(async move { let mut count: usize = 0; 'A: loop { match rx.recv().await { Ok(_i) => { count += 1; trace!("recv async {}", _i); } Err(_) => break 'A, } } debug!("rx{:?} async exit", tokio_task_id!()); count })); for th in co_tx { let _ = async_join_result!(th); } for th in co_rx { recv_counter += async_join_result!(th); } for th in th_tx { let _ = th.join().unwrap(); } for th in th_rx { recv_counter += th.join().unwrap(); } assert_eq!(recv_counter, ROUND * 2); }); } #[test] fn test_conversion() { let (mtx, mrx) = mpmc::bounded_async::(1); let _tx: AsyncTx<_> = mtx.into(); let _rx: AsyncRx<_> = mrx.into(); let (_mtx, rx) = mpsc::bounded_async::(1); let _stream: AsyncStream<_> = rx.into(); // AsyncRx -> AsyncStream let (_mtx, mrx) = mpmc::bounded_async::(1); let _stream: AsyncStream<_> = mrx.into(); // AsyncRx -> AsyncStream } #[allow(dead_code)] struct SpuriousTx { sink: AsyncSink, normal: bool, step: usize, } impl + Unpin> Future for SpuriousTx { type Output = Result; fn poll(self: Pin<&mut Self>, ctx: &mut std::task::Context) -> Poll { let mut _self = self.get_mut(); if !_self.normal && _self.step > 0 { return Poll::Ready(Err(_self.step)); } match _self.sink.poll_send(ctx, _self.step) { Ok(_) => { let res = _self.step; _self.step += 1; return Poll::Ready(Ok(res)); } Err(TrySendError::Disconnected(_)) => { return Poll::Ready(Err(_self.step)); } Err(TrySendError::Full(_)) => { _self.step += 1; return Poll::Pending; } } } } #[allow(dead_code)] struct SpuriousRx { stream: AsyncStream, normal: bool, step: usize, } impl + Unpin> Future for SpuriousRx { type Output = Result; fn poll(self: Pin<&mut Self>, ctx: &mut std::task::Context) -> Poll { let mut _self = self.get_mut(); if 
!_self.normal && _self.step > 0 { return Poll::Ready(Err(_self.step)); } match _self.stream.poll_item(ctx) { Poll::Ready(Some(item)) => { _self.step += 1; return Poll::Ready(Ok(item)); } Poll::Ready(None) => { return Poll::Ready(Err(_self.step)); } Poll::Pending => { _self.step += 1; return Poll::Pending; } } } } #[cfg(feature = "time")] #[logfn] #[rstest] fn test_spurious_sink(setup_log: ()) { #[cfg(feature = "tokio")] { let (tx, rx) = mpmc::bounded_async(1); async fn spawn_tx + Unpin + 'static>( tx: MAsyncTx, normal: bool, ) { let sink = tx.into_sink(); let _tx = SpuriousTx { sink, normal, step: 0 }; if normal { assert_eq!(_tx.await.expect("send ok"), 1); } else { match tokio::time::timeout(Duration::from_secs(5), _tx).await { Ok(Err(step)) => { assert_eq!(step, 1); } Ok(Ok(step)) => { panic!("unexpected ok in step={}", step); } Err(_) => { panic!("tokio timeout"); } } } } runtime_block_on!(async move { tx.send(0).await.expect("send"); let _tx = tx.clone(); let mut th_s = Vec::new(); println!("spawn spurious"); // Make sure its the first th_s.push(tokio::spawn(async move { spawn_tx(_tx, false).await })); sleep(Duration::from_secs(1)).await; let _tx = tx.clone(); println!("spawn normal"); th_s.push(tokio::spawn(async move { spawn_tx(_tx, true).await })); sleep(Duration::from_secs(1)).await; println!("recv 1 to wake the 2 senders in Init state"); assert_eq!(rx.recv().await.expect("recv"), 0); for th in th_s { let _ = async_join_result!(th); } }); } } #[cfg(feature = "time")] #[logfn] #[rstest] fn test_spurious_stream(setup_log: ()) { #[cfg(feature = "tokio")] { let (tx, rx) = mpmc::bounded_async(1); async fn spawn_rx + Unpin + 'static>( rx: MAsyncRx, normal: bool, ) { let stream = rx.into_stream(); let _rx = SpuriousRx { stream, normal, step: 0 }; if normal { assert_eq!(_rx.await.expect("recv ok"), 1); } else { if let Ok(Err(step)) = tokio::time::timeout(Duration::from_secs(10), _rx).await { assert_eq!(step, 1); } else { unreachable!(); } } } 
runtime_block_on!(async move { let _rx = rx.clone(); let mut th_s = Vec::new(); println!("spawn spurious"); // Make sure its the first th_s.push(tokio::spawn(async move { spawn_rx(_rx, false).await })); sleep(Duration::from_millis(500)).await; let _rx = rx.clone(); println!("spawn normal"); th_s.push(tokio::spawn(async move { spawn_rx(_rx, true).await })); sleep(Duration::from_secs(1)).await; println!("send"); tx.send(1).await.expect("send"); sleep(Duration::from_secs(2)).await; for th in th_s { let _ = async_join_result!(th); } }); } } #[logfn] #[rstest] #[case(spsc::bounded_async(1))] #[case(spsc::bounded_async(2))] #[case(mpsc::bounded_async(1))] #[case(mpsc::bounded_async(2))] #[case(mpmc::bounded_async(1))] #[case(mpmc::bounded_async(2))] fn test_basic_into_stream_1_1, R: AsyncRxTrait>( setup_log: (), #[case] channel: (T, R), ) { runtime_block_on!(async move { let total_message = 100; let (tx, rx) = channel; let th = async_spawn!(async move { println!("sender thread send {} message start", total_message); for i in 0usize..total_message { let _ = tx.send(i).await; // println!("send {}", i); } println!("sender thread send {} message end", total_message); }); let mut s: Pin>> = rx.to_stream(); for _i in 0..total_message { assert_eq!(s.next().await, Some(_i)); } assert_eq!(s.next().await, None); //assert!(s.is_terminated()); async_join_result!(th); }); } #[logfn] #[rstest] #[case(mpmc::bounded_async(1), 2)] #[case(mpmc::bounded_async(2), 4)] #[case(mpmc::bounded_async(2), 10)] #[case(mpmc::bounded_async(10), 3)] #[case(mpmc::bounded_async(10), 30)] #[case(mpmc::bounded_async(100), 2)] #[case(mpmc::bounded_async(100), 4)] #[case(mpmc::bounded_async(100), 50)] fn test_pressure_stream_multi + 'static>( setup_log: (), #[case] channel: (MAsyncTx, MAsyncRx), #[case] rx_count: usize, ) { #[cfg(miri)] { if rx_count > 5 { println!("skip"); return; } } runtime_block_on!(async move { let (tx, rx) = channel; let mut th_s = Vec::new(); let mut recv_counter = 0; for rx_i in 
0..rx_count { let _rx = rx.clone(); th_s.push(async_spawn!(async move { let mut counter = 0; let mut stream = _rx.into_stream(); while let Some(_item) = stream.next().await { counter += 1; } debug!("rx{:?} {} exit", tokio_task_id!(), rx_i); counter })); } drop(rx); for i in 0..ROUND { tx.send(i).await.expect("send"); } drop(tx); for th in th_s { recv_counter += async_join_result!(th); } assert_eq!(recv_counter, ROUND); }); } #[cfg(feature = "time")] #[logfn] #[rstest] #[case(mpmc::bounded_async(1), 2)] #[case(mpmc::bounded_async(2), 4)] #[case(mpmc::bounded_async(2), 10)] #[case(mpmc::bounded_async(10), 3)] #[case(mpmc::bounded_async(10), 30)] #[case(mpmc::bounded_async(100), 2)] #[case(mpmc::bounded_async(100), 4)] #[case(mpmc::bounded_async(100), 50)] fn test_pressure_stream_multi_idle + 'static>( setup_log: (), #[case] channel: (MAsyncTx, MAsyncRx), #[case] rx_count: usize, ) { #[cfg(miri)] { if rx_count > 5 { println!("skip"); return; } } runtime_block_on!(async move { let total_message = ROUND / rx_count; let (tx, rx) = channel; let mut th_s = Vec::new(); for rx_i in 0..rx_count { let _rx = rx.clone(); th_s.push(async_spawn!(async move { debug!("rx{:?} {} spawn", tokio_task_id!(), rx_i); let mut count = 0; let mut stream = _rx.into_stream(); while let Some(_item) = stream.next().await { count += 1; } debug!("rx{:?} {} exit", tokio_task_id!(), rx_i); count })); } drop(rx); for i in 0..total_message { tx.send(i).await.expect("send"); sleep(Duration::from_millis(3)).await; } drop(tx); let mut recv_counter = 0; for th in th_s { recv_counter += async_join_result!(th); } assert_eq!(recv_counter, total_message); }); } // This test make sure we have correctly use of maybeuninit #[logfn] #[rstest] #[case(spsc::bounded_async(1))] #[case(spsc::bounded_async(10))] #[case(mpsc::bounded_async(1))] #[case(mpsc::bounded_async(10))] #[case(mpmc::bounded_async(1))] #[case(mpmc::bounded_async(10))] fn test_async_drop_small_msg, R: AsyncRxTrait>( setup_log: (), #[case] channel: 
(T, R), ) { println!("needs_drop {}", std::mem::needs_drop::()); _test_async_drop_msg(channel); } // This test make sure we have correctly use of maybeuninit #[logfn] #[rstest] #[case(spsc::bounded_async(1))] #[case(spsc::bounded_async(10))] #[case(mpsc::bounded_async(1))] #[case(mpsc::bounded_async(10))] #[case(mpmc::bounded_async(1))] #[case(mpmc::bounded_async(10))] fn test_async_drop_large_msg, R: AsyncRxTrait>( setup_log: (), #[case] channel: (T, R), ) { println!("needs_drop {}", std::mem::needs_drop::()); _test_async_drop_msg(channel); } fn _test_async_drop_msg, R: AsyncRxTrait>(channel: (T, R)) { let (tx, rx) = channel; reset_drop_counter(); runtime_block_on!(async move { let cap = tx.capacity().unwrap(); let mut ids = cap; for i in 0..ids { let msg = M::new(i); assert!(tx.try_send(msg).is_ok()); } assert_eq!(get_drop_counter(), 0); let msg = M::new(ids); if let Err(TrySendError::Full(_msg)) = tx.try_send(msg) { assert_eq!(_msg.get_value(), ids); assert_eq!(get_drop_counter(), 0); drop(_msg); assert_eq!(get_drop_counter(), 1); } else { unreachable!(); } let th = async_spawn!(async move { let _msg = rx.recv().await.expect("recv"); assert_eq!(_msg.get_value(), 0); drop(_msg); rx }); let msg = M::new(ids); tx.send(msg).await.expect("send"); ids += 1; let rx = async_join_result!(th); drop(rx); assert_eq!(get_drop_counter(), 2); let msg = M::new(ids); if let Err(TrySendError::Disconnected(_msg)) = tx.try_send(msg) { assert_eq!(_msg.get_value(), ids); } else { unreachable!(); } ids += 1; let msg = M::new(ids); if let Err(SendError(_msg)) = tx.send(msg).await { assert_eq!(_msg.get_value(), ids); } else { unreachable!(); } assert_eq!(get_drop_counter(), 4); ids += 1; drop(tx); // every thing dropped inside the channel assert_eq!(get_drop_counter(), ids + 1); // ids begins at 0 assert_eq!(get_drop_counter(), 4 + cap); }); } ================================================ FILE: test-suite/src/test_async_blocking.rs ================================================ use 
crate::*; use captains_log::{logfn, *}; use crossfire::flavor::Flavor; use crossfire::tokio_task_id; use crossfire::*; use rstest::*; use std::thread; use std::time::Duration; #[fixture] fn setup_log() { _setup_log(); } #[logfn] #[rstest] #[case(spsc::bounded_async_blocking(1))] #[case(mpsc::bounded_async_blocking(1))] #[case(mpmc::bounded_async_blocking(1))] fn test_basic_bounded_empty_full_drop_rx, R: BlockingRxTrait>( setup_log: (), #[case] channel: (T, R), ) { let (tx, rx) = channel; assert!(tx.is_empty()); assert!(rx.is_empty()); tx.try_send(1).expect("Ok"); assert!(tx.is_full()); assert!(rx.is_full()); assert!(!tx.is_empty()); assert_eq!(tx.is_disconnected(), false); assert_eq!(rx.is_disconnected(), false); drop(rx); assert_eq!(tx.is_disconnected(), true); assert_eq!(tx.get_rx_count(), 0); assert_eq!(tx.get_tx_count(), 1); } #[logfn] #[rstest] #[case(spsc::bounded_async_blocking(1))] #[case(mpsc::bounded_async_blocking(1))] #[case(mpmc::bounded_async_blocking(1))] fn test_basic_bounded_empty_full_drop_tx, R: BlockingRxTrait>( setup_log: (), #[case] channel: (T, R), ) { let (tx, rx) = channel; assert!(tx.is_empty()); assert!(rx.is_empty()); tx.try_send(1).expect("Ok"); assert!(tx.is_full()); assert!(rx.is_full()); assert!(!tx.is_empty()); assert_eq!(tx.is_disconnected(), false); assert_eq!(rx.is_disconnected(), false); drop(tx); assert_eq!(rx.is_disconnected(), true); assert_eq!(rx.get_tx_count(), 0); assert_eq!(rx.get_rx_count(), 1); } #[logfn] #[rstest] fn test_basic_compile_bounded_empty_full() { let (tx, rx) = mpmc::bounded_async_blocking(1); assert!(tx.is_empty()); assert!(rx.is_empty()); tx.try_send(1).expect("ok"); assert!(tx.is_full()); assert!(!tx.is_empty()); assert!(rx.is_full()); assert_eq!(tx.get_tx_count(), 1); assert_eq!(rx.get_tx_count(), 1); assert_eq!(tx.is_disconnected(), false); assert_eq!(rx.is_disconnected(), false); drop(rx); assert_eq!(tx.is_disconnected(), true); } #[cfg(feature = "time")] #[logfn] #[rstest] 
#[case(spsc::bounded_async_blocking(100))] #[case(mpsc::bounded_async_blocking(100))] #[case(mpmc::bounded_async_blocking(100))] fn test_basic_1_tx_async_1_rx_blocking, R: BlockingRxTrait>( setup_log: (), #[case] channel: (T, R), ) { let (tx, rx) = channel; let rx_res = rx.try_recv(); assert!(rx_res.is_err()); assert!(rx_res.unwrap_err().is_empty()); let batch_1: usize = 100; let batch_2: usize = 200; let th = thread::spawn(move || { for count in 0..(batch_1 + batch_2) { match rx.recv() { Ok(i) => { trace!("recv {}", i); if count < batch_1 { // First batch: values 0..batch_1 assert_eq!(i, count); } else { // Second batch: values 10+batch_1..10+batch_1+batch_2 assert_eq!(i, 10 + count); } } Err(e) => { panic!("error {}", e); } } } let res = rx.recv(); assert!(res.is_err()); }); runtime_block_on!(async move { for i in 0..batch_1 { let tx_res = tx.send(i).await; assert!(tx_res.is_ok()); } for i in batch_1..(batch_1 + batch_2) { assert!(tx.send(10 + i).await.is_ok()); sleep(Duration::from_millis(2)).await; } }); let _ = th.join().unwrap(); } #[cfg(feature = "time")] #[logfn] #[rstest] #[case(mpsc::bounded_async_blocking(10), 5)] #[case(mpsc::bounded_async_blocking(10), 8)] #[case(mpsc::bounded_async_blocking(10), 100)] #[case(mpsc::bounded_async_blocking(10), 1000)] #[case(mpmc::bounded_async_blocking(10), 5)] #[case(mpmc::bounded_async_blocking(10), 8)] #[case(mpmc::bounded_async_blocking(10), 100)] fn test_basic_multi_tx_async_1_rx_blocking< F: Flavor + 'static, R: BlockingRxTrait, >( setup_log: (), #[case] channel: (MAsyncTx, R), #[case] tx_count: usize, ) { let (tx, rx) = channel; let batch_1: usize; let batch_2: usize; #[cfg(miri)] { if tx_count > 5 { println!("skip"); return; } batch_1 = 10; batch_2 = 20; } #[cfg(not(miri))] { batch_1 = 100; batch_2 = 200; } let rx_res = rx.try_recv(); assert!(rx_res.is_err()); assert!(rx_res.unwrap_err().is_empty()); let th = thread::spawn(move || { for _ in 0..((batch_1 + batch_2) * tx_count) { match rx.recv() { Ok(i) => { 
trace!("recv {}", i); } Err(e) => { panic!("error {}", e); } } } let res = rx.recv(); assert!(res.is_err()); // Wait for spawn exit }); runtime_block_on!(async move { let mut th_s = Vec::new(); for _tx_i in 0..tx_count { let _tx = tx.clone(); th_s.push(async_spawn!(async move { for i in 0..batch_1 { let tx_res = _tx.send(i).await; assert!(tx_res.is_ok()); } for i in batch_1..(batch_1 + batch_2) { assert!(_tx.send(10 + i).await.is_ok()); sleep(Duration::from_millis(2)).await; } })); } drop(tx); for th in th_s { let _ = async_join_result!(th); } }); let _ = th.join().unwrap(); } #[logfn] #[rstest] #[case(spsc::bounded_async_blocking(1))] #[case(spsc::bounded_async_blocking(10))] #[case(spsc::bounded_async_blocking(100))] #[case(spsc::bounded_async_blocking(1000))] #[case(mpsc::bounded_async_blocking(1))] #[case(mpsc::bounded_async_blocking(10))] #[case(mpsc::bounded_async_blocking(100))] #[case(mpsc::bounded_async_blocking(1000))] #[case(mpmc::bounded_async_blocking(1))] #[case(mpmc::bounded_async_blocking(10))] #[case(mpmc::bounded_async_blocking(100))] #[case(mpmc::bounded_async_blocking(1000))] fn test_pressure_1_tx_async_1_rx_blocking, R: BlockingRxTrait>( setup_log: (), #[case] channel: (T, R), ) { let (tx, rx) = channel; let round: usize; #[cfg(miri)] { round = ROUND; } #[cfg(not(miri))] { round = ROUND * 100; } let th = thread::spawn(move || { let mut count = 0; 'A: loop { match rx.recv() { Ok(i) => { assert_eq!(i, count); count += 1; trace!("recv {}", i); } Err(_) => break 'A, } } debug!("rx exit"); count }); runtime_block_on!(async move { for i in 0..round { match tx.send(i).await { Err(e) => panic!("{}", e), _ => {} } } debug!("tx{:?} exit", tokio_task_id!()); }); let rx_count = th.join().unwrap(); assert_eq!(rx_count, round); } #[logfn] #[rstest] #[case(mpsc::bounded_async_blocking(10), 5)] #[case(mpsc::bounded_async_blocking(10), 10)] #[case(mpsc::bounded_async_blocking(10), 100)] #[case(mpsc::bounded_async_blocking(100), 50)] 
#[case(mpmc::bounded_async_blocking(10), 5)] #[case(mpmc::bounded_async_blocking(10), 100)] #[case(mpmc::bounded_async_blocking(10), 10)] #[case(mpmc::bounded_async_blocking(10), 1000)] #[case(mpmc::bounded_async_blocking(100), 100)] fn test_pressure_multi_tx_async_1_rx_blocking< F: Flavor + 'static, R: BlockingRxTrait, >( setup_log: (), #[case] channel: (MAsyncTx, R), #[case] tx_count: usize, ) { let (tx, rx) = channel; #[cfg(miri)] { if tx_count > 5 { println!("skip"); return; } } let round: usize = ROUND; let th = thread::spawn(move || { let mut count = 0; 'A: loop { match rx.recv() { Ok(_i) => { count += 1; trace!("recv {}", _i); } Err(_) => break 'A, } } debug!("rx exit"); count }); runtime_block_on!(async move { let mut th_co = Vec::new(); for _tx_i in 0..tx_count { let _tx = tx.clone(); th_co.push(async_spawn!(async move { debug!("tx{:?} {} spawn", tokio_task_id!(), _tx_i); for i in 0..round { match _tx.send(i).await { Err(e) => panic!("{}", e), _ => {} } } debug!("tx{:?} {} exit", tokio_task_id!(), _tx_i); })); } drop(tx); for th in th_co { let _ = async_join_result!(th); } }); let rx_count = th.join().unwrap(); assert_eq!(rx_count, round * tx_count); } #[logfn] #[rstest] #[case(mpmc::bounded_async_blocking(10), 5, 5)] #[case(mpmc::bounded_async_blocking(10), 100, 50)] #[case(mpmc::bounded_async_blocking(10), 10, 100)] #[case(mpmc::bounded_async_blocking(100), 300, 100)] fn test_pressure_multi_tx_async_multi_rx_blocking + 'static>( setup_log: (), #[case] channel: (MAsyncTx, MRx), #[case] tx_count: usize, #[case] rx_count: usize, ) { let (tx, rx) = channel; #[cfg(miri)] { if tx_count > 5 || rx_count > 5 { println!("skip"); return; } } let round: usize = ROUND; let mut rx_th_s = Vec::new(); for _rx_i in 0..rx_count { let _rx = rx.clone(); rx_th_s.push(thread::spawn(move || { debug!("rx {} spawn", _rx_i); let mut count = 0; 'A: loop { match _rx.recv() { Ok(i) => { count += 1; trace!("recv {} {}", _rx_i, i); } Err(_) => break 'A, } } debug!("rx {} exit", 
_rx_i); count })); } drop(rx); runtime_block_on!(async move { let mut th_co = Vec::new(); for _tx_i in 0..tx_count { let _tx = tx.clone(); th_co.push(async_spawn!(async move { debug!("tx{:?} {} spawn", tokio_task_id!(), _tx_i); for i in 0..round { match _tx.send(i).await { Err(e) => panic!("{}", e), _ => {} } } debug!("tx{:?} {} exit", tokio_task_id!(), _tx_i); })); } drop(tx); for th in th_co { let _ = async_join_result!(th); } }); let mut total_count = 0; for th in rx_th_s { total_count += th.join().unwrap(); } assert_eq!(total_count, round * tx_count); } ================================================ FILE: test-suite/src/test_blocking_async.rs ================================================ use crate::*; use captains_log::{logfn, *}; use crossfire::flavor::Flavor; use crossfire::tokio_task_id; use crossfire::*; use rstest::*; use std::time::*; #[fixture] fn setup_log() { _setup_log(); } #[logfn] #[rstest] #[case(spsc::bounded_blocking_async(1))] #[case(mpsc::bounded_blocking_async(1))] #[case(mpmc::bounded_blocking_async(1))] fn test_basic_bounded_empty_full_drop_rx, R: AsyncRxTrait>( setup_log: (), #[case] channel: (T, R), ) { let (tx, rx) = channel; assert!(tx.is_empty()); assert!(rx.is_empty()); tx.try_send(1).expect("Ok"); assert!(tx.is_full()); assert!(rx.is_full()); assert!(!tx.is_empty()); assert_eq!(tx.is_disconnected(), false); assert_eq!(rx.is_disconnected(), false); drop(rx); assert_eq!(tx.is_disconnected(), true); assert_eq!(tx.get_rx_count(), 0); assert_eq!(tx.get_tx_count(), 1); } #[logfn] #[rstest] #[case(spsc::bounded_blocking_async(1))] #[case(mpsc::bounded_blocking_async(1))] #[case(mpmc::bounded_blocking_async(1))] fn test_basic_bounded_empty_full_drop_tx, R: AsyncRxTrait>( setup_log: (), #[case] channel: (T, R), ) { let (tx, rx) = channel; assert!(tx.is_empty()); assert!(rx.is_empty()); tx.try_send(1).expect("Ok"); assert!(tx.is_full()); assert!(rx.is_full()); assert!(!tx.is_empty()); assert_eq!(tx.is_disconnected(), false); 
assert_eq!(rx.is_disconnected(), false); drop(tx); assert_eq!(rx.is_disconnected(), true); assert_eq!(rx.get_tx_count(), 0); assert_eq!(rx.get_rx_count(), 1); } #[logfn] #[rstest] #[case(spsc::unbounded_async())] #[case(mpsc::unbounded_async())] #[case(mpmc::unbounded_async())] fn test_basic_unbounded_empty_drop_tx, R: AsyncRxTrait>( setup_log: (), #[case] channel: (T, R), ) { let (tx, rx) = channel; assert!(tx.is_empty()); assert!(rx.is_empty()); tx.try_send(1).expect("Ok"); assert!(!tx.is_empty()); assert_eq!(tx.is_disconnected(), false); assert_eq!(rx.is_disconnected(), false); drop(tx); assert_eq!(rx.is_disconnected(), true); assert_eq!(rx.get_tx_count(), 0); assert_eq!(rx.get_rx_count(), 1); } #[logfn] #[rstest] fn test_basic_compile_bounded_empty_full() { let (tx, rx) = mpmc::bounded_blocking_async(1); assert!(tx.is_empty()); assert!(rx.is_empty()); tx.try_send(1).expect("ok"); assert!(tx.is_full()); assert!(!tx.is_empty()); assert!(rx.is_full()); assert_eq!(tx.get_tx_count(), 1); assert_eq!(rx.get_tx_count(), 1); assert_eq!(tx.is_disconnected(), false); assert_eq!(rx.is_disconnected(), false); drop(rx); assert_eq!(tx.is_disconnected(), true); } #[logfn] #[rstest] #[case(spsc::bounded_blocking_async(10))] #[case(mpsc::bounded_blocking_async(10))] #[case(mpmc::bounded_blocking_async(10))] fn test_basic_1_tx_blocking_1_rx_async, R: AsyncRxTrait>( setup_log: (), #[case] channel: (T, R), ) { let (tx, rx) = channel; let rx_res = rx.try_recv(); assert!(rx_res.is_err()); assert!(rx_res.unwrap_err().is_empty()); for i in 0usize..10 { let tx_res = tx.send(i); assert!(tx_res.is_ok()); } let tx_res = tx.try_send(11); assert!(tx_res.is_err()); assert!(tx_res.unwrap_err().is_full()); let th = spawn_named_thread("sender_1", move || { assert!(tx.send(10).is_ok()); std::thread::sleep(Duration::from_secs(1)); assert!(tx.send(11).is_ok()); }); runtime_block_on!(async move { for i in 0usize..12 { match rx.recv().await { Ok(j) => { trace!("recv {}", i); assert_eq!(i, j); } 
Err(e) => { panic!("error {}", e); } } } let res = rx.recv().await; assert!(res.is_err()); trace!("rx close"); }); let _ = th.join().unwrap(); } #[logfn] #[rstest] #[case(spsc::bounded_blocking_async(1))] #[case(mpsc::bounded_blocking_async(1))] #[case(mpmc::bounded_blocking_async(1))] #[case(spsc::bounded_blocking_async(100))] #[case(mpsc::bounded_blocking_async(100))] #[case(mpmc::bounded_blocking_async(100))] #[case(spsc::unbounded_async())] #[case(mpsc::unbounded_async())] #[case(mpmc::unbounded_async())] fn test_pressure_1_tx_blocking_1_rx_async, R: AsyncRxTrait>( setup_log: (), #[case] channel: (T, R), ) { let (tx, rx) = channel; let round: usize; #[cfg(miri)] { round = ROUND; } #[cfg(not(miri))] { round = ROUND * 100; } let th = spawn_named_thread("sender_2", move || { for i in 0..round { tx.send(i).expect("send ok"); } }); runtime_block_on!(async move { for i in 0..round { match rx.recv().await { Ok(msg) => { trace!("recv {}", msg); assert_eq!(msg, i); } Err(_e) => { panic!("channel closed"); } } } assert!(rx.recv().await.is_err()); }); let _ = th.join().unwrap(); } #[logfn] #[rstest] #[case(mpsc::bounded_blocking_async(1), 5)] #[case(mpsc::bounded_blocking_async(1), 100)] #[case(mpsc::bounded_blocking_async(1), 200)] #[case(mpsc::bounded_blocking_async(100), 10)] #[case(mpsc::bounded_blocking_async(100), 100)] #[case(mpsc::bounded_blocking_async(100), 200)] #[case(mpmc::bounded_blocking_async(1), 5)] #[case(mpmc::bounded_blocking_async(1), 100)] #[case(mpmc::bounded_blocking_async(1), 300)] #[case(mpmc::bounded_blocking_async(100), 5)] #[case(mpmc::bounded_blocking_async(100), 100)] #[case(mpmc::bounded_blocking_async(100), 200)] #[case(mpsc::unbounded_async(), 5)] #[case(mpsc::unbounded_async(), 100)] #[case(mpsc::unbounded_async(), 300)] #[case(mpmc::unbounded_async(), 6)] #[case(mpmc::unbounded_async(), 100)] #[case(mpmc::unbounded_async(), 300)] fn test_pressure_tx_multi_blocking_1_rx_async< F: Flavor + 'static, R: AsyncRxTrait, >( setup_log: (), 
#[case] channel: (MTx, R), #[case] tx_count: usize, ) { let (tx, rx) = channel; #[cfg(miri)] { if tx_count > 5 { println!("skip"); return; } } let mut tx_th_s = Vec::new(); for _tx_i in 0..tx_count { let _tx = tx.clone(); tx_th_s.push(spawn_named_thread(&format!("sender_{}", _tx_i), move || { debug!("tx {} spawn", _tx_i); for i in 0..ROUND { match _tx.send(i) { Err(e) => panic!("{}", e), _ => { trace!("tx {} {}", _tx_i, i); } } } debug!("tx {} exit", _tx_i); })); } drop(tx); let rx_count = runtime_block_on!(async move { let mut count = 0; 'A: loop { match rx.recv().await { Ok(_i) => { count += 1; trace!("rx {}", _i); } Err(_) => break 'A, } } count }); for th in tx_th_s { let _ = th.join().unwrap(); } assert_eq!(rx_count, ROUND * tx_count); } #[logfn] #[rstest] #[case(mpmc::bounded_blocking_async(1), 5, 5)] #[case(mpmc::bounded_blocking_async(1), 20, 20)] #[case(mpmc::bounded_blocking_async(1), 20, 200)] #[case(mpmc::bounded_blocking_async(10), 10, 10)] #[case(mpmc::bounded_blocking_async(10), 50, 20)] #[case(mpmc::bounded_blocking_async(10), 100, 200)] #[case(mpmc::bounded_blocking_async(100), 10, 200)] #[case(mpmc::bounded_blocking_async(100), 100, 200)] #[case(mpmc::bounded_blocking_async(100), 300, 300)] #[case(mpmc::bounded_blocking_async(100), 30, 500)] #[case(mpmc::unbounded_async(), 5, 5)] #[case(mpmc::unbounded_async(), 50, 20)] #[case(mpmc::unbounded_async(), 200, 200)] #[case(mpmc::unbounded_async(), 10, 200)] #[case(mpmc::unbounded_async(), 100, 200)] #[case(mpmc::unbounded_async(), 300, 300)] #[case(mpmc::unbounded_async(), 30, 500)] fn test_pressure_tx_multi_blocking_multi_rx_async + 'static>( setup_log: (), #[case] channel: (MTx, MAsyncRx), #[case] tx_count: usize, #[case] rx_count: usize, ) { let (tx, rx) = channel; #[cfg(miri)] { if tx_count > 5 || rx_count > 5 { println!("skip"); return; } } let mut tx_th_s = Vec::new(); for _tx_i in 0..tx_count { let _tx = tx.clone(); tx_th_s.push(spawn_named_thread(&format!("sender_{}", _tx_i), move || { for i 
in 0..ROUND { match _tx.send(i) { Err(e) => panic!("{}", e), _ => { trace!("tx {} {}", _tx_i, i); } } } debug!("tx {} exit", _tx_i); })); } drop(tx); let total_count = runtime_block_on!(async move { let mut th_co = Vec::new(); for _rx_i in 0..rx_count { let _rx = rx.clone(); th_co.push(async_spawn!(async move { debug!("rx{:?} {} spawn", tokio_task_id!(), _rx_i); let mut count = 0; 'A: loop { match _rx.recv().await { Ok(_i) => { count += 1; trace!("rx {} {}", _rx_i, _i); } Err(_) => break 'A, } } debug!("rx{:?} {} exit", tokio_task_id!(), _rx_i); count })); } drop(rx); let mut total = 0; for th in th_co { total += async_join_result!(th); } total }); for th in tx_th_s { let _ = th.join().unwrap(); } assert_eq!(total_count, tx_count * ROUND); } ================================================ FILE: test-suite/src/test_blocking_context.rs ================================================ use crate::*; use captains_log::{logfn, *}; use crossfire::flavor::Flavor; use crossfire::*; use rstest::*; use std::sync::Arc; use std::thread::sleep; use std::time::{Duration, Instant}; #[fixture] fn setup_log() { _setup_log(); } #[logfn] #[rstest] #[case(spsc::bounded_blocking(1))] #[case(mpsc::bounded_blocking(1))] #[case(mpmc::bounded_blocking(1))] fn test_basic_bounded_empty_full_drop_rx, R: BlockingRxTrait>( setup_log: (), #[case] _channel: (T, R), ) { // Just don't want to run duplicately in the workflow #[cfg(not(feature = "async_std"))] { let (tx, rx) = _channel; assert!(tx.is_empty()); assert!(rx.is_empty()); assert_eq!(tx.capacity(), Some(1)); assert_eq!(rx.capacity(), Some(1)); tx.try_send(1).expect("Ok"); assert!(tx.is_full()); assert!(rx.is_full()); assert!(!tx.is_empty()); assert_eq!(tx.is_disconnected(), false); assert_eq!(rx.is_disconnected(), false); drop(rx); assert_eq!(tx.is_disconnected(), true); assert_eq!(tx.get_rx_count(), 0); assert_eq!(tx.get_tx_count(), 1); assert_eq!(tx.try_send(2).unwrap_err(), TrySendError::Disconnected(2)); 
assert_eq!(tx.send(2).unwrap_err(), SendError(2)); let start = Instant::now(); assert_eq!( tx.send_timeout(3, Duration::from_secs(1)).unwrap_err(), SendTimeoutError::Disconnected(3) ); assert!(Instant::now() - start < Duration::from_secs(1)); } } #[logfn] #[rstest] #[case(spsc::bounded_blocking(1))] #[case(mpsc::bounded_blocking(1))] #[case(mpmc::bounded_blocking(1))] fn test_basic_bounded_empty_full_drop_tx, R: BlockingRxTrait>( setup_log: (), #[case] _channel: (T, R), ) { #[cfg(not(feature = "async_std"))] { let (tx, rx) = _channel; assert!(tx.is_empty()); assert!(rx.is_empty()); assert_eq!(tx.capacity(), Some(1)); assert_eq!(rx.capacity(), Some(1)); tx.try_send(1).expect("Ok"); assert!(tx.is_full()); assert!(rx.is_full()); assert!(!tx.is_empty()); assert_eq!(tx.is_disconnected(), false); assert_eq!(rx.is_disconnected(), false); drop(tx); assert_eq!(rx.is_disconnected(), true); assert_eq!(rx.get_tx_count(), 0); assert_eq!(rx.get_rx_count(), 1); assert_eq!(rx.try_recv().unwrap(), 1); assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Disconnected); assert_eq!(rx.recv().unwrap_err(), RecvError); let start = Instant::now(); assert_eq!( rx.recv_timeout(Duration::from_secs(1)).unwrap_err(), RecvTimeoutError::Disconnected ); assert!(Instant::now() - start < Duration::from_secs(1)); } } #[logfn] #[rstest] #[case(spsc::unbounded_blocking())] #[case(mpsc::unbounded_blocking())] #[case(mpmc::unbounded_blocking())] fn test_basic_unbounded_empty_drop_rx, R: BlockingRxTrait>( setup_log: (), #[case] _channel: (T, R), ) { #[cfg(not(feature = "async_std"))] { let (tx, rx) = _channel; assert!(tx.is_empty()); assert!(rx.is_empty()); assert_eq!(tx.capacity(), None); assert_eq!(rx.capacity(), None); tx.try_send(1).expect("Ok"); assert!(!tx.is_empty()); assert_eq!(tx.is_disconnected(), false); assert_eq!(rx.is_disconnected(), false); drop(rx); assert_eq!(tx.is_disconnected(), true); assert_eq!(tx.get_rx_count(), 0); assert_eq!(tx.get_tx_count(), 1); 
assert_eq!(tx.try_send(2).unwrap_err(), TrySendError::Disconnected(2)); assert_eq!(tx.send(2).unwrap_err(), SendError(2)); let start = Instant::now(); assert_eq!( tx.send_timeout(3, Duration::from_secs(1)).unwrap_err(), SendTimeoutError::Disconnected(3) ); assert!(Instant::now() - start < Duration::from_secs(1)); } } #[logfn] #[rstest] #[case(spsc::unbounded_blocking())] #[case(mpsc::unbounded_blocking())] #[case(mpmc::unbounded_blocking())] fn test_basic_unbounded_empty_drop_tx, R: BlockingRxTrait>( setup_log: (), #[case] _channel: (T, R), ) { #[cfg(not(feature = "async_std"))] { let (tx, rx) = _channel; assert!(tx.is_empty()); assert!(rx.is_empty()); tx.try_send(1).expect("Ok"); assert!(!tx.is_empty()); assert_eq!(tx.is_disconnected(), false); assert_eq!(rx.is_disconnected(), false); drop(tx); assert_eq!(rx.is_disconnected(), true); assert_eq!(rx.get_tx_count(), 0); assert_eq!(rx.get_rx_count(), 1); assert_eq!(rx.recv().unwrap(), 1); assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Disconnected); assert_eq!(rx.recv().unwrap_err(), RecvError); let start = Instant::now(); assert_eq!( rx.recv_timeout(Duration::from_secs(1)).unwrap_err(), RecvTimeoutError::Disconnected ); assert!(Instant::now() - start < Duration::from_secs(1)); } } #[logfn] #[rstest] #[case(spsc::bounded_blocking(10))] #[case(mpsc::bounded_blocking(10))] #[case(mpmc::bounded_blocking(10))] fn test_basic_bounded_1_thread, R: BlockingRxTrait>( setup_log: (), #[case] _channel: (T, R), ) { #[cfg(not(feature = "async_std"))] { let (tx, rx) = _channel; let rx_res = rx.try_recv(); assert!(rx_res.is_err()); assert!(rx_res.unwrap_err().is_empty()); for i in 0i32..10 { let tx_res = tx.try_send(i); assert!(tx_res.is_ok()); } let tx_res = tx.try_send(11); assert!(tx_res.is_err()); assert!(tx_res.unwrap_err().is_full()); let th = spawn_named_thread("receiver_1", move || { for i in 0i32..12 { match rx.recv() { Ok(j) => { trace!("recv {}", i); assert_eq!(i, j); } Err(e) => { panic!("error {}", e); } } } let 
res = rx.recv(); assert!(res.is_err()); trace!("rx close"); }); assert!(tx.send(10).is_ok()); sleep(Duration::from_secs(1)); assert!(tx.send(11).is_ok()); drop(tx); let _ = th.join().unwrap(); } } #[logfn] #[rstest] #[case(spsc::unbounded_blocking())] #[case(mpsc::unbounded_blocking())] #[case(mpmc::unbounded_blocking())] fn test_basic_unbounded_1_thread, R: BlockingRxTrait>( setup_log: (), #[case] _channel: (T, R), ) { #[cfg(not(feature = "async_std"))] { let (tx, rx) = _channel; let rx_res = rx.try_recv(); assert!(rx_res.is_err()); assert!(rx_res.unwrap_err().is_empty()); for i in 0i32..10 { let tx_res = tx.try_send(i); assert!(tx_res.is_ok()); } let th = spawn_named_thread("receiver_1", move || { for i in 0i32..12 { match rx.recv() { Ok(j) => { trace!("recv {}", i); assert_eq!(i, j); } Err(e) => { panic!("error {}", e); } } } let res = rx.recv(); assert!(res.is_err()); trace!("rx close"); }); assert!(tx.send(10).is_ok()); sleep(Duration::from_secs(1)); assert!(tx.send(11).is_ok()); drop(tx); let _ = th.join().unwrap(); } } #[logfn] #[rstest] #[case(spsc::bounded_blocking(10))] #[case(mpsc::bounded_blocking(10))] #[case(mpmc::bounded_blocking(10))] #[case(spsc::unbounded_blocking())] #[case(mpsc::unbounded_blocking())] #[case(mpmc::unbounded_blocking())] fn test_basic_recv_after_sender_close, R: BlockingRxTrait>( setup_log: (), #[case] _channel: (T, R), ) { #[cfg(not(feature = "async_std"))] { let (tx, rx) = _channel; let total_msg_count = 5; for i in 0..total_msg_count { let _ = tx.try_send(i).expect("send ok"); } drop(tx); // NOTE: 5 < 10 let mut recv_msg_count = 0; loop { match rx.recv() { Ok(_) => { recv_msg_count += 1; } Err(_) => { break; } } } assert_eq!(recv_msg_count, total_msg_count); } } #[logfn] #[rstest] #[case(spsc::bounded_blocking(1))] #[case(spsc::bounded_blocking(10))] #[case(spsc::bounded_blocking(100))] #[case(spsc::bounded_blocking(300))] #[case(mpsc::bounded_blocking(1))] #[case(mpsc::bounded_blocking(10))] 
#[case(mpsc::bounded_blocking(100))] #[case(mpsc::bounded_blocking(300))] #[case(mpmc::bounded_blocking(1))] #[case(mpmc::bounded_blocking(10))] #[case(mpmc::bounded_blocking(100))] #[case(mpmc::bounded_blocking(300))] fn test_pressure_bounded_blocking_1_1, R: BlockingRxTrait>( setup_log: (), #[case] _channel: (T, R), ) { #[cfg(not(feature = "async_std"))] { let (tx, rx) = _channel; let round: usize; #[cfg(miri)] { round = ROUND; } #[cfg(not(miri))] { round = ROUND * 100; } let th = spawn_named_thread("sender_1", move || { for i in 0..round { if let Err(e) = tx.send(i) { panic!("{:?}", e); } } trace!("tx exit"); }); let mut count = 0; 'A: loop { match rx.recv() { Ok(_i) => { assert_eq!(_i, count); count += 1; trace!("recv {}", _i); } Err(_) => break 'A, } } drop(rx); let _ = th.join().unwrap(); assert_eq!(count, round); } } #[logfn] #[rstest] #[case(mpsc::bounded_blocking(1), 3)] #[case(mpsc::bounded_blocking(1), 5)] #[case(mpsc::bounded_blocking(1), 10)] #[case(mpsc::bounded_blocking(1), 16)] #[case(mpsc::bounded_blocking(10), 4)] #[case(mpsc::bounded_blocking(10), 7)] #[case(mpsc::bounded_blocking(10), 12)] #[case(mpsc::bounded_blocking(100), 3)] #[case(mpsc::bounded_blocking(100), 9)] #[case(mpsc::bounded_blocking(100), 13)] #[case(mpmc::bounded_blocking(1), 2)] #[case(mpmc::bounded_blocking(1), 5)] #[case(mpmc::bounded_blocking(1), 15)] #[case(mpmc::bounded_blocking(10), 3)] #[case(mpmc::bounded_blocking(10), 7)] #[case(mpmc::bounded_blocking(10), 16)] #[case(mpmc::bounded_blocking(100), 2)] #[case(mpmc::bounded_blocking(100), 8)] #[case(mpmc::bounded_blocking(100), 16)] fn test_pressure_bounded_blocking_multi_1< F: Flavor + 'static, R: BlockingRxTrait, >( setup_log: (), #[case] _channel: (MTx, R), #[case] tx_count: usize, ) { #[cfg(not(feature = "async_std"))] { let (tx, rx) = _channel; #[cfg(miri)] { if tx_count > 5 { println!("skip"); return; } } let round: usize = ROUND * 10; let mut th_s = Vec::new(); for _tx_i in 0..tx_count { let _tx = tx.clone(); 
th_s.push(spawn_named_thread(&format!("sender_{}", _tx_i), move || { for i in 0..round { match _tx.send(i) { Err(e) => panic!("{:?}", e), _ => {} } } trace!("tx {} exit", _tx_i); })); } drop(tx); let mut count = 0; 'A: loop { match rx.recv() { Ok(_i) => { count += 1; trace!("recv {}", _i); } Err(_) => break 'A, } } drop(rx); for th in th_s { let _: () = th.join().unwrap(); } assert_eq!(count, round * tx_count); } } #[logfn] #[rstest] #[case(mpmc::bounded_blocking(1), 2, 2)] #[case(mpmc::bounded_blocking(1), 16, 2)] #[case(mpmc::bounded_blocking(1), 2, 16)] #[case(mpmc::bounded_blocking(10), 2, 2)] #[case(mpmc::bounded_blocking(10), 13, 2)] #[case(mpmc::bounded_blocking(10), 3, 10)] #[case(mpmc::bounded_blocking(100), 3, 3)] #[case(mpmc::bounded_blocking(100), 8, 3)] #[case(mpmc::bounded_blocking(100), 3, 8)] #[case(mpmc::bounded_blocking(100), 5, 5)] fn test_pressure_bounded_blocking_multi + 'static>( setup_log: (), #[case] _channel: (MTx, MRx), #[case] tx_count: usize, #[case] rx_count: usize, ) { #[cfg(not(feature = "async_std"))] { let round: usize; #[cfg(miri)] { if tx_count > 5 || rx_count > 5 { println!("skip"); return; } round = ROUND; } #[cfg(not(miri))] { round = ROUND * 10; } let (tx, rx) = _channel; let mut th_tx = Vec::new(); let mut th_rx = Vec::new(); for _tx_i in 0..tx_count { let _tx = tx.clone(); th_tx.push(spawn_named_thread(&format!("sender_{}", _tx_i), move || { for i in 0..round { match _tx.send(i) { Err(e) => panic!("{:?}", e), _ => {} } } trace!("tx {} exit", _tx_i); })); } for _rx_i in 0..rx_count { let _rx = rx.clone(); th_rx.push(spawn_named_thread(&format!("receiver_{}", _rx_i), move || { let mut count = 0; 'A: loop { match _rx.recv() { Ok(_i) => { count += 1; trace!("recv {} {}", _rx_i, _i); } Err(_) => break 'A, } } trace!("rx {} exit", _rx_i); count })); } drop(tx); drop(rx); let mut total_count = 0; for th in th_tx { let _ = th.join().unwrap(); } for th in th_rx { total_count += th.join().unwrap(); } assert_eq!(total_count, round * 
tx_count); } } #[logfn] #[rstest] #[case(mpmc::bounded_blocking(1))] #[case(mpmc::bounded_blocking(10))] fn test_pressure_bounded_timeout_blocking + 'static>( setup_log: (), #[case] _channel: (MTx, MRx), ) { #[cfg(not(feature = "async_std"))] { use std::collections::HashMap; use std::sync::Mutex; let (tx, rx) = _channel; assert_eq!( rx.recv_timeout(Duration::from_millis(1)).unwrap_err(), RecvTimeoutError::Timeout ); let (tx_wakers, rx_wakers) = rx.get_wakers_count(); println!("wakers: {}, {}", tx_wakers, rx_wakers); assert_eq!(tx_wakers, 0); assert_eq!(rx_wakers, 0); let recv_map = Arc::new(Mutex::new(HashMap::new())); let mut th_tx = Vec::new(); let mut th_rx = Vec::new(); let tx_count: usize = 3; for thread_id in 0..tx_count { let _recv_map = recv_map.clone(); let _tx = tx.clone(); th_tx.push(spawn_named_thread(&format!("sender_{}", thread_id), move || { // randomize start up sleep(Duration::from_millis((thread_id & 3) as u64)); let mut local_timeout_counter = 0; for i in 0..ROUND { { let mut guard = _recv_map.lock().unwrap(); guard.insert(i, ()); } if i & 2 == 0 { sleep(Duration::from_millis(3)); } else { sleep(Duration::from_millis(1)); } loop { match _tx.send_timeout(i, Duration::from_millis(1)) { Ok(_) => break, Err(SendTimeoutError::Timeout(_i)) => { local_timeout_counter += 1; assert_eq!(_i, i); } Err(SendTimeoutError::Disconnected(_)) => { unreachable!(); } } } } local_timeout_counter })); } for _thread_id in 0..2 { let _rx = rx.clone(); let _recv_map = recv_map.clone(); th_rx.push(spawn_named_thread(&format!("receiver_{}", _thread_id), move || { let mut step: usize = 0; let mut local_recv_counter = 0; let mut local_timeout_counter = 0; loop { step += 1; let timeout = if step & 2 == 0 { 1 } else { 2 }; if step & 2 > 0 { sleep(Duration::from_millis(1)); } match _rx.recv_timeout(Duration::from_millis(timeout)) { Ok(item) => { local_recv_counter += 1; { let mut guard = _recv_map.lock().unwrap(); guard.remove(&item); } } Err(RecvTimeoutError::Timeout) => { 
local_timeout_counter += 1; } Err(RecvTimeoutError::Disconnected) => { return (local_recv_counter, local_timeout_counter); } } } })); } drop(tx); drop(rx); let mut total_recv_count = 0; let mut total_send_timeout = 0; let mut total_recv_timeout = 0; for th in th_tx { total_send_timeout += th.join().unwrap(); } for th in th_rx { // rx threads return recv_count let (local_recv_counter, local_timeout_counter) = th.join().unwrap(); total_recv_count += local_recv_counter; total_recv_timeout += local_timeout_counter; } { let guard = recv_map.lock().unwrap(); assert!(guard.is_empty()); } assert_eq!(ROUND * tx_count, total_recv_count); println!("send timeout count: {}", total_send_timeout); println!("recv timeout count: {}", total_recv_timeout); } } #[test] fn test_conversion() { let (mtx, mrx) = mpmc::bounded_blocking::(1); let _tx: Tx<_> = mtx.into(); let _rx: Rx<_> = mrx.into(); } // This test make sure we have correctly use of maybeuninit #[logfn] #[rstest] #[case(spsc::bounded_blocking(1))] #[case(spsc::bounded_blocking(10))] #[case(mpsc::bounded_blocking(1))] #[case(mpsc::bounded_blocking(10))] #[case(mpmc::bounded_blocking(1))] #[case(mpmc::bounded_blocking(10))] fn test_drop_small_msg, R: BlockingRxTrait>( setup_log: (), #[case] channel: (T, R), ) { println!("needs_drop {}", std::mem::needs_drop::()); _test_drop_msg(channel); } // This test make sure we have correctly use of maybeuninit #[logfn] #[rstest] #[case(spsc::bounded_blocking(1))] #[case(spsc::bounded_blocking(10))] #[case(mpsc::bounded_blocking(1))] #[case(mpsc::bounded_blocking(10))] #[case(mpmc::bounded_blocking(1))] #[case(mpmc::bounded_blocking(10))] fn test_drop_large_msg, R: BlockingRxTrait>( setup_log: (), #[case] channel: (T, R), ) { println!("needs_drop {}", std::mem::needs_drop::()); _test_drop_msg(channel); } fn _test_drop_msg, R: BlockingRxTrait>(channel: (T, R)) { let (tx, rx) = channel; reset_drop_counter(); let cap = tx.capacity().unwrap(); let mut ids = cap; for i in 0..ids { let msg = 
M::new(i); assert!(tx.try_send(msg).is_ok()); } assert_eq!(get_drop_counter(), 0); let msg = M::new(ids); if let Err(TrySendError::Full(_msg)) = tx.try_send(msg) { assert_eq!(_msg.get_value(), ids); assert_eq!(get_drop_counter(), 0); drop(_msg); assert_eq!(get_drop_counter(), 1); } else { unreachable!(); } let th = spawn_named_thread("receiver_3", move || { let _msg = rx.recv().expect("recv"); assert_eq!(_msg.get_value(), 0); drop(_msg); rx }); let msg = M::new(ids); tx.send(msg).expect("send"); ids += 1; let rx = th.join().unwrap(); drop(rx); assert_eq!(get_drop_counter(), 2); let msg = M::new(ids); if let Err(TrySendError::Disconnected(_msg)) = tx.try_send(msg) { assert_eq!(_msg.get_value(), ids); } else { unreachable!(); } ids += 1; let msg = M::new(ids); if let Err(SendError(_msg)) = tx.send(msg) { assert_eq!(_msg.get_value(), ids); } else { unreachable!(); } assert_eq!(get_drop_counter(), 4); ids += 1; drop(tx); // every thing dropped inside the channel assert_eq!(get_drop_counter(), ids + 1); // ids begins at 0 assert_eq!(get_drop_counter(), 4 + cap); } ================================================ FILE: test-suite/src/test_oneshot.rs ================================================ use crate::*; use captains_log::logfn; use crossfire::*; use fastrand; use rstest::*; use std::thread; use std::time::Duration; #[fixture] fn setup_log() { _setup_log(); } #[logfn] #[rstest] fn test_oneshot_blocking_basic(setup_log: ()) { let (tx, mut rx) = oneshot::oneshot(); assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty); assert_eq!(rx.is_empty(), true); tx.send(42); assert_eq!(rx.is_empty(), false); assert_eq!(rx.recv(), Ok(42)); let (tx, mut rx) = oneshot::oneshot(); assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty); tx.send(41); assert_eq!(rx.try_recv(), Ok(41)); assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Disconnected); assert_eq!(rx.recv().unwrap_err(), RecvError); } #[logfn] #[rstest] fn test_oneshot_blocking_drop_tx(setup_log: ()) { let 
(tx, rx) = oneshot::oneshot::(); drop(tx); assert_eq!(rx.recv(), Err(RecvError)); let (tx, rx) = oneshot::oneshot::(); let th = thread::spawn(move || { // Should be wake up on sender drop assert_eq!(rx.recv(), Err(RecvError)); }); thread::sleep(Duration::from_millis(fastrand::u64(1..=500))); drop(tx); th.join().expect("join"); } #[logfn] #[rstest] fn test_oneshot_blocking_drop_rx(setup_log: ()) { let (tx, rx) = oneshot::oneshot::(); drop(rx); assert!(tx.is_disconnected()); // send consumes tx, returns () tx.send(42); } #[logfn] #[rstest] fn test_oneshot_blocking_leak(setup_log: ()) { // Check if OneShot drops the value if not received reset_drop_counter(); { let (tx, _rx) = oneshot::oneshot::(); tx.send(SmallMsg::new(1)); } // tx dropped (closed), rx dropped (OneShot dropped). msg should be dropped. assert_eq!(get_drop_counter(), 1); } #[logfn] #[rstest] fn test_oneshot_blocking_drop_after_recv(setup_log: ()) { // Check if OneShot drops the value after recv (it shouldn't, Rx has it) reset_drop_counter(); { let (tx, rx) = oneshot::oneshot::(); tx.send(SmallMsg::new(1)); let msg = rx.recv().unwrap(); assert_eq!(get_drop_counter(), 0); drop(msg); assert_eq!(get_drop_counter(), 1); } // OneShot dropped. Should NOT drop again. 
assert_eq!(get_drop_counter(), 1); } #[logfn] #[rstest] fn test_oneshot_async_basic(setup_log: ()) { runtime_block_on!(async move { let (tx, mut rx) = oneshot::oneshot(); assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty); assert_eq!(rx.is_empty(), true); tx.send(42); assert_eq!(rx.is_empty(), false); assert_eq!(rx.await, Ok(42)); let (tx, mut rx) = oneshot::oneshot(); assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty); tx.send(41); assert_eq!(rx.try_recv(), Ok(41)); assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Disconnected); assert_eq!(rx.await.unwrap_err(), RecvError); }); } #[cfg(feature = "time")] #[logfn] #[rstest] fn test_oneshot_async_drop_tx(setup_log: ()) { runtime_block_on!(async move { let (tx, rx) = oneshot::oneshot::(); drop(tx); assert_eq!(rx.await, Err(RecvError)); log::debug!("next test"); let (tx, rx) = oneshot::oneshot::(); let th = async_spawn!(async move { // Should be wake up on sender drop assert_eq!(rx.await, Err(RecvError)); }); sleep(Duration::from_millis(fastrand::u64(1..=500))).await; drop(tx); let _ = async_join_result!(th); }); } #[logfn] #[rstest] fn test_oneshot_async_pressure(setup_log: ()) { let count = { #[cfg(miri)] { 10usize } #[cfg(not(miri))] { 100usize } }; runtime_block_on!(async move { let mut tasks = Vec::new(); for i in 0..count { tasks.push(async_spawn!(async move { let (tx, rx) = oneshot::oneshot(); tx.send(i); assert_eq!(rx.await, Ok(i)); })); } for t in tasks { let _ = async_join_result!(t); } }); } #[logfn] #[rstest] fn test_oneshot_blocking_batch(setup_log: ()) { let mut txs = Vec::with_capacity(ROUND); let mut rxs = Vec::with_capacity(ROUND); for _i in 0..ROUND { let (tx, rx) = oneshot::oneshot(); txs.push(tx); rxs.push(rx); } let th = thread::spawn(move || { for (i, tx) in txs.into_iter().enumerate() { tx.send(i); } }); for (i, rx) in rxs.into_iter().enumerate() { assert_eq!(rx.recv(), Ok(i)); } th.join().unwrap(); } #[logfn] #[rstest] fn test_oneshot_async_batch(setup_log: ()) { 
runtime_block_on!(async move { let mut txs = Vec::with_capacity(ROUND); let mut rxs = Vec::with_capacity(ROUND); for _i in 0..ROUND { let (tx, rx) = oneshot::oneshot(); txs.push(tx); rxs.push(rx); } let th = async_spawn!(async move { for (i, tx) in txs.into_iter().enumerate() { tx.send(i); } }); for (i, rx) in rxs.into_iter().enumerate() { assert_eq!(rx.await, Ok(i)); } async_join_result!(th); }); } #[logfn] #[rstest] fn test_oneshot_blocking_concurrent(setup_log: ()) { let count = { #[cfg(miri)] { 10usize } #[cfg(not(miri))] { 50usize } }; let mut th_s = Vec::new(); for i in 0..count { let (tx, rx) = oneshot::oneshot(); th_s.push(thread::spawn(move || { tx.send(i); })); th_s.push(thread::spawn(move || { assert_eq!(rx.recv(), Ok(i)); })); } for th in th_s { th.join().unwrap(); } } #[logfn] #[rstest] fn test_oneshot_async_concurrent(setup_log: ()) { let count = { #[cfg(miri)] { 10usize } #[cfg(not(miri))] { 100usize } }; runtime_block_on!(async move { let mut tasks = Vec::new(); for i in 0..count { let (tx, rx) = oneshot::oneshot(); tasks.push(async_spawn!(async move { tx.send(i); })); tasks.push(async_spawn!(async move { assert_eq!(rx.await, Ok(i)); })); } for t in tasks { let _ = async_join_result!(t); } }); } #[logfn] #[rstest] fn test_oneshot_blocking_with_sleep(setup_log: ()) { #[cfg(miri)] { // sleep in miri will be too slow println!("skip on miri"); return; } #[cfg(not(miri))] { let count = 50usize; let mut th_s = Vec::new(); for i in 0..(count as u64) { th_s.push(thread::spawn(move || { let (tx, rx) = oneshot::oneshot(); // Spawn a thread that sends after a short delay thread::spawn(move || { thread::sleep(Duration::from_millis(i % 10)); // Vary the delay tx.send(i); }); // Wait for the value assert_eq!(rx.recv(), Ok(i)); })); } for th in th_s { th.join().unwrap(); } } } #[cfg(feature = "time")] #[logfn] #[rstest] fn test_oneshot_async_with_sleep(setup_log: ()) { #[cfg(miri)] { // sleep in miri will be too slow println!("skip on miri"); } #[cfg(not(miri))] { 
let count = 50usize; runtime_block_on!(async move { let mut tasks = Vec::new(); for i in 0..count { tasks.push(async_spawn!(async move { let (tx, rx) = oneshot::oneshot(); let th = async_spawn!(async move { sleep(Duration::from_millis((i % 10) as u64)).await; tx.send(i); }); // Wait for the value assert_eq!(rx.await, Ok(i)); let _ = async_join_result!(th); })); } for t in tasks { let _ = async_join_result!(t); } }); } } #[cfg(feature = "time")] #[logfn] #[rstest] fn test_oneshot_async_batch_with_interval(setup_log: ()) { #[cfg(miri)] { // sleep in miri will be too slow println!("skip on miri"); return; } #[cfg(not(miri))] { let batch_size = 30; runtime_block_on!(async move { let mut tasks = Vec::new(); // Create a batch of oneshots for i in 0..batch_size { tasks.push(async_spawn!(async move { let (tx, rx) = oneshot::oneshot(); let th = async_spawn!(async move { // Sleep for different durations based on index sleep(Duration::from_millis((i * 2) as u64)).await; tx.send(i); }); // Wait for the value assert_eq!(rx.await, Ok(i)); let _ = async_join_result!(th); })); } for t in tasks { let _ = async_join_result!(t); } }); } } #[cfg(feature = "time")] #[logfn] #[rstest] fn test_oneshot_blocking_timeout_fail(setup_log: ()) { let (_tx, rx) = oneshot::oneshot::(); let start = std::time::Instant::now(); let res = rx.recv_timeout(Duration::from_millis(100)); assert_eq!(res, Err(RecvTimeoutError::Timeout)); assert!(start.elapsed() >= Duration::from_millis(100)); } #[cfg(feature = "time")] #[logfn] #[rstest] fn test_oneshot_blocking_timeout_success(setup_log: ()) { let (tx, rx) = oneshot::oneshot::(); let th = thread::spawn(move || { thread::sleep(Duration::from_millis(50)); tx.send(42); }); let _res = rx.recv_timeout(Duration::from_secs(1)); #[cfg(not(miri))] assert_eq!(_res, Ok(42)); let _ = th.join(); } #[cfg(feature = "time")] #[logfn] #[rstest] fn test_oneshot_blocking_timeout_disconnected(setup_log: ()) { let (tx, rx) = oneshot::oneshot::(); let th = thread::spawn(move || 
{ thread::sleep(Duration::from_millis(50)); drop(tx); }); let _res = rx.recv_timeout(Duration::from_millis(200)); let _ = th.join(); assert!(_res.is_err()); // might be timeout or disconnected } #[cfg(feature = "time")] #[logfn] #[rstest] fn test_oneshot_async_timeout_fail(setup_log: ()) { runtime_block_on!(async move { let (_tx, rx) = oneshot::oneshot::(); let start = std::time::Instant::now(); let sleep_fut = sleep(Duration::from_millis(100)); futures_util::pin_mut!(sleep_fut); let res = rx.recv_async_with_timer(sleep_fut).await; assert_eq!(res, Err(RecvTimeoutError::Timeout)); assert!(start.elapsed() >= Duration::from_millis(100)); }); } #[cfg(feature = "time")] #[logfn] #[rstest] fn test_oneshot_async_timeout_disconnected(setup_log: ()) { runtime_block_on!(async move { let (tx, rx) = oneshot::oneshot::(); let th = std::thread::spawn(move || { std::thread::sleep(Duration::from_millis(50)); drop(tx); }); let _res = rx.recv_async_with_timer(sleep(Duration::from_secs(1))).await; let _ = th.join(); #[cfg(not(miri))] assert_eq!(_res, Err(RecvTimeoutError::Disconnected)); }); } #[cfg(feature = "time")] #[logfn] #[rstest] fn test_oneshot_async_timeout_success(setup_log: ()) { runtime_block_on!(async move { let (tx, rx) = oneshot::oneshot::(); let th = async_spawn!(async move { sleep(Duration::from_millis(50)).await; tx.send(42); }); let _res = rx.recv_async_with_timer(sleep(Duration::from_secs(2))).await; #[cfg(not(miri))] assert_eq!(_res, Ok(42)); async_join_result!(th); }); } ================================================ FILE: test-suite/src/test_select_async.rs ================================================ use crate::*; use captains_log::logfn; use crossfire::{mpmc, mpsc}; use futures_util::{select, FutureExt}; use rstest::*; use std::time::Duration; #[fixture] fn setup_log() { _setup_log(); } #[cfg(feature = "time")] #[logfn] #[rstest] fn test_mpmc_null_async_close(setup_log: ()) { let flavor = mpmc::Null::new(); let (tx, rx) = flavor.new_async(); 
runtime_block_on!(async move { let th = async_spawn!(async move { sleep(Duration::from_millis(50)).await; drop(tx); }); let res = rx.recv().await; assert!(res.is_err()); async_join_result!(th); }); } #[cfg(feature = "time")] #[logfn] #[rstest] fn test_mpsc_null_async_close(setup_log: ()) { let flavor = mpsc::Null::new(); let (tx, rx) = flavor.new_async(); runtime_block_on!(async move { let th = async_spawn!(async move { sleep(Duration::from_millis(50)).await; drop(tx); }); let res = rx.recv().await; assert!(res.is_err()); async_join_result!(th); }); } #[cfg(feature = "time")] #[logfn] #[rstest] fn test_mpmc_null_select(setup_log: ()) { let flavor = mpmc::Null::new(); let (tx, rx) = flavor.new_async(); runtime_block_on!(async move { let th = async_spawn!(async move { sleep(Duration::from_millis(50)).await; drop(tx); }); let closed = select! { res = rx.recv().fuse() => { if res.is_err() { true } else { panic!("Should not receive message from null"); } } }; assert!(closed); async_join_result!(th); }); } #[cfg(feature = "time")] #[logfn] #[rstest] fn test_mpsc_null_select(setup_log: ()) { let flavor = mpsc::Null::new(); let (tx, rx) = flavor.new_async(); runtime_block_on!(async move { let th = async_spawn!(async move { sleep(Duration::from_millis(50)).await; drop(tx); }); let closed = select! { res = rx.recv().fuse() => { if res.is_err() { true } else { panic!("Should not receive message from null"); } } }; assert!(closed); async_join_result!(th); }); } #[cfg(feature = "time")] #[logfn] #[rstest] fn test_null_select_timeout(setup_log: ()) { let flavor = mpmc::Null::new(); let (tx, rx) = flavor.new_async(); runtime_block_on!(async move { // Don't drop tx yet let timed_out = select! 
{ res = rx.recv().fuse() => { if res.is_err() { panic!("Should not be closed yet"); } false } _ = sleep(Duration::from_millis(50)).fuse() => { true } }; assert!(timed_out); drop(tx); }); } #[logfn] #[rstest] fn test_null_mixed_with_active_channel(setup_log: ()) { let flavor = mpmc::Null::new(); let (tx_null, rx_null) = flavor.new_async(); let (tx_data, rx_data) = mpmc::bounded_async::(10); runtime_block_on!(async move { tx_data.send(42).await.unwrap(); // Data ready, null not triggered select! { _ = rx_null.recv().fuse() => { panic!("Null triggered unexpectedly"); } res = rx_data.recv().fuse() => { assert_eq!(res.unwrap(), 42); } } drop(tx_null); }); } #[cfg(feature = "time")] #[logfn] #[rstest] fn test_null_mixed_trigger(setup_log: ()) { let flavor = mpmc::Null::new(); let (tx_null, rx_null) = flavor.new_async(); let (_tx_data, rx_data) = mpmc::bounded_async::(10); runtime_block_on!(async move { let th = async_spawn!(async move { sleep(Duration::from_millis(50)).await; drop(tx_null); }); // Data not ready (empty), null triggered via drop let null_triggered = select! 
{ res = rx_null.recv().fuse() => { assert!(res.is_err()); true } _ = rx_data.recv().fuse() => { panic!("Data triggered unexpectedly"); } }; assert!(null_triggered); async_join_result!(th); }); } ================================================ FILE: test-suite/src/test_select_blocking.rs ================================================ use crate::*; use captains_log::logfn; use crossfire::select::{Multiplex, Mux, Select}; use crossfire::*; use rstest::*; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, Barrier}; use std::thread; use std::time::Duration; #[fixture] fn setup_log() { _setup_log(); } #[logfn] #[rstest] fn test_select_basic(setup_log: ()) { let (tx1, rx1) = mpmc::bounded_blocking::(10); let (tx2, rx2) = mpsc::bounded_blocking::(10); tx1.send(100).expect("send"); tx2.send(200).expect("send"); let mut select = Select::new(); select.add(&rx1); select.add(&rx2); let mut results = Vec::new(); // Select twice for _ in 0..2 { let res = select.select().unwrap(); if res == rx1 { results.push(rx1.read_select(res).unwrap()); } else if res == rx2 { results.push(rx2.read_select(res).unwrap()); } else { panic!("Unexpected token"); } } results.sort(); assert_eq!(results, vec![100, 200]); } #[logfn] #[rstest] fn test_select_basic_timeout(setup_log: ()) { let (_tx1, rx1) = mpmc::bounded_blocking::(10); let (_tx2, rx2) = mpmc::bounded_blocking::(10); let (_tx3, rx3) = mpmc::bounded_blocking::(10); let mut select = Select::new(); select.add(&rx1); select.add(&rx2); select.add(&rx3); let start = std::time::Instant::now(); let res = select.select_timeout(Duration::from_millis(100)); let elapsed = start.elapsed(); assert!(res.is_err()); assert!(elapsed >= Duration::from_millis(100)); } #[logfn] #[rstest] fn test_select_basic_disconnect_before_park(setup_log: ()) { let (_tx1, rx1) = mpmc::bounded_blocking::(10); let (_tx2, rx2) = mpmc::bounded_blocking::(10); let (_tx3, rx3) = mpmc::bounded_blocking::(10); let (_tx4, rx4) = mpmc::bounded_blocking::(10); 
// NOTE(review): whitespace-collapsed source preserved verbatim (stripped
// generics, trailing `//` comments swallowing code) — see note at chunk top.
// Content: body of disconnect-before-park (a dropped sender is reported as
// ready and `read_select` returns Err), disconnect-after-park (a background
// thread drops the sender 500ms after a Barrier sync, waking the parked
// Select), and the start of `test_select_basic_loop`, which fans six
// channels of mixed flavors (unbounded/bounded/One, mpmc and mpsc) into one
// Select loop; each sender thread sends 10 offset values with 10ms pauses.
let mut select = Select::new(); select.add(&rx1); select.add(&rx2); select.add(&rx3); select.add(&rx4); drop(_tx3); let res = select.select(); assert!(res.is_ok()); let res = res.unwrap(); assert!(res == rx3); // Disconnected and empty assert!(rx3.read_select(res).is_err()); select.remove(&rx3); assert_eq!(select.try_select().unwrap_err(), TryRecvError::Empty); _tx2.send(200).expect("send"); let res = select.select().unwrap(); assert!(res == rx2); println!("select_result {:?}, rx2 {:?}", res, rx2); assert_eq!(rx2.read_select(res).unwrap(), 200); } #[logfn] #[rstest] fn test_select_basic_disconnect_after_park(setup_log: ()) { let (_tx1, rx1) = mpmc::bounded_blocking::(10); let (_tx2, rx2) = mpmc::bounded_blocking::(10); let (_tx3, rx3) = mpmc::bounded_blocking::(10); let (_tx4, rx4) = mpmc::bounded_blocking::(10); let mut select = Select::new(); select.add(&rx1); select.add(&rx2); select.add(&rx3); select.add(&rx4); let barrier = Arc::new(Barrier::new(2)); let _barrier = barrier.clone(); let th = thread::spawn(move || { _barrier.wait(); thread::sleep(Duration::from_millis(500)); drop(_tx3); }); barrier.wait(); let res = select.select(); assert!(res.is_ok()); let res = res.unwrap(); assert!(res == rx3); // Disconnected and empty assert!(rx3.read_select(res).is_err()); let _ = th.join(); select.remove(&rx3); assert_eq!(select.try_select().unwrap_err(), TryRecvError::Empty); _tx2.send(200).expect("send"); let res = select.select().unwrap(); assert!(res == rx2); assert_eq!(rx2.read_select(res).unwrap(), 200); } #[logfn] #[rstest] fn test_select_basic_loop(setup_log: ()) { let (tx1, rx1) = mpmc::unbounded_blocking::(); let (tx2, rx2) = mpmc::bounded_blocking::(10); let (tx3, rx3): (MTx>, MRx>) = mpmc::build(mpmc::One::new()); let (tx4, rx4) = mpsc::unbounded_blocking::(); let (tx5, rx5) = mpsc::bounded_blocking::(10); let (tx6, rx6): (MTx>, Rx>) = mpsc::new(); let mut select = Select::new(); select.add(&rx1); select.add(&rx2); select.add(&rx3); select.add(&rx4);
// NOTE(review): receiver side of the loop test — on Err(RecvError) from
// read_select the receiver is removed from the Select; the loop exits when
// select() itself errors (all channels gone). Sum is asserted at 15270 on
// the next source line (10 values per channel with offsets 0/100/.../500).
select.add(&rx5); select.add(&rx6); let t1 = thread::spawn(move || { for i in 0..10 { tx1.send(i).expect("send"); thread::sleep(Duration::from_millis(10)); } }); let t2 = thread::spawn(move || { for i in 0..10 { tx2.send(i + 100).expect("send"); thread::sleep(Duration::from_millis(10)); } }); let t3 = thread::spawn(move || { for i in 0..10 { tx3.send(i + 200).expect("send"); thread::sleep(Duration::from_millis(10)); } }); let t4 = thread::spawn(move || { for i in 0..10 { tx4.send(i + 300).expect("send"); thread::sleep(Duration::from_millis(10)); } }); let t5 = thread::spawn(move || { for i in 0..10 { tx5.send(i + 400).expect("send"); thread::sleep(Duration::from_millis(10)); } }); let t6 = thread::spawn(move || { for i in 0..10 { tx6.send(i + 500).expect("send"); thread::sleep(Duration::from_millis(10)); } }); let mut sum = 0; loop { let res = match select.select() { Ok(res) => res, Err(RecvError) => { println!("All channels disconnected or removed from select. Breaking loop."); break; } }; if res == rx1 { match rx1.read_select(res) { Ok(val) => { sum += val; } Err(RecvError) => { println!("rx1 disconnected, removing from select."); select.remove(&rx1); } } } else if res == rx2 { match rx2.read_select(res) { Ok(val) => { sum += val; } Err(RecvError) => { println!("rx2 disconnected, removing from select."); select.remove(&rx2); } } } else if res == rx3 { match rx3.read_select(res) { Ok(val) => { sum += val; } Err(RecvError) => { println!("rx3 disconnected, removing from select."); select.remove(&rx3); } } } else if res == rx4 { match rx4.read_select(res) { Ok(val) => { sum += val; } Err(RecvError) => { println!("rx4 disconnected, removing from select."); select.remove(&rx4); } } } else if res == rx5 { match rx5.read_select(res) { Ok(val) => { sum += val; } Err(RecvError) => { println!("rx5 disconnected, removing from select."); select.remove(&rx5); } } } else if res == rx6 { match rx6.read_select(res) { Ok(val) => { sum += val; } Err(RecvError) => { println!("rx6
disconnected, removing from select."); select.remove(&rx6); } } } else { panic!("unknown token"); } } t1.join().unwrap(); t2.join().unwrap(); t3.join().unwrap(); t4.join().unwrap(); t5.join().unwrap(); t6.join().unwrap(); assert_eq!(sum, 15270); } #[logfn] #[rstest] fn test_select_remove_mid(setup_log: ()) { // Test removing a receiver from the middle of the list let (tx1, rx1) = mpmc::bounded_blocking::(10); let (tx2, rx2) = mpsc::bounded_blocking::(10); let (tx3, rx3) = spsc::bounded_blocking::(10); let mut select = Select::new(); select.add(&rx1); select.add(&rx2); select.add(&rx3); // Remove rx2 (middle) select.remove(&rx2); tx1.send(1).unwrap(); tx3.send(3).unwrap(); tx2.send(2).unwrap(); // Should be ignored let mut results = Vec::new(); for _ in 0..2 { let res = select.select().unwrap(); if res == rx1 { results.push(rx1.read_select(res).unwrap()); } else if res == rx3 { results.push(rx3.read_select(res).unwrap()); } else { panic!("Unexpected token"); } } // Should not receive from rx2 assert!(select.select_timeout(Duration::from_millis(50)).is_err()); results.sort(); assert_eq!(results, vec![1, 3]); } #[logfn] #[rstest] fn test_select_mixed_flavors(setup_log: ()) { // Test mixing List (unbounded), Array (bounded > 1) and One (explicit One) let (tx_list, rx_list) = mpmc::unbounded_blocking::(); let (tx_array, rx_array) = mpmc::bounded_blocking::(10); let (tx_one, rx_one): (MTx>, MRx>) = mpmc::build(mpmc::One::new()); let mut select = Select::new(); select.add(&rx_list); select.add(&rx_array); select.add(&rx_one); tx_list.send(1).expect("send"); tx_array.send(2).expect("send"); tx_one.send(3).expect("send"); let mut results = Vec::new(); for _ in 0..3 { let res = select.select().unwrap(); if res == rx_list { results.push(rx_list.read_select(res).unwrap()); } else if res == rx_array { results.push(rx_array.read_select(res).unwrap()); } else if res == rx_one { results.push(rx_one.read_select(res).unwrap()); } else { panic!("Unexpected token"); } } 
results.sort(); assert_eq!(results, vec![1, 2, 3]); } #[logfn] #[rstest] #[case(1)] #[case(5)] #[case(10)] fn test_select_pressure(setup_log: (), #[case] producers: usize) { #[cfg(miri)] { if producers > 5 { println!("skip"); return; } } let (tx_list, rx_list) = mpmc::unbounded_blocking::(); let (tx_array, rx_array) = mpmc::bounded_blocking::(100); let (tx_one, rx_one): (MTx>, MRx>) = mpmc::build(mpmc::One::new()); let (tx_mpsc_list, rx_mpsc_list) = mpsc::unbounded_blocking::(); let (tx_mpsc_array, rx_mpsc_array) = mpsc::bounded_blocking::(100); let (tx_mpsc_one, rx_mpsc_one): (MTx>, Rx>) = mpsc::new(); let mut select = Select::new(); select.add(&rx_list); select.add(&rx_array); select.add(&rx_one); select.add(&rx_mpsc_list); select.add(&rx_mpsc_array); select.add(&rx_mpsc_one); let round = ROUND; let total_messages = round * 6 * producers; let mut handlers = Vec::new(); for _ in 0..producers { let tx = tx_list.clone(); handlers.push(thread::spawn(move || { for i in 0..round { tx.send(i).expect("send"); } })); let tx = tx_array.clone(); handlers.push(thread::spawn(move || { for i in 0..round { tx.send(i).expect("send"); } })); let tx = tx_one.clone(); handlers.push(thread::spawn(move || { for i in 0..round { tx.send(i).expect("send"); } })); let tx = tx_mpsc_list.clone(); handlers.push(thread::spawn(move || { for i in 0..round { tx.send(i).expect("send"); } })); let tx = tx_mpsc_array.clone(); handlers.push(thread::spawn(move || { for i in 0..round { tx.send(i).expect("send"); } })); let tx = tx_mpsc_one.clone(); handlers.push(thread::spawn(move || { for i in 0..round { tx.send(i).expect("send"); } })); } // Drop original senders to ensure we don't hang if we were counting on close drop(tx_list); drop(tx_array); drop(tx_one); drop(tx_mpsc_list); drop(tx_mpsc_array); drop(tx_mpsc_one); let mut count = 0; while count < total_messages { let res = select.select(); match res { Ok(token) => { if token == rx_list { if rx_list.read_select(token).is_ok() { count += 1; } } 
else if token == rx_array { if rx_array.read_select(token).is_ok() { count += 1; } } else if token == rx_one { if rx_one.read_select(token).is_ok() { count += 1; } } else if token == rx_mpsc_list { if rx_mpsc_list.read_select(token).is_ok() { count += 1; } } else if token == rx_mpsc_array { if rx_mpsc_array.read_select(token).is_ok() { count += 1; } } else if token == rx_mpsc_one { if rx_mpsc_one.read_select(token).is_ok() { count += 1; } } else { panic!("unknown token"); } } Err(_) => { break; } } } for h in handlers { h.join().unwrap(); } assert_eq!(count, total_messages); } #[logfn] #[rstest] fn test_select_null(setup_log: ()) { let (tx, rx) = mpmc::bounded_blocking::(10); let (stop_tx, stop_rx) = mpmc::Null::new().new_blocking(); let mut select = Select::new(); select.add(&rx); select.add(&stop_rx); let h = thread::spawn(move || { for i in 0..10 { tx.send(i).unwrap(); } thread::sleep(Duration::from_millis(50)); drop(stop_tx); }); let mut count = 0; loop { let res = select.select().unwrap(); if res == rx { if let Ok(_item) = rx.read_select(res) { assert_eq!(_item, count); count += 1; } } else if res == stop_rx { if stop_rx.read_select(res).is_err() { while let Ok(_item) = rx.try_recv() { assert_eq!(_item, count); count += 1; } break; } else { unreachable!(); } } } h.join().unwrap(); assert_eq!(count, 10); } #[logfn] #[rstest] fn test_select_pressure_concurrent(setup_log: ()) { let (tx_list, rx_list) = mpmc::unbounded_blocking::(); let (tx_array, rx_array) = mpmc::bounded_blocking::(100); let mut th_recv = Vec::new(); for _ in 0..2 { let rx_list_clone = rx_list.clone(); let rx_array_clone = rx_array.clone(); th_recv.push(thread::spawn(move || { let mut select = Select::new(); select.add(&rx_list_clone); select.add(&rx_array_clone); let mut local_sum: usize = 0; loop { match select.select() { Ok(res) => { if res == rx_list_clone { if rx_list_clone.read_select(res).is_err() { select.remove(&rx_list_clone); } else { local_sum += 1; } } else if res == rx_array_clone 
// NOTE(review): verbatim mangled bytes preserved (see note at chunk top).
// Content: tail of `test_select_pressure_concurrent` (2 receivers x 2 sender
// pairs, total_sum asserted as 4 * ROUND), then Multiplex tests: basic two-tx
// recv, recv_timeout on an empty mux, try_recv Empty/Ok/Empty sequence, and
// the start of the delayed-sender array test.
{ if rx_array_clone.read_select(res).is_err() { select.remove(&rx_array_clone); } else { local_sum += 1; } } else { unreachable!(); } } Err(_) => break, } } local_sum })); } let mut th_send = Vec::new(); for _ in 0..2 { let tx_list_clone = tx_list.clone(); let tx_array_clone = tx_array.clone(); th_send.push(thread::spawn(move || { for i in 0..ROUND { tx_list_clone.send(i as i32).expect("send"); } })); th_send.push(thread::spawn(move || { for i in 0..ROUND { tx_array_clone.send((i + ROUND) as i32).expect("send"); } })); } drop(tx_list); drop(tx_array); for th in th_send { let _ = th.join(); } let mut total_sum = 0; for th in th_recv { total_sum += th.join().unwrap(); } assert_eq!(total_sum, 4 * ROUND); } #[logfn] #[rstest] fn test_multiplex_basic(setup_log: ()) { let mut mp = Multiplex::>::new(); let tx1: MTx<_> = mp.bounded_tx(10); let tx2: MTx<_> = mp.bounded_tx(10); // Send values from different threads let h1 = thread::spawn(move || { tx1.send(1).unwrap(); }); let h2 = thread::spawn(move || { tx2.send(2).unwrap(); }); h1.join().unwrap(); h2.join().unwrap(); // Collect received values let mut received = Vec::new(); for _ in 0..2 { let val = mp.recv().unwrap(); received.push(val); } // Verify we received both values (order may vary due to round-robin selection) assert!(received.contains(&1)); assert!(received.contains(&2)); assert_eq!(received.len(), 2); } #[logfn] #[rstest] fn test_multiplex_timeout(setup_log: ()) { let mut mp = Multiplex::>::new(); let _tx: MTx<_> = mp.bounded_tx(10); let result = mp.recv_timeout(Duration::from_millis(10)); assert_eq!(result, Err(RecvTimeoutError::Timeout)); } #[logfn] #[rstest] fn test_multiplex_try_recv(setup_log: ()) { let mut mp = Multiplex::>::new(); let tx: MTx<_> = mp.bounded_tx(10); assert_eq!(mp.try_recv(), Err(TryRecvError::Empty)); tx.send(42).unwrap(); assert_eq!(mp.try_recv(), Ok(42)); assert_eq!(mp.try_recv(), Err(TryRecvError::Empty)); } #[logfn] #[rstest] fn test_multiplex_basic_array_blocking(setup_log: ()) {
// NOTE(review): staggered-sleep array test (25/50/100ms senders), the
// 2-sender list-flavor test with round count reduced under miri (99 vs 999),
// sender-close draining test, and the start of the blocked-sender drop test.
let mut mp = Multiplex::>::new(); let tx1: MTx<_> = mp.bounded_tx(10); let tx2: MTx<_> = mp.bounded_tx(10); let tx3: MTx<_> = mp.bounded_tx(10); let h1 = thread::spawn(move || { thread::sleep(Duration::from_millis(50)); tx1.send(10).expect("send"); }); let h2 = thread::spawn(move || { thread::sleep(Duration::from_millis(100)); tx2.send(20).expect("send"); }); let h3 = thread::spawn(move || { thread::sleep(Duration::from_millis(25)); tx3.send(30).expect("send"); }); let mut received_values = Vec::new(); for _ in 0..3 { received_values.push(mp.recv().unwrap()); } received_values.sort(); assert_eq!(received_values, vec![10, 20, 30]); h1.join().unwrap(); h2.join().unwrap(); h3.join().unwrap(); } #[logfn] #[rstest] fn test_multiplex_basic_list_blocking(setup_log: ()) { let mut mp = Multiplex::>::new(); let tx1: MTx>> = mp.new_tx(); let tx2: MTx>> = mp.new_tx(); let round = { #[cfg(miri)] { 99 } #[cfg(not(miri))] { 999 } }; let h1 = thread::spawn(move || { for i in 0..round { tx1.send(1000 + i).expect("send"); } }); let h2 = thread::spawn(move || { for i in 0..round { tx2.send(2000 + i).expect("send"); } }); let mut received_values = Vec::with_capacity(round * 2); for _ in 0..(2 * round) { if let Ok(item) = mp.recv() { received_values.push(item); } else { panic!("Unexpected early close, count={:?}", received_values.len()); } } received_values.sort(); for i in 0..received_values.len() { let item = received_values[i]; if item < 2000 { assert_eq!(item, 1000 + i); } else { assert_eq!(item, 2000 + i - round); } } h1.join().unwrap(); h2.join().unwrap(); } #[logfn] #[rstest] fn test_multiplex_sender_close(setup_log: ()) { let mut mp = Multiplex::>::new(); let tx1: MTx<_> = mp.bounded_tx(10); let tx2: MTx<_> = mp.bounded_tx(10); tx1.send(1).expect("send"); tx2.send(2).expect("send"); drop(tx1); drop(tx2); let mut received = 0; while let Ok(_) = mp.recv() { received += 1; } assert_eq!(received, 2); } #[logfn] #[rstest] #[case(1, 1)] #[case(5, 1)] #[case(5, 5)] fn
// NOTE(review): `test_multiplex_basic_drop_on_sender_blocked` — fills each
// bounded channel to capacity so a further send() blocks, syncs sender
// threads on a Barrier, then drops the Multiplex; every blocked sender must
// wake with SendError (counted via AtomicUsize). Exercised via a
// macro_rules! over spsc/mpsc/mpmc Array flavors.
test_multiplex_basic_drop_on_sender_blocked( setup_log: (), #[case] producers: usize, #[case] bound: usize, ) { macro_rules! run_test { ($flavor: path, $tx_t: tt)=>{{ let mut mp = Multiplex::<$flavor>::new(); println!("run_test {:?}", mp); let mut senders: Vec<$tx_t>> = Vec::new(); for _ in 0..producers { senders.push(mp.bounded_tx(bound)); } let results = Arc::new(AtomicUsize::new(0)); // To count how many senders returned disconnected // Fill the channel initially so the first sender blocks for tx in &senders { for i in 0..bound { tx.send(i).expect("send"); // Fill up the capacity } } let mut handles = Vec::new(); let barrier = Arc::new(Barrier::new(producers + 1)); // +1 for the main thread for tx in senders { let barrier_clone = barrier.clone(); let results_clone = results.clone(); handles.push(thread::spawn(move || { barrier_clone.wait(); // Wait for all senders to be ready to block let res = tx.send(100); if let Err(SendError(_)) = res { results_clone.fetch_add(1, Ordering::SeqCst); } })); } barrier.wait(); // Main thread waits for all sender threads to reach the barrier // Give a moment for threads to potentially block thread::sleep(Duration::from_millis(50)); // Drop the multiplexer, which should wake up all blocking senders drop(mp); for handle in handles { handle.join().unwrap(); } assert_eq!(results.load(Ordering::SeqCst), producers); println!(""); }}; } run_test!(spsc::Array, Tx); run_test!(mpsc::Array, MTx); run_test!(mpmc::Array, MTx); } #[logfn] #[rstest] #[case(1, 1)] #[case(1, 10)] #[case(20, 1)] #[case(10, 10)] #[case(5, 100)] fn test_pressure_multiplex_array(setup_log: (), #[case] producers: usize, #[case] bound: usize) { #[cfg(miri)] { if producers > 5 { println!("skip"); return; } } let mut mp = Multiplex::>::new(); let round = ROUND; let total_messages = round * producers; let mut handlers = Vec::new(); for _ in 0..producers { let tx: Tx<_> = mp.bounded_tx(bound); handlers.push(thread::spawn(move || { for i in 0..round {
// NOTE(review): verbatim mangled bytes preserved (see note at chunk top).
// Content: tail of `test_pressure_multiplex_array` (count until
// producers * ROUND received), `test_pressure_multiplex_array_mp` (4 cloned
// MTx per producer, run over mpsc and mpmc Array flavors via macro), and the
// start of the unbounded-list pressure test macro.
tx.send(i).expect("send"); } })); } let mut count = 0; while count < total_messages { match mp.recv() { Ok(_) => count += 1, Err(_) => break, } } for h in handlers { h.join().unwrap(); } assert_eq!(count, total_messages); } #[logfn] #[rstest] #[case(1, 1)] #[case(1, 10)] #[case(20, 1)] #[case(10, 10)] #[case(5, 20)] fn test_pressure_multiplex_array_mp(setup_log: (), #[case] producers: usize, #[case] bound: usize) { #[cfg(miri)] { if producers > 5 { println!("skip"); return; } } macro_rules! run_test { ($mp: expr) => { println!("run_test {:?}", $mp); let round = ROUND; let total_messages = round * producers * 4; let mut handlers = Vec::new(); for _ in 0..producers { let tx: MTx<_> = $mp.bounded_tx(bound); for _ in 0..4 { let _tx = tx.clone(); handlers.push(thread::spawn(move || { for i in 0..round { _tx.send(i).expect("send"); } })); } } let mut count = 0; while count < total_messages { match $mp.recv() { Ok(_) => count += 1, Err(_) => break, } } for h in handlers { h.join().unwrap(); } assert_eq!(count, total_messages); }; } let mut mp = Multiplex::>::new(); run_test!(mp); let mut mp = Multiplex::>::new(); run_test!(mp); } #[logfn] #[rstest] #[case(1)] #[case(5)] #[case(20)] fn test_pressure_multiplex_list(setup_log: (), #[case] producers: usize) { #[cfg(miri)] { if producers > 5 { println!("skip"); return; } } macro_rules!
// NOTE(review): list-flavor pressure macro run over spsc/mpsc/mpmc, then the
// weighted round-robin tests: `bounded_tx_with_weight` drains up to weight
// items per channel before rotating (the expected sequence on the next lines
// shows 2-at-a-time interleaving), and the skip-empty test verifies rotation
// happens early when the current channel is drained even with weight left.
run_test { ($mp: expr, $tx_c: tt) => { println!("run_test {:?}", $mp); let round = ROUND; let total_messages = round * producers; let mut handlers = Vec::new(); for _ in 0..producers { let tx: $tx_c<_> = $mp.new_tx(); handlers.push(thread::spawn(move || { for i in 0..round { tx.send(i).expect("send"); } })); } let mut count = 0; while count < total_messages { match $mp.recv() { Ok(_) => count += 1, Err(_) => break, } } for h in handlers { h.join().unwrap(); } assert_eq!(count, total_messages); }; } let mut mp = Multiplex::>::new(); run_test!(mp, Tx); let mut mp = Multiplex::>::new(); run_test!(mp, MTx); let mut mp = Multiplex::>::new(); run_test!(mp, MTx); } #[logfn] #[rstest] fn test_multiplex_weighted_round_robin(setup_log: ()) { let mut mp = Multiplex::>::new(); // Channel 1 with weight 2 let tx1: MTx<_> = mp.bounded_tx_with_weight(10, 2); // Channel 2 with weight 2 let tx2: MTx<_> = mp.bounded_tx_with_weight(10, 2); // Send data for i in 0..6 { tx1.send(10 + i).unwrap(); // 10, 11, 12, 13, 14, 15 tx2.send(20 + i).unwrap(); // 20, 21, 22, 23, 24, 25 } let mut received = Vec::new(); for _ in 0..12 { received.push(mp.recv().unwrap()); } // Expected sequence: // tx1 (10), tx1 (11), tx1 (12) -> weight exhausted (actually weight+1 logic), switch to tx2 // tx2 (20), tx2 (21), tx2 (22) -> weight exhausted, switch to tx1 // tx1 (13), tx1 (14), tx1 (15) // tx2 (23), tx2 (24), tx2 (25) let expected = vec![10, 11, 20, 21, 12, 13, 22, 23, 14, 15, 24, 25]; println!("Received: {:?}", received); assert_eq!(received, expected); } #[logfn] #[rstest] fn test_multiplex_weighted_skip_empty(setup_log: ()) { // Test that if weight is not exhausted but channel is empty, we skips to next let mut mp = Multiplex::>::new(); let tx1: MTx<_> = mp.bounded_tx_with_weight(10, 5); // High weight let tx2: MTx<_> = mp.bounded_tx_with_weight(10, 2); tx1.send(1).unwrap(); tx2.send(2).unwrap(); tx2.send(3).unwrap(); // 1. Recv from tx1 (left=4). Empty now. assert_eq!(mp.recv().unwrap(), 1); // 2.
Recv. tx1 is empty (but left=4). Logic should skip tx1 and go to tx2. // tx2 has item (2). Return 2. assert_eq!(mp.recv().unwrap(), 2); // 3. Recv. tx2 is current (left=1). assert_eq!(mp.recv().unwrap(), 3); } ================================================ FILE: test-suite/src/test_type_switch.rs ================================================ use crate::*; use captains_log::{logfn, *}; use crossfire::flavor::Flavor; use crossfire::*; use rstest::*; use std::time::Duration; #[fixture] fn setup_log() { let _ = recipe::env_logger("LOG_FILE", "LOG_LEVEL").build().expect("log setup"); } // Macro to wrap tests with a 5-second timeout macro_rules! runtime_block_on_with_timeout { ($async_block:expr) => {{ runtime_block_on!(async move { timeout(Duration::from_secs(5), $async_block) .await .expect("Test timed out after 5 seconds") }) }}; } // Test async-to-blocking receiver switching for bounded channels with messages in buffer #[logfn] #[rstest] #[case(spsc::bounded_async(5))] // Small buffer to create backpressure #[case(mpsc::bounded_async(5))] fn test_bounded_async_with_sync_receiver_switch_buffered< F: Flavor + 'static, T: AsyncTxTrait, >( setup_log: (), #[case] channel: (T, AsyncRx), ) { let (tx, rx) = channel; let total_messages = 20; // More messages than buffer capacity let async_consumed = 4; // Leave messages in buffer during switch runtime_block_on_with_timeout!(async move { // Spawn async sender task - will block when buffer fills let sender_task = async_spawn!(async move { for i in 0..total_messages { trace!("Async sender sending message {}", i); tx.send(i).await.expect("Failed to send message"); } trace!("Async sender completed all {} messages", total_messages); }); // Consume some messages with async receiver (in async task) let receiver_task = async_spawn!(async move { let mut async_received = Vec::new(); for _ in 0..async_consumed { match rx.recv().await { Ok(value) => { trace!("Async receiver got message: {}", value); async_received.push(value); }
// NOTE(review): verbatim mangled bytes preserved (see note at chunk top).
// Content: tail of the spsc/mpsc async-to-blocking receiver switch test —
// after partial async consumption the AsyncRx is converted via `.into()` to
// a blocking Rx while the sender is still backpressured, remaining messages
// are drained on a thread, and the union of both receive sets is checked.
// Then the mpmc variant of the same test begins.
Err(e) => { panic!("Failed to receive message: {:?}", e); } } } trace!("Async receiver consumed {} messages", async_received.len()); (rx, async_received) }); // Get the receiver back after partial consumption let (rx, async_received) = async_join_result!(receiver_task); // CRITICAL: Convert to blocking receiver while messages are still in buffer AND sender is waiting let blocking_rx: Rx = rx.into(); // Continue receiving with blocking receiver in a thread let remaining_messages = total_messages - async_consumed; let sync_th = std::thread::spawn(move || { let mut sync_received = Vec::new(); while let Ok(value) = blocking_rx.recv() { trace!("Sync receiver got message: {}", value); sync_received.push(value); } trace!("Sync receiver consumed {} messages from buffer", sync_received.len()); sync_received }); // Wait for sender to complete let _ = sender_task.await; let sync_received = sync_th.join().expect("Sync receiver thread panicked"); // Verify all messages were received assert_eq!(async_received.len(), async_consumed); assert_eq!(sync_received.len(), remaining_messages); let mut all_received = async_received; all_received.extend(sync_received); assert_eq!(all_received.len(), total_messages); for i in 0..total_messages { assert!(all_received.contains(&i), "Missing value: {}", i); } trace!( "Successfully switched bounded channel from async to sync receiver with backpressure" ); }); } // Test async-to-blocking receiver switching for MPMC bounded channels with messages in buffer #[logfn] #[rstest] #[case(mpmc::bounded_async(5))] // Small buffer to create backpressure fn test_mpmc_bounded_async_with_sync_receiver_switch_buffered + 'static>( setup_log: (), #[case] channel: (MAsyncTx, MAsyncRx), ) { let (tx, rx) = channel; let total_messages = 20; // More messages than buffer capacity let async_consumed = 4; // Consume some messages before switching runtime_block_on_with_timeout!(async move { // Send all messages first to fill buffer (async sender in task) let sender_task
// NOTE(review): mpmc async-to-blocking receiver switch body (MAsyncRx ->
// MRx via `.into()`), mirroring the spsc/mpsc test above.
= async_spawn!(async move { for i in 0..total_messages { tx.send(i).await.expect("Failed to send message"); trace!("Async sender sent message: {}", i); } trace!("Async sender completed all {} messages", total_messages); }); // Consume some messages with async receiver (in async task) let receiver_task = async_spawn!(async move { let mut async_received = Vec::new(); for _ in 0..async_consumed { match rx.recv().await { Ok(value) => { trace!("Async receiver got message: {}", value); async_received.push(value); } Err(e) => { panic!("Failed to receive message with async receiver: {:?}", e); } } } trace!("Async receiver consumed {} messages", async_received.len()); (rx, async_received) }); // Get the receiver back after partial consumption let (rx, async_received) = async_join_result!(receiver_task); // Convert to blocking receiver while messages are still in buffer let sync_rx: MRx = rx.into(); // Consume remaining messages with blocking receiver in thread let remaining_messages = total_messages - async_consumed; let sync_th = std::thread::spawn(move || { let mut sync_received = Vec::new(); while let Ok(value) = sync_rx.recv() { trace!("Sync receiver got message: {}", value); sync_received.push(value); } trace!("Sync receiver consumed {} messages from buffer", sync_received.len()); sync_received }); // Wait for sender to complete let _ = sender_task.await; let sync_received = sync_th.join().expect("Sync receiver thread panicked"); // Verify all messages were received assert_eq!(async_received.len(), async_consumed); assert_eq!(sync_received.len(), remaining_messages); let mut all_received = async_received; all_received.extend(sync_received); assert_eq!(all_received.len(), total_messages); for i in 0..total_messages { assert!(all_received.contains(&i), "Missing value: {}", i); } trace!( "Successfully switched MPMC bounded channel from async to sync receiver with backpressure" ); }); } // Test blocking-to-async sender switching for bounded channels #[logfn] #[rstest]
// NOTE(review): spsc blocking-to-async SENDER switch — blocking Tx sends 4
// of 20 into a capacity-5 buffer, is converted to AsyncTx via `.into()`, and
// an async task sends the rest while a blocking thread drains.
#[case(spsc::bounded_blocking(5))] // Small buffer for backpressure fn test_spsc_bounded_blocking_with_async_sender_switch + 'static>( setup_log: (), #[case] channel: (Tx, Rx), ) { let (tx, rx) = channel; let total_messages = 20; let sync_sent = 4; // Fill buffer, then switch while sender would block runtime_block_on_with_timeout!(async move { // Start blocking receiver in a thread let receiver_handle = std::thread::spawn(move || { let mut all_received = Vec::new(); while let Ok(value) = rx.recv() { trace!("Blocking receiver got message: {}", value); all_received.push(value); } trace!("Blocking receiver completed"); all_received }); // Send messages with blocking sender in a thread (will block when buffer fills) let sender_handle = std::thread::spawn(move || { for i in 0..sync_sent { trace!("Blocking sender sending message {}", i); tx.send(i).expect("Failed to send message"); } trace!("Blocking sender sent {} messages", sync_sent); tx }); // Get the sender back and convert to async let tx = sender_handle.join().expect("Sender thread panicked"); // CRITICAL: Convert to async sender while buffer has backpressure let async_tx: AsyncTx = tx.into(); // Send remaining messages with async sender in task let remaining_messages = total_messages - sync_sent; let async_sender_task = async_spawn!(async move { for i in sync_sent..total_messages { trace!("Async sender sending message {}", i); async_tx.send(i).await.expect("Failed to send message"); } trace!("Async sender sent {} more messages", remaining_messages); }); // Wait for async sender to complete let _ = async_sender_task.await; // Get final results let all_received = receiver_handle.join().expect("Final receiver thread panicked"); // Verify all messages were received assert_eq!(all_received.len(), total_messages); for i in 0..total_messages { assert!(all_received.contains(&i), "Missing value: {}", i); } trace!("Successfully switched from blocking to async sender with backpressure"); }); } // Test blocking-to-async
// NOTE(review): verbatim mangled bytes preserved (see note at chunk top);
// the first tokens below are the severed tail of a `//` comment split by the
// line join on the previous source line.
// Content: mpsc blocking-to-async sender switch (MTx -> MAsyncTx via
// `.into()` with 4 of 20 messages pre-buffered), then the mpmc variant.
sender switching for multi-producer bounded channels #[logfn] #[rstest] #[case(mpsc::bounded_blocking(5))] // Buffer < 12 total messages fn test_mpsc_bounded_blocking_with_async_sender_switch + 'static>( setup_log: (), #[case] channel: (MTx, Rx), ) { let (tx, rx) = channel; let total_messages = 20; let sync_sent = 4; // Fill buffer before switching runtime_block_on_with_timeout!(async move { // Send messages with blocking multi-sender in a thread let sender_handle = std::thread::spawn(move || { for i in 0..sync_sent { tx.send(i).expect("Failed to send message"); } trace!("Blocking MTx sent {} messages, buffer has messages", sync_sent); tx }); // Get the sender back let tx = sender_handle.join().expect("Sender thread panicked"); // CRITICAL: Convert to async multi-sender while messages are in buffer let async_tx: MAsyncTx = tx.into(); // Send remaining messages with async multi-sender in a task let async_sender_task = async_spawn!(async move { let remaining_messages = total_messages - sync_sent; for i in sync_sent..total_messages { async_tx.send(i).await.expect("Failed to send message"); } trace!("Async MAsyncTx sent {} more messages", remaining_messages); }); // Receive all messages with blocking receiver in a thread let receiver_handle = std::thread::spawn(move || { let mut all_received = Vec::new(); while let Ok(value) = rx.recv() { all_received.push(value); } all_received }); // Wait for async sender to complete let _ = async_sender_task.await; let all_received = receiver_handle.join().expect("Receiver thread panicked"); // Verify all messages were received assert_eq!(all_received.len(), total_messages); for i in 0..total_messages { assert!(all_received.contains(&i), "Missing value: {}", i); } trace!("Successfully switched from blocking MTx to async MAsyncTx with buffered messages"); }); } // Test blocking-to-async sender switching for MPMC bounded channels #[logfn] #[rstest] #[case(mpmc::bounded_blocking(5))] // Buffer < 12 total messages fn
// NOTE(review): mpmc sender-switch body mirrors the mpsc one above, but with
// an MRx receiver; afterwards the spsc blocking-to-async RECEIVER switch
// test begins (Rx -> AsyncRx while 16 of 20 messages remain buffered).
test_mpmc_bounded_blocking_with_async_sender_switch + 'static>( setup_log: (), #[case] channel: (MTx, MRx), ) { let (tx, rx) = channel; let total_messages = 20; let sync_sent = 4; // Fill buffer before switching runtime_block_on_with_timeout!(async move { // Send messages with blocking multi-sender in a thread let sender_handle = std::thread::spawn(move || { for i in 0..sync_sent { tx.send(i).expect("Failed to send message"); } trace!("Blocking MTx sent {} messages, buffer has messages", sync_sent); tx }); // Get the sender back let tx = sender_handle.join().expect("Sender thread panicked"); // CRITICAL: Convert to async multi-sender while messages are in buffer let async_tx: MAsyncTx = tx.into(); // Send remaining messages with async multi-sender in a task let async_sender_task = async_spawn!(async move { let remaining_messages = total_messages - sync_sent; for i in sync_sent..total_messages { async_tx.send(i).await.expect("Failed to send message"); } trace!("Async MAsyncTx sent {} more messages", remaining_messages); }); // Receive all messages with blocking receiver in a thread let receiver_handle = std::thread::spawn(move || { let mut all_received = Vec::new(); while let Ok(value) = rx.recv() { all_received.push(value); } all_received }); // Wait for async sender to complete let _ = async_sender_task.await; let all_received = receiver_handle.join().expect("Receiver thread panicked"); // Verify all messages were received assert_eq!(all_received.len(), total_messages); for i in 0..total_messages { assert!(all_received.contains(&i), "Missing value: {}", i); } trace!("Successfully switched from blocking MTx to async MAsyncTx with buffered messages for MPMC"); }); } // Test blocking-to-async receiver switching for bounded channels #[logfn] #[rstest] #[case(spsc::bounded_blocking(5))] // Buffer < 12 total messages fn test_spsc_bounded_blocking_with_async_receiver_switch + 'static>( setup_log: (), #[case] channel: (Tx, Rx), ) { let (tx, rx) = channel; let
// NOTE(review): body of the spsc receiver-switch test — blocking receiver
// consumes 4 messages on a thread, hands back the Rx, which is converted to
// AsyncRx; an async task drains the rest and the union is verified.
total_messages = 20; let sync_consumed = 4; // Leave most messages in buffer runtime_block_on_with_timeout!(async move { // Send all messages in a thread (sync sender) let sender_handle = std::thread::spawn(move || { for i in 0..total_messages { tx.send(i).expect("Failed to send message"); } trace!("Sent {} messages to buffer", total_messages); }); // Start receiver in a thread to consume some messages let receiver_handle = std::thread::spawn(move || { let mut sync_received = Vec::new(); for _ in 0..sync_consumed { sync_received.push(rx.recv().expect("Failed to receive message")); } trace!( "Blocking receiver consumed {} messages, {} remain in buffer", sync_received.len(), total_messages - sync_consumed ); (rx, sync_received) }); // Join receiver first, then sender let (rx, sync_received) = receiver_handle.join().expect("Receiver thread panicked"); // CRITICAL: Convert to async receiver while messages are still in buffer let async_rx: AsyncRx = rx.into(); // Consume remaining messages with async receiver in a task let async_receiver_task = async_spawn!(async move { let mut async_received = Vec::new(); while let Ok(value) = async_rx.recv().await { async_received.push(value); } trace!("Async receiver consumed {} messages from buffer", async_received.len()); async_received }); let async_received = async_join_result!(async_receiver_task); sender_handle.join().expect("Sender thread panicked"); // Verify all messages were received let remaining_messages = total_messages - sync_consumed; assert_eq!(sync_received.len(), sync_consumed); assert_eq!(async_received.len(), remaining_messages); let mut all_received = sync_received; all_received.extend(async_received); assert_eq!(all_received.len(), total_messages); for i in 0..total_messages { assert!(all_received.contains(&i), "Missing value: {}", i); } trace!("Successfully switched from blocking to async receiver with buffered messages"); }); } // Test blocking-to-async receiver switching for multi-producer bounded channels
// MPSC variant of the receiver-switch test: blocking MTx sender thread fills the
// bounded buffer; the blocking Rx drains `sync_consumed` messages, is converted
// to AsyncRx via `.into()` with messages still buffered, and the async side
// drains the remainder.
// NOTE(review): generic parameters look stripped by extraction (missing `<...>`
// before `+ 'static>`; `MTx`/`Rx` lack type arguments) — preserved verbatim.
#[logfn]
#[rstest]
#[case(mpsc::bounded_blocking(5))] // Buffer < 12 total messages
fn test_mpsc_bounded_blocking_with_async_receiver_switch + 'static>(
    setup_log: (),
    #[case] channel: (MTx, Rx),
) {
    let (tx, rx) = channel;
    let total_messages = 20;
    let sync_consumed = 4; // Leave most messages in buffer
    runtime_block_on_with_timeout!(async move {
        // Start sender in a thread (sync sender)
        let sender_handle = std::thread::spawn(move || {
            for i in 0..total_messages {
                tx.send(i).expect("Failed to send message");
            }
            trace!("Sent {} messages to buffer", total_messages);
        });
        // Start receiver in a thread to consume some messages
        let receiver_handle = std::thread::spawn(move || {
            let mut sync_received = Vec::new();
            for _ in 0..sync_consumed {
                sync_received.push(rx.recv().expect("Failed to receive message"));
            }
            trace!(
                "Blocking receiver consumed {} messages, {} remain in buffer",
                sync_received.len(),
                total_messages - sync_consumed
            );
            // Hand the receiver back for conversion
            (rx, sync_received)
        });
        // Join receiver first, then sender
        let (rx, sync_received) = receiver_handle.join().expect("Receiver thread panicked");
        // CRITICAL: Convert to async receiver while messages are still in buffer
        let async_rx: AsyncRx = rx.into();
        // Consume remaining messages with async receiver in a task
        let async_receiver_task = async_spawn!(async move {
            let mut async_received = Vec::new();
            while let Ok(value) = async_rx.recv().await {
                async_received.push(value);
            }
            trace!("Async receiver consumed {} messages from buffer", async_received.len());
            async_received
        });
        let async_received = async_join_result!(async_receiver_task);
        sender_handle.join().expect("Sender thread panicked");
        // Verify all messages were received, with the expected blocking/async split
        let remaining_messages = total_messages - sync_consumed;
        assert_eq!(sync_received.len(), sync_consumed);
        assert_eq!(async_received.len(), remaining_messages);
        let mut all_received = sync_received;
        all_received.extend(async_received);
        assert_eq!(all_received.len(), total_messages);
        for i in 0..total_messages {
            assert!(all_received.contains(&i), "Missing value: {}", i);
        }
        trace!("Successfully switched from blocking to async receiver with buffered messages");
    });
}

// Test blocking-to-async receiver switching for MPMC bounded channels
// Same shape as the MPSC variant above, but converts MRx -> MAsyncRx.
#[logfn]
#[rstest]
#[case(mpmc::bounded_blocking(5))] // Buffer < 12 total messages
fn test_mpmc_bounded_blocking_with_async_receiver_switch + 'static>(
    setup_log: (),
    #[case] channel: (MTx, MRx),
) {
    let (tx, rx) = channel;
    let total_messages = 20;
    let sync_consumed = 4; // Leave most messages in buffer
    runtime_block_on_with_timeout!(async move {
        // Send all messages in a thread (sync sender)
        let sender_handle = std::thread::spawn(move || {
            for i in 0..total_messages {
                tx.send(i).expect("Failed to send message");
            }
            trace!("Sent {} messages to buffer", total_messages);
        });
        // Start receiver in a thread to consume some messages
        let receiver_handle = std::thread::spawn(move || {
            let mut sync_received = Vec::new();
            for _ in 0..sync_consumed {
                sync_received.push(rx.recv().expect("Failed to receive message"));
            }
            trace!(
                "Blocking receiver consumed {} messages, {} remain in buffer",
                sync_received.len(),
                total_messages - sync_consumed
            );
            (rx, sync_received)
        });
        // Join receiver first, then sender
        let (rx, sync_received) = receiver_handle.join().expect("Receiver thread panicked");
        // CRITICAL: Convert to async receiver while messages are still in buffer
        let async_rx: MAsyncRx = rx.into();
        // Consume remaining messages with async receiver in a task
        let async_receiver_task = async_spawn!(async move {
            let mut async_received = Vec::new();
            while let Ok(value) = async_rx.recv().await {
                async_received.push(value);
            }
            trace!("Async receiver consumed {} remaining messages", async_received.len());
            async_received
        });
        let async_received = async_join_result!(async_receiver_task);
        sender_handle.join().expect("Sender thread panicked");
        // Verify all messages were received
        // (unlike the MPSC variant, no per-phase count asserts here)
        let mut all_received = sync_received;
        all_received.extend(async_received);
    assert_eq!(all_received.len(), total_messages);
        for i in 0..total_messages {
            assert!(all_received.contains(&i), "Missing value: {}", i);
        }
        trace!("Successfully switched MPMC from blocking to async receiver with buffered messages");
    });
}

// Test multi-producer sender switching (MTx to MAsyncTx)
// Parameterized over MPSC and MPMC bounded channels; receiver runs the whole
// time while the sender is converted mid-stream.
#[logfn]
#[rstest]
#[case(mpsc::bounded_blocking(5))] // Buffer < 20 total messages
#[case(mpmc::bounded_blocking(5))]
fn test_multi_producer_sender_switch< F: Flavor + 'static, R: BlockingRxTrait, >(
    setup_log: (),
    #[case] channel: (MTx, R),
) {
    let (tx, rx) = channel;
    let total_messages = 20;
    let sync_sent = 4; // Fill most of buffer before switching
    runtime_block_on_with_timeout!(async move {
        // Start receiver first to consume messages as they arrive
        let receiver_handle = std::thread::spawn(move || {
            let mut all_received = Vec::new();
            while let Ok(value) = rx.recv() {
                all_received.push(value);
            }
            all_received
        });
        // Send messages with blocking multi-sender in a thread
        let sender_handle = std::thread::spawn(move || {
            for i in 0..sync_sent {
                tx.send(i).expect("Failed to send message");
            }
            trace!("Blocking MTx sent {} messages, buffer has messages", sync_sent);
            tx
        });
        // Get the sender back and convert to async
        let tx = sender_handle.join().expect("Sender thread panicked");
        let async_tx: MAsyncTx = tx.into();
        // Send remaining messages with async multi-sender in a task
        let async_sender_task = async_spawn!(async move {
            for i in sync_sent..total_messages {
                async_tx.send(i).await.expect("Failed to send message");
            }
            trace!("Async MAsyncTx sent {} more messages", total_messages - sync_sent);
        });
        // Wait for async sender to complete, then join receiver
        let _ = async_sender_task.await;
        let all_received = receiver_handle.join().expect("Receiver thread panicked");
        // Verify all messages were received
        assert_eq!(all_received.len(), total_messages);
        for i in 0..total_messages {
            assert!(all_received.contains(&i), "Missing value: {}", i);
        }
        // NOTE(review): the newline inside this string literal is present in the
        // extracted source and is preserved as-is.
        trace!("Successfully switched from MTx to
MAsyncTx with buffered messages");
    });
}

// Test async-to-blocking sender switching for SPSC bounded channels
// Reverse direction of the earlier tests: AsyncTx sends the first batch, is
// converted to the blocking Tx via `.into()`, and a thread sends the rest.
#[logfn]
#[rstest]
#[case(spsc::bounded_async(5))] // Small buffer for backpressure
fn test_spsc_bounded_async_with_blocking_sender_switch + 'static>(
    setup_log: (),
    #[case] channel: (AsyncTx, AsyncRx),
) {
    let (tx, rx) = channel;
    let total_messages = 10;
    let async_sent = 4; // Fill buffer before switching
    runtime_block_on_with_timeout!(async move {
        // Send messages with async sender in a task
        let sender_task = async_spawn!(async move {
            for i in 0..async_sent {
                tx.send(i).await.expect("Failed to send message");
            }
            trace!("Async sender sent {} messages, buffer has messages", async_sent);
            tx
        });
        // Get the sender back and convert to blocking
        let tx = async_join_result!(sender_task);
        let blocking_tx: Tx = tx.into();
        // Send remaining messages with blocking sender in thread
        let blocking_sender_handle = std::thread::spawn(move || {
            let remaining_messages = total_messages - async_sent;
            for i in async_sent..total_messages {
                blocking_tx.send(i).expect("Failed to send message");
            }
            trace!("Blocking sender sent {} more messages", remaining_messages);
        });
        // Receive all messages with async receiver in a task
        let receiver_task = async_spawn!(async move {
            let mut all_received = Vec::new();
            while let Ok(value) = rx.recv().await {
                all_received.push(value);
            }
            all_received
        });
        // Wait for both sender and receiver to complete
        let all_received = async_join_result!(receiver_task);
        blocking_sender_handle.join().expect("Blocking sender thread panicked");
        // Verify all messages were received
        assert_eq!(all_received.len(), total_messages);
        for i in 0..total_messages {
            assert!(all_received.contains(&i), "Missing value: {}", i);
        }
        trace!("Successfully switched SPSC from async to blocking sender with buffered messages");
    });
}

// Test async-to-blocking sender switching for MPSC bounded channels
#[logfn]
#[rstest]
#[case(mpsc::bounded_async(5))] // Small buffer for backpressure
fn
// NOTE(review): the `fn` keyword for this test sits at the end of the previous
// extracted line; the generic parameter list appears stripped by extraction.
test_mpsc_bounded_async_with_blocking_sender_switch + 'static>(
    setup_log: (),
    #[case] channel: (MAsyncTx, AsyncRx),
) {
    let (tx, rx) = channel;
    let total_messages = 10;
    let async_sent = 4; // Fill buffer before switching
    runtime_block_on_with_timeout!(async move {
        // Send messages with async multi-sender in a task
        let sender_task = async_spawn!(async move {
            for i in 0..async_sent {
                tx.send(i).await.expect("Failed to send message");
            }
            trace!("Async MAsyncTx sent {} messages, buffer has messages", async_sent);
            tx
        });
        // Get the sender back and convert to blocking
        let tx = async_join_result!(sender_task);
        let blocking_tx: MTx = tx.into();
        // Send remaining messages with blocking multi-sender in thread
        let blocking_sender_handle = std::thread::spawn(move || {
            let remaining_messages = total_messages - async_sent;
            for i in async_sent..total_messages {
                blocking_tx.send(i).expect("Failed to send message");
            }
            trace!("Blocking MTx sent {} more messages", remaining_messages);
        });
        // Receive all messages with async receiver in a task
        let receiver_task = async_spawn!(async move {
            let mut all_received = Vec::new();
            while let Ok(value) = rx.recv().await {
                all_received.push(value);
            }
            all_received
        });
        // Wait for both sender and receiver to complete
        let all_received = async_join_result!(receiver_task);
        blocking_sender_handle.join().expect("Blocking sender thread panicked");
        // Verify all messages were received
        assert_eq!(all_received.len(), total_messages);
        for i in 0..total_messages {
            assert!(all_received.contains(&i), "Missing value: {}", i);
        }
        trace!("Successfully switched MPSC from async to blocking sender with buffered messages");
    });
}

// Test async-to-blocking sender switching for MPMC bounded channels
// Same as the MPSC variant, but the receiving side is an MAsyncRx.
#[logfn]
#[rstest]
#[case(mpmc::bounded_async(5))] // Small buffer for backpressure
fn test_mpmc_bounded_async_with_blocking_sender_switch + 'static>(
    setup_log: (),
    #[case] channel: (MAsyncTx, MAsyncRx),
) {
    let (tx, rx) = channel;
    let total_messages = 10;
    let async_sent = 4; // Fill buffer before switching
    runtime_block_on_with_timeout!(async move {
        // Send messages with async multi-sender in a task
        let sender_task = async_spawn!(async move {
            for i in 0..async_sent {
                tx.send(i).await.expect("Failed to send message");
            }
            trace!("Async MAsyncTx sent {} messages, buffer has messages", async_sent);
            tx
        });
        // Get the sender back and convert to blocking
        let tx = async_join_result!(sender_task);
        let blocking_tx: MTx = tx.into();
        // Send remaining messages with blocking multi-sender in thread
        let blocking_sender_handle = std::thread::spawn(move || {
            let remaining_messages = total_messages - async_sent;
            for i in async_sent..total_messages {
                blocking_tx.send(i).expect("Failed to send message");
            }
            trace!("Blocking MTx sent {} more messages", remaining_messages);
        });
        // Receive all messages with async multi-receiver in a task
        let receiver_task = async_spawn!(async move {
            let mut all_received = Vec::new();
            while let Ok(value) = rx.recv().await {
                all_received.push(value);
            }
            all_received
        });
        // Wait for both sender and receiver to complete
        let all_received = async_join_result!(receiver_task);
        blocking_sender_handle.join().expect("Blocking sender thread panicked");
        // Verify all messages were received
        assert_eq!(all_received.len(), total_messages);
        for i in 0..total_messages {
            assert!(all_received.contains(&i), "Missing value: {}", i);
        }
        trace!("Successfully switched MPMC from async to blocking sender with buffered messages");
    });
}

================================================ FILE: test-suite/src/test_waitgroup.rs ================================================

use crate::*;
use crossfire::waitgroup::{WaitGroup, WaitGroupInline};
use crossfire::*;
use fastrand;
use rstest::*;
use std::sync::Arc;
use std::time::Duration;

// Per-test fixture: initializes logging and seeds the fastrand PRNG.
#[fixture]
fn setup_log() {
    _setup_log();
    // Seed fastrand for more deterministic testing.
fastrand::seed(
        std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_secs(),
    );
}

// Exercises try_wait() against get_left(), guard add/drop, and threshold changes,
// all on a single thread (no blocking expected).
#[logfn]
#[rstest]
fn test_basic_wg_try_wait(setup_log: ()) {
    let mut wg = WaitGroup::new((), 0);
    assert_eq!(wg.get_left(), 0);
    wg.wait(); // should return immediately
    assert_eq!(wg.try_wait(), Ok(()));
    // change threshold
    wg.set_threshold(1);
    assert_eq!(wg.try_wait(), Ok(()));
    let guard1 = wg.add_guard();
    assert_eq!(wg.try_wait(), Ok(()));
    let guard2 = wg.add_guard();
    // Two live guards exceed threshold 1
    assert_eq!(wg.try_wait(), Err(()));
    drop(guard2);
    assert_eq!(wg.try_wait(), Ok(()));
    // change threshold
    wg.set_threshold(0);
    assert_eq!(wg.try_wait(), Err(()));
    drop(guard1);
    assert_eq!(wg.try_wait(), Ok(()));
    assert_eq!(wg.try_wait(), Ok(()));
}

// WaitGroup carries user state (here an AtomicBool) accessible through Deref;
// one of the spawned threads flips it before dropping its guard.
#[logfn]
#[rstest]
fn test_waitgroup_with_state(setup_log: ()) {
    use std::sync::atomic::{AtomicBool, Ordering};
    let wg = WaitGroup::new(AtomicBool::new(true), 0);
    for i in 0..10 {
        let guard = wg.add_guard();
        std::thread::spawn(move || {
            if i == 5 {
                guard.store(false, Ordering::SeqCst);
            }
            drop(guard);
        });
    }
    wg.wait();
    assert_eq!(wg.load(Ordering::SeqCst), false);
}

// wait_timeout() must return Err while a guard is still alive, and dropping the
// WaitGroup before a guard finishes must not deadlock the guard-holding thread.
#[logfn]
#[rstest]
fn test_basic_wg_timeout_blocking(setup_log: ()) {
    // Test timeout case
    let wg = WaitGroup::new((), 0);
    let _guard = wg.add_guard();
    assert_eq!(wg.wait_timeout(Duration::from_millis(100)), Err(()));
    let _wg = WaitGroup::new((), 0);
    let _guard_parent = _wg.add_guard();
    // Test dropping while the guard has not finished
    let th = std::thread::spawn(move || {
        _wg.wait();
        std::thread::sleep(Duration::from_secs(1));
        drop(_guard);
    });
    assert!(wg.wait_timeout(Duration::from_millis(10)).is_err());
    drop(_guard_parent);
    if wg.get_left() > 0 {
        println!("drop early");
        drop(wg);
    }
    th.join().expect("join");
}

// wait_async() with zero guards must complete immediately.
#[logfn]
#[rstest]
fn test_basic_no_wait_async(setup_log: ()) {
    runtime_block_on!(async move {
        let wg = WaitGroup::new((), 0);
        assert_eq!(wg.get_left(), 0);
        wg.wait_async().await; // should return immediately
        assert_eq!(wg.try_wait(), Ok(()));
    });
}

// wait_async() blocks until the single outstanding guard is dropped by a task.
#[cfg(feature = "time")]
#[logfn]
#[rstest]
fn
test_basic_wg_one_guard_async(setup_log: ()) {
    runtime_block_on!(async move {
        let wg = WaitGroup::new((), 0);
        let guard = wg.add_guard();
        assert_eq!(wg.get_left(), 1);
        assert_eq!(wg.try_wait(), Err(()));
        let _ = async_spawn!(async move {
            sleep(Duration::from_millis(100)).await;
            drop(guard);
        });
        wg.wait_async().await;
        assert_eq!(wg.get_left_seqcst(), 0);
    });
}

// Many guards (including clones) dropped from two tasks; wait_async() with
// threshold 3 returns once the live count is at or below the threshold.
#[cfg(feature = "time")]
#[logfn]
#[rstest]
fn test_basic_wg_multi_guards_async(setup_log: ()) {
    const NUM_GUARDS: usize = 10;
    runtime_block_on!(async move {
        let mut wg = WaitGroup::new((), 3);
        let mut guards = Vec::new();
        for _ in 0..NUM_GUARDS {
            guards.push(wg.add_guard());
        }
        assert_eq!(wg.get_left(), NUM_GUARDS);
        // test clone of the WaitGroupGuard
        let guards1 = guards.clone();
        assert_eq!(wg.get_left(), NUM_GUARDS * 2);
        let guards2 = guards;
        let _ = async_spawn!(async move {
            sleep(Duration::from_millis(10)).await;
            drop(guards1);
        });
        let _ = async_spawn!(async move {
            sleep(Duration::from_millis(10)).await;
            drop(guards2);
        });
        wg.wait_async().await;
        assert!(wg.get_left() <= 3);
        // change threshold
        wg.set_threshold(0);
        wg.wait_async().await;
        assert_eq!(wg.get_left(), 0);
    });
}

// wait_async_with_timer() succeeds within the timer window; the tokio-only part
// also checks that dropping the WaitGroup while a guard is pending does not hang.
#[cfg(feature = "time")]
#[logfn]
#[rstest]
fn test_basic_wg_timeout_async(setup_log: ()) {
    runtime_block_on!(async move {
        let wg = WaitGroup::new((), 0);
        let guard = wg.add_guard();
        let th = async_spawn!(async move {
            sleep(Duration::from_millis(50)).await;
            drop(guard);
        });
        assert_eq!(wg.wait_async_with_timer(sleep(Duration::from_secs(1))).await, Ok(()));
        async_join_result!(th);
        #[cfg(feature = "tokio")]
        {
            let wg_child = WaitGroup::new((), 0);
            let guard_parent = wg_child.add_guard();
            let guard = wg.add_guard();
            let th = async_spawn!(async move {
                wg_child.wait_async().await;
                sleep(Duration::from_secs(1)).await;
                drop(guard);
                log::info!("drop guard");
            });
            assert!(tokio::time::timeout(Duration::from_millis(10), wg.wait_async())
                .await
                .is_err());
            drop(wg);
            log::info!("drop wg");
            drop(guard_parent);
            async_join_result!(th);
        }
    });
}

#[logfn]
// Stress test: 50 rounds of spawning a random number of named worker threads,
// each sleeping a random duration before dropping its guard; wait() must only
// return when all guards of the round are gone. (The `#[logfn]` attribute for
// this test sits at the end of the previous extracted line.)
#[rstest]
#[cfg_attr(miri, ignore)]
fn test_pressure_wg_blocking_spawn_sleep(setup_log: ()) {
    let wg = WaitGroup::new((), 0);
    let mut loop_cnt = 0;
    for _ in 0..50 {
        let num_guards = fastrand::u32(1..=10); // Generate between 1 and 10 guards
        loop_cnt += 1;
        info!("loop_cnt={} threads={}", loop_cnt, num_guards);
        let mut guards = Vec::new();
        for _ in 0..num_guards {
            guards.push(wg.add_guard());
        }
        let mut handles = Vec::new();
        for (i, guard) in guards.into_iter().enumerate() {
            handles.push(spawn_named_thread(&format!("worker-{}", i), move || {
                let millis = fastrand::u64(0..=10); // Sleep for 0 to 10 milliseconds
                std::thread::sleep(Duration::from_millis(millis));
                drop(guard);
            }));
        }
        wg.wait();
        assert_eq!(wg.get_left_seqcst(), 0);
        for handle in handles {
            handle.join().unwrap();
        }
    }
}

// Pressure test: guards are shipped through an unbounded async MPMC channel to
// consumer tasks that drop them; each ROUND waits until the live count falls to
// the threshold before publishing the next batch.
#[logfn]
#[rstest]
#[case(0, 5)]
#[case(2, 8)]
#[case(3, 20)]
#[case(10, 50)]
fn test_pressure_wg_async_channel(
    setup_log: (),
    #[case] threshold: usize,
    #[case] num_tasks: usize,
) {
    #[cfg(miri)]
    {
        if num_tasks > 10 {
            println!("skip");
            return;
        }
    }
    runtime_block_on!(async move {
        let (tx, rx) = mpmc::unbounded_async();
        let mut wg = WaitGroup::new((), threshold);
        let mut total_received = 0;
        // Spawn consumer tasks
        let mut th_s = Vec::new();
        for _ in 0..num_tasks {
            let _rx = rx.clone();
            let th = async_spawn!(async move {
                let mut count = 0;
                while let Ok(guard) = _rx.recv().await {
                    count += 1;
                    drop(guard);
                }
                count
            });
            th_s.push(th);
        }
        drop(rx);
        for i in 0..ROUND {
            wg.wait_async().await;
            assert!(wg.get_left() <= threshold);
            log::trace!("send {i}");
            // Publish next batch.
            for _ in 0..num_tasks {
                let guard = wg.add_guard();
                tx.send(guard).expect("send");
            }
        }
        drop(tx);
        log::info!("change threshold");
        wg.set_threshold(0);
        wg.wait_async().await;
        assert_eq!(wg.get_left(), 0);
        for th in th_s {
            total_received += async_join_result!(th);
        }
        assert_eq!(num_tasks * ROUND, total_received);
    });
}

// Same as test_pressure_wg_async_channel, but each consumer sleeps a random 1-5ms
// per guard to simulate work; round count is reduced to 10 under miri.
#[cfg(feature = "time")]
#[logfn]
#[rstest]
#[case(0, 5)]
#[case(2, 4)]
#[case(3, 20)]
#[case(10, 50)]
fn test_pressure_wg_async_channel_sleep(
    setup_log: (),
    #[case] threshold: usize,
    #[case] num_tasks: usize,
) {
    let rounds: usize = {
        #[cfg(miri)]
        {
            if num_tasks > 5 {
                println!("skip");
                return;
            }
            10
        }
        #[cfg(not(miri))]
        100
    };
    runtime_block_on!(async move {
        let (tx, rx) = mpmc::unbounded_async();
        let mut wg = WaitGroup::new((), threshold);
        let mut total_received = 0;
        // Spawn consumer tasks
        let mut th_s = Vec::new();
        for _ in 0..num_tasks {
            let _rx = rx.clone();
            let th = async_spawn!(async move {
                let mut count = 0;
                while let Ok(guard) = _rx.recv().await {
                    count += 1;
                    // Simulate work
                    sleep(Duration::from_millis(fastrand::u64(1..=5))).await;
                    drop(guard);
                }
                count
            });
            th_s.push(th);
        }
        drop(rx);
        for i in 0..rounds {
            wg.wait_async().await;
            assert!(wg.get_left() <= threshold);
            log::trace!("send {i}");
            // Publish next batch.
for _ in 0..num_tasks {
                let guard = wg.add_guard();
                tx.send(guard).expect("send");
            }
        }
        drop(tx);
        log::info!("change threshold");
        wg.set_threshold(0);
        wg.wait_async().await;
        assert_eq!(wg.get_left(), 0);
        for th in th_s {
            total_received += async_join_result!(th);
        }
        assert_eq!(num_tasks * rounds, total_received);
    });
}

// Blocking-channel variant of the pressure test: consumers are OS threads
// receiving guards from an unbounded blocking MPMC channel; the publisher uses
// the blocking wait() between batches.
#[logfn]
#[rstest]
#[case(0, 5)]
#[case(2, 8)]
#[case(3, 20)]
#[case(4, 10)]
fn test_pressure_wg_blocking_channel(
    setup_log: (),
    #[case] threshold: usize,
    #[case] num_threads: usize,
) {
    #[cfg(miri)]
    {
        if num_threads > 10 {
            println!("skip");
            return;
        }
    }
    runtime_block_on!(async move {
        let (tx, rx) = mpmc::unbounded_blocking();
        let mut wg = WaitGroup::new((), threshold);
        let mut total_received = 0;
        // Spawn consumer tasks
        let mut th_s = Vec::new();
        for _ in 0..num_threads {
            let _rx = rx.clone();
            let th = std::thread::spawn(move || {
                let mut count = 0;
                while let Ok(guard) = _rx.recv() {
                    count += 1;
                    drop(guard);
                }
                count
            });
            th_s.push(th);
        }
        drop(rx);
        for i in 0..ROUND {
            wg.wait();
            assert!(wg.get_left() <= threshold);
            log::trace!("send {i}");
            // Publish next batch.
            for _ in 0..num_threads {
                let guard = wg.add_guard();
                tx.send(guard).expect("send");
            }
        }
        drop(tx);
        log::info!("change threshold");
        wg.set_threshold(0);
        wg.wait();
        assert_eq!(wg.get_left(), 0);
        for th in th_s {
            total_received += th.join().unwrap();
        }
        assert_eq!(num_threads * ROUND, total_received);
    });
}

// WaitGroupInline smoke test: manual add_many/done_many counting (the done_*
// calls are unsafe because underflow is the caller's responsibility), exercised
// once with a blocking wait() from a thread and once with wait_async() from a task.
#[logfn]
#[rstest]
fn test_waitgroup_inline(setup_log: ()) {
    let wg = Arc::new(WaitGroupInline::<0>::new());
    assert_eq!(wg.get_left_seqcst(), 0);
    wg.add_many(1);
    assert!(wg.try_wait().is_err());
    let _wg = wg.clone();
    let th = std::thread::spawn(move || {
        std::thread::sleep(Duration::from_secs(1));
        unsafe { _wg.done_many(1) };
    });
    unsafe { wg.wait() };
    th.join().expect("join");
    assert_eq!(wg.get_left_seqcst(), 0);
    runtime_block_on!(async move {
        let _wg = wg.clone();
        wg.add();
        let th = async_spawn!(async move {
            #[cfg(feature = "time")]
            {
                sleep(Duration::from_secs(1)).await;
            }
            unsafe { _wg.done() };
        });
        unsafe { wg.wait_async().await };
        async_join_result!(th);
        assert_eq!(wg.get_left_seqcst(), 0);
    });
}

// Calling done() with no matching add() must panic (counter underflow).
#[test]
#[should_panic]
fn test_waitgroup_inline_underflow() {
    recipe::console_logger(ConsoleTarget::Stdout, Level::Trace).test().build().expect("log");
    let wg = WaitGroupInline::<0>::new();
    unsafe { wg.done() };
}