[
  {
    "path": ".github/workflows/cron_2.0_arm.yml",
    "content": "name: cron-2.0-arm\n\non:\n  schedule: [cron: \"30 */5 * * *\"]\n  workflow_dispatch:\n\nenv:\n  CARGO_TERM_COLOR: always\n\njobs:\n  build_and_test:\n\n    runs-on: \"macos-15\"\n\n    steps:\n    - uses: actions/checkout@v2\n      with:\n          ref: v2.0\n\n    - name: Run tests with tokio multi-thread\n      run: env WORKFLOW=1 make test\n\n    - name: Run tests with --release\n      run: env WORKFLOW=1 make test_release\n\n    - name: Run tests with --release tokio single-thread\n      run: env WORKFLOW=1 SINGLE_THREAD_RUNTIME=1 make test_release\n"
  },
  {
    "path": ".github/workflows/cron_2.0_x86.yml",
    "content": "name: cron-2.0-x86\n\non:\n  schedule: [cron: \"30 */5 * * *\"]\n  workflow_dispatch:\n\nenv:\n  CARGO_TERM_COLOR: always\n\njobs:\n  build_and_test:\n\n    runs-on: ubuntu-latest\n\n    steps:\n    - uses: actions/checkout@v2\n      with:\n          ref: v2.0\n\n    - name: Run tests with tokio multi-thread\n      run: env WORKFLOW=1 make test\n\n    - name: Run tests with --release\n      run: env WORKFLOW=1 make test_release\n\n    - name: Run tests with --release tokio single-thread\n      run: env WORKFLOW=1 SINGLE_THREAD_RUNTIME=1 make test_release\n"
  },
  {
    "path": ".github/workflows/cron_dev.yml",
    "content": "name: cron-dev\n\non:\n  schedule: [cron: \"0 */6 * * *\"]\n  workflow_dispatch:\n\nenv:\n  CARGO_TERM_COLOR: always\n\njobs:\n  build_and_test:\n\n    runs-on: ubuntu-latest\n\n    steps:\n    - uses: actions/checkout@v2\n      with:\n          ref: dev\n    - name: Build\n      run: cargo build --verbose\n\n    - name: Run tests with tokio multi-thread\n      run: env WORKFLOW=1 make test\n\n    - name: Run tests with tokio single-thread\n      run: env WORKFLOW=1 SINGLE_THREAD_RUNTIME=1 make test\n\n    - name: Run tests with --release\n      run: env WORKFLOW=1 make test_release\n\n    - name: Run tests with async_std\n      run: env WORKFLOW=1 make test_async_std\n\n"
  },
  {
    "path": ".github/workflows/cron_dev_arm.yml",
    "content": "name: cron-dev-arm\n\non:\n  schedule: [cron: \"0 */5 * * *\"]\n  workflow_dispatch:\n\nenv:\n  CARGO_TERM_COLOR: always\n\njobs:\n  build_and_test:\n\n    runs-on: \"macos-15\"\n\n    steps:\n    - uses: actions/checkout@v2\n      with:\n          ref: dev\n    - name: Build\n      run: cargo build --verbose\n\n      # We use cargo-nextest because cargo test does not forward cancellation signal\n    - uses: taiki-e/install-action@nextest\n\n    - name: Run tests with tokio multi-thread\n      run: cd test-suite; RUSTFLAGS=\"--cfg tokio_unstable\" WORKFLOW=1 exec cargo nextest run -F=\"tokio\" --hide-progress-bar -j 1 --no-capture\n\n    - name: Run tests with --release\n      run: cd test-suite; RUSTFLAGS=\"--cfg tokio_unstable\" WORKFLOW=1 exec cargo nextest run -F=\"tokio\" --hide-progress-bar -j 1 --no-capture -r\n\n    - name: Run tests with --release tokio single-thread\n      run: cd test-suite; RUSTFLAGS=\"--cfg tokio_unstable\" WORKFLOW=1 SINGLE_THREAD_RUNTIME=1 exec cargo nextest run -F=\"tokio\" --hide-progress-bar -j 1 --no-capture -r\n\n    - name: Dump log on cancel\n      if: ${{ cancelled() }}\n      uses: actions/upload-artifact@v4\n      with:\n          name: crossfire_ring\n          path: /tmp/crossfire_ring.log\n"
  },
  {
    "path": ".github/workflows/cron_dev_arm_trace.yml",
    "content": "name: cron-dev-arm-trace\n\non:\n  schedule: [cron: \"0 */5 * * *\"]\n  workflow_dispatch:\n\nenv:\n  CARGO_TERM_COLOR: always\n\njobs:\n  build_and_test:\n\n    runs-on: \"macos-15\"\n\n    steps:\n    - uses: actions/checkout@v2\n      with:\n          ref: dev\n    - name: Rust version\n      run: rustc -V\n    - name: rustup\n      run: rustup show\n\n    - name: Build\n      run: cargo build --verbose\n\n      # We use cargo-nextest because cargo test does not forward cancellation signal\n    - uses: taiki-e/install-action@nextest\n\n    - name: Run tests with tokio multi-thread\n      run: cd test-suite; RUSTFLAGS=\"--cfg tokio_unstable\" WORKFLOW=1 exec cargo nextest run -F=\"tokio,trace_log\" --hide-progress-bar -j 1 --no-capture\n\n    - name: Run tests with tokio multi thread --release\n      run: cd test-suite; RUSTFLAGS=\"--cfg tokio_unstable\" WORKFLOW=1 exec cargo nextest run -F=\"tokio,trace_log\" --hide-progress-bar -j 1 --no-capture -r\n\n    - name: Run tests with tokio single-thread --release\n      run: cd test-suite; RUSTFLAGS=\"--cfg tokio_unstable\" WORKFLOW=1 SINGLE_THREAD_RUNTIME=1 exec cargo nextest run -F=\"tokio,trace_log\" --hide-progress-bar -j 1 --no-capture -r\n\n    - name: Dump log on cancel\n      if: ${{ cancelled() || failure() }}\n      uses: actions/upload-artifact@v4\n      with:\n          name: crossfire_ring\n          path: /tmp/crossfire_ring.log\n"
  },
  {
    "path": ".github/workflows/cron_master_async_std_arm.yml",
    "content": "name: cron-master-async_std-arm\n\non:\n  schedule: [cron: \"0 */5 * * *\"]\n  workflow_dispatch:\n\nenv:\n  CARGO_TERM_COLOR: always\n\njobs:\n  build_and_test:\n\n    runs-on: \"macos-15\"\n\n    steps:\n    - uses: actions/checkout@v2\n\n      # We use cargo-nextest because cargo test does not forward cancellation signal\n    - uses: taiki-e/install-action@nextest\n\n    - name: Run tests with async_std\n      run: env WORKFLOW=1 make test_async_std\n\n    - name: Run test with async_std release\n      run: env WORKFLOW=1 make test_async_std_release\n\n    - name: Run test with async_std release and trace_log\n      run: cd test-suite; WORKFLOW=1 exec cargo nextest run -F=\"async_std,trace_log\" --hide-progress-bar -j 1 --no-capture -r\n"
  },
  {
    "path": ".github/workflows/cron_master_async_std_x86.yml",
    "content": "name: cron-master-async_std-x86\n\non:\n  schedule: [cron: \"0 */5 * * *\"]\n  workflow_dispatch:\n\nenv:\n  CARGO_TERM_COLOR: always\n\njobs:\n  build_and_test:\n\n    runs-on: ubuntu-latest\n\n    steps:\n    - uses: actions/checkout@v2\n\n      # We use cargo-nextest because cargo test does not forward cancellation signal\n    - uses: taiki-e/install-action@nextest\n\n    - name: Run tests with async_std\n      run: env WORKFLOW=1 make test_async_std\n\n    - name: Run test with async_std release\n      run: env WORKFLOW=1 make test_async_std_release\n\n    - name: Run test with async_std release and trace_log\n      run: cd test-suite; WORKFLOW=1 exec cargo nextest run -F=\"async_std,trace_log\" --hide-progress-bar -j 1 --no-capture -r\n"
  },
  {
    "path": ".github/workflows/cron_master_compio_arm.yml",
    "content": "name: cron-master-compio-arm\n\non:\n  schedule: [cron: \"0 */5 * * *\"]\n  workflow_dispatch:\n\nenv:\n  CARGO_TERM_COLOR: always\n\njobs:\n  build_and_test:\n\n    runs-on: \"macos-15\"\n\n    steps:\n    - uses: actions/checkout@v2\n\n      # We use cargo-nextest because cargo test does not forward cancellation signal\n    - uses: taiki-e/install-action@nextest\n\n    - name: Run tests with compio\n      run: env WORKFLOW=1 make test_compio\n\n    - name: Run test with compio release\n      run: env WORKFLOW=1 make test_compio_release\n\n    - name: Run test with compio release and trace_log\n      run: cd test-suite; WORKFLOW=1 exec cargo nextest run -F=\"compio,trace_log\" --hide-progress-bar -j 1 --no-capture -r\n\n    - name: Run tests with compio_dispatcher\n      run: env WORKFLOW=1 make test_compio_dispatcher\n\n    - name: Run test with compio_dispatcher release\n      run: env WORKFLOW=1 make test_compio_dispatcher_release\n\n    - name: Run test with compio_dispatcher release and trace_log\n      run: cd test-suite; WORKFLOW=1 exec cargo nextest run -F=\"compio_dispatcher,trace_log\" --hide-progress-bar -j 1 --no-capture -r\n\n    - name: Dump log on cancel\n      if: ${{ cancelled() || failure() }}\n      uses: actions/upload-artifact@v4\n      with:\n          name: crossfire_ring\n          path: /tmp/crossfire_ring.log\n"
  },
  {
    "path": ".github/workflows/cron_master_compio_x86.yml",
    "content": "name: cron-master-compio-x86\n\non:\n  schedule: [cron: \"0 */5 * * *\"]\n  workflow_dispatch:\n\nenv:\n  CARGO_TERM_COLOR: always\n\njobs:\n  build_and_test:\n\n    runs-on: ubuntu-latest\n\n    steps:\n    - uses: actions/checkout@v2\n\n      # We use cargo-nextest because cargo test does not forward cancellation signal\n    - uses: taiki-e/install-action@nextest\n\n    - name: Run tests with compio\n      run: env WORKFLOW=1 make test_compio\n\n    - name: Run test with compio release\n      run: env WORKFLOW=1 make test_compio_release\n\n    - name: Run test with compio release and trace_log\n      run: cd test-suite; WORKFLOW=1 exec cargo nextest run -F=\"compio,trace_log\" --hide-progress-bar -j 1 --no-capture -r\n\n    - name: Run tests with compio_dispatcher\n      run: env WORKFLOW=1 make test_compio_dispatcher\n\n    - name: Run test with compio_dispatcher release\n      run: env WORKFLOW=1 make test_compio_dispatcher_release\n\n    - name: Run test with compio_dispatcher release and trace_log\n      run: cd test-suite; WORKFLOW=1 exec cargo nextest run -F=\"compio_dispatcher,trace_log\" --hide-progress-bar -j 1 --no-capture -r\n\n    - name: Dump log on cancel\n      if: ${{ cancelled() || failure() }}\n      uses: actions/upload-artifact@v4\n      with:\n          name: crossfire_ring\n          path: /tmp/crossfire_ring.log\n"
  },
  {
    "path": ".github/workflows/cron_master_smol_arm.yml",
    "content": "name: cron-master-smol-arm\n\non:\n  schedule: [cron: \"0 */5 * * *\"]\n  workflow_dispatch:\n\nenv:\n  CARGO_TERM_COLOR: always\n\njobs:\n  build_and_test:\n\n    runs-on: \"macos-15\"\n\n    steps:\n    - uses: actions/checkout@v2\n\n      # We use cargo-nextest because cargo test does not forward cancellation signal\n    - uses: taiki-e/install-action@nextest\n\n    - name: Run tests with smol\n      run: env WORKFLOW=1 make test_smol\n\n    - name: Run test with smol release\n      run: env WORKFLOW=1 make test_smol_release\n\n    - name: Run test with smol release and trace_log\n      run: cd test-suite; WORKFLOW=1 exec cargo nextest run -F=\"smol,trace_log\" --hide-progress-bar -j 1 --no-capture -r\n"
  },
  {
    "path": ".github/workflows/cron_master_smol_x86.yml",
    "content": "name: cron-master-smol-x86\n\non:\n  schedule: [cron: \"0 */5 * * *\"]\n  workflow_dispatch:\n\nenv:\n  CARGO_TERM_COLOR: always\n\njobs:\n  build_and_test:\n\n    runs-on: ubuntu-latest\n\n    steps:\n    - uses: actions/checkout@v2\n\n      # We use cargo-nextest because cargo test does not forward cancellation signal\n    - uses: taiki-e/install-action@nextest\n\n    - name: Run tests with smol\n      run: env WORKFLOW=1 make test_smol\n\n    - name: Run test with smol release\n      run: env WORKFLOW=1 make test_smol_release\n\n    - name: Run test with smol release and trace_log\n      run: cd test-suite; WORKFLOW=1 exec cargo nextest run -F=\"smol,trace_log\" --hide-progress-bar -j 1 --no-capture -r\n"
  },
  {
    "path": ".github/workflows/cron_master_threaded_arm.yml",
    "content": "name: cron-master-threaded-arm\n\non:\n  schedule: [cron: \"20 */5 * * *\"]\n  workflow_dispatch:\n\nenv:\n  CARGO_TERM_COLOR: always\n\njobs:\n  build_and_test:\n\n    runs-on: \"macos-15\"\n\n    steps:\n    - uses: actions/checkout@v2\n    - name: Build\n      run: cargo build --verbose\n\n    - name: Run tests\n      run: env WORKFLOW=1 make test test_blocking_context\n\n    - name: Run tests with --release\n      run: env WORKFLOW=1 make test_release test_blocking_context\n"
  },
  {
    "path": ".github/workflows/cron_master_threaded_x86.yml",
    "content": "name: cron-master-threaded-x86\n\non:\n  schedule: [cron: \"20 */5 * * *\"]\n  workflow_dispatch:\n\nenv:\n  CARGO_TERM_COLOR: always\n\njobs:\n  build_and_test:\n\n    runs-on: ubuntu-latest\n\n    steps:\n    - uses: actions/checkout@v2\n    - name: Build\n      run: cargo build --verbose\n\n    - name: Run tests\n      run: env WORKFLOW=1 make test test_blocking_context\n\n    - name: Run tests with --release\n      run: env WORKFLOW=1 make test_release test_blocking_context\n"
  },
  {
    "path": ".github/workflows/cron_master_tokio_arm.yml",
    "content": "name: cron-master-tokio-arm\n\non:\n  schedule: [cron: \"0 */5 * * *\"]\n  workflow_dispatch:\n\nenv:\n  CARGO_TERM_COLOR: always\n\njobs:\n  build_and_test:\n\n    runs-on: \"macos-15\"\n\n    steps:\n    - uses: actions/checkout@v2\n    - name: Build\n      run: cargo build --verbose\n\n      # We use cargo-nextest because cargo test does not forward cancellation signal\n    - uses: taiki-e/install-action@nextest\n\n    - name: Run tests with tokio multi-thread\n      run: cd test-suite; WORKFLOW=1 exec cargo nextest run -F=\"tokio,time\" --hide-progress-bar -j 1 --no-capture\n\n    - name: Run tests with tokio multi thread --release with trace log\n      run: cd test-suite; WORKFLOW=1 exec cargo nextest run -F=\"tokio,trace_log\" --hide-progress-bar -j 1 --no-capture -r\n\n    - name: Run tests with tokio single-thread --release with trace_log\n      run: cd test-suite; WORKFLOW=1 SINGLE_THREAD_RUNTIME=1 exec cargo nextest run -F=\"tokio,trace_log\" --hide-progress-bar -j 1 --no-capture -r\n\n    - name: Run tests with tokio multi thread --release\n      run: cd test-suite; WORKFLOW=1 exec cargo nextest run -F=\"tokio,time\" --hide-progress-bar -j 1 --no-capture -r\n\n    - name: Run tests with tokio single-thread --release\n      run: cd test-suite; WORKFLOW=1 SINGLE_THREAD_RUNTIME=1 exec cargo nextest run -F=\"tokio,time\" --hide-progress-bar -j 1 --no-capture -r\n\n    - name: Dump log on cancel\n      if: ${{ cancelled() }}\n      uses: actions/upload-artifact@v4\n      with:\n          name: crossfire_ring\n          path: /tmp/crossfire_ring.log\n"
  },
  {
    "path": ".github/workflows/cron_master_tokio_x86.yml",
    "content": "name: cron-master-tokio-x86\n\non:\n  schedule: [cron: \"0 */5 * * *\"]\n  workflow_dispatch:\n\nenv:\n  CARGO_TERM_COLOR: always\n\njobs:\n  build_and_test:\n\n    runs-on: ubuntu-latest\n\n    steps:\n    - uses: actions/checkout@v2\n    - name: Build\n      run: cargo build --verbose\n\n    - name: Run tests with tokio multi-thread\n      run: env WORKFLOW=1 make test\n\n    - name: Run tests with tokio current-thread\n      run: env WORKFLOW=1 SINGLE_THREAD_RUNTIME=1 make test\n\n    - name: Run tests with tokio multi-thread --release\n      run: env WORKFLOW=1 make test_release\n\n    - name: Run tests with tokio current-thread --release\n      run: env WORKFLOW=1 SINGLE_THREAD_RUNTIME=1 make test_release\n"
  },
  {
    "path": ".github/workflows/fast.yml",
    "content": "name: fast-validation\n\non:\n  push:\n    branches: [ \"master\" ]\n\nenv:\n  CARGO_TERM_COLOR: always\n\njobs:\n  build_and_test:\n\n    runs-on: ubuntu-latest\n\n    steps:\n    - uses: actions/checkout@v2\n\n    - name: Build with default\n      run: cargo build --verbose\n\n    - name: Clippy with default\n      run: cargo clippy -- -D warnings\n\n    - name: Build with tokio\n      run: cargo build -F tokio --verbose\n\n    - name: Build with async_std\n      run: cargo build -F async_std --verbose\n\n    - name: doc test & internal test\n      run: cargo test\n\n    - name: doc build\n      run: cargo doc --all-features\n\n    - name: Run basic tests with tokio\n      run: make test basic\n\n    - name: Run timeout tests with async_std\n      run: make test_async_std timeout\n"
  },
  {
    "path": ".github/workflows/leak.yml",
    "content": "name: leak\n\non:\n  schedule: [cron: \"30 */10 * * *\"]\n  workflow_dispatch:\n\nenv:\n  CARGO_TERM_COLOR: always\n\njobs:\n  build_and_test:\n\n    runs-on: ubuntu-latest\n\n    steps:\n    - uses: actions/checkout@v2\n    - name: Build\n      run: cargo build --verbose\n\n    - name: Run test-suite with LSAN\n      run: env NIGHTLY=\"+nightly\" RUSTFLAGS=\"-Zsanitizer=leak\" WORKFLOW=1 make test\n\n    - name: Run internal test\n      run: make test_internal\n"
  },
  {
    "path": ".github/workflows/miri_dev.yml",
    "content": "name: miri-dev\n\non:\n  workflow_dispatch:\n  schedule: [cron: \"20 */7 * * *\"]\n\nenv:\n  CARGO_TERM_COLOR: always\n\njobs:\n  test_tokio:\n\n    runs-on: ubuntu-latest\n\n    steps:\n    - uses: actions/checkout@v2\n      with:\n          ref: dev\n\n    - name: install specified nightly toolchain\n      run: rustup toolchain install nightly-2025-12-01\n\n    - name: install miri component to specified nightly\n      run: rustup component add miri --toolchain nightly-2025-12-01\n\n    - name: Run miri tests without log (tokio multi thread)\n      run: cd test-suite; NIGHTLY_VERSION=nightly-2025-12-01 scripts/miri.sh -F tokio,time\n\n    - name: Run miri tests without log (tokio current thread)\n      run: cd test-suite; SINGLE_THREAD_RUNTIME=1 NIGHTLY_VERSION=nightly-2025-12-01 scripts/miri.sh -F tokio\n"
  },
  {
    "path": ".github/workflows/miri_dev_log.yml",
    "content": "name: miri-dev-log\n\non:\n  workflow_dispatch:\n  schedule: [cron: \"30 */8 * * *\"]\n\nenv:\n  CARGO_TERM_COLOR: always\n\njobs:\n  test_tokio:\n\n    runs-on: ubuntu-latest\n\n    steps:\n    - uses: actions/checkout@v2\n      with:\n          ref: dev\n\n    - name: install specified nightly toolchain\n      run: rustup toolchain install nightly-2025-12-01\n\n    - name: install miri\n      run: rustup component add miri --toolchain nightly-2025-12-01\n\n    - name: Run miri tests with log (tokio multi thread)\n      run: cd test-suite; NIGHTLY_VERSION=nightly-2025-12-01 scripts/miri.sh --features trace_log,tokio\n\n    - name: collect log\n      if: ${{ failure() }}\n      uses: actions/upload-artifact@v4\n      with:\n          name: crossfire_miri_tokio_multithread\n          path: /tmp/crossfire_miri.log\n"
  },
  {
    "path": ".github/workflows/miri_tokio.yml",
    "content": "name: miri-tokio\n\non:\n  workflow_dispatch:\n  schedule: [cron: \"20 */6 * * *\"]\n\nenv:\n  CARGO_TERM_COLOR: always\n\njobs:\n  build_and_test:\n\n    runs-on: ubuntu-latest\n\n    steps:\n    - uses: actions/checkout@v2\n\n    - name: install specified nightly toolchain\n      run: rustup toolchain install nightly-2025-12-01\n\n    - name: install miri\n      run: rustup component add miri --toolchain nightly-2025-12-01\n      #run:  rustup component add --toolchain nightly-x86_64-unknown-linux-gnu miri\n\n    - name: Run miri tests without log (tokio multi thread)\n      run: cd test-suite;  NIGHTLY_VERSION=nightly-2025-12-01 WORKFLOW=1 scripts/miri.sh -F tokio,time\n\n"
  },
  {
    "path": ".github/workflows/miri_tokio_cur.yml",
    "content": "name: miri-tokio-cur\n\non:\n  workflow_dispatch:\n  schedule: [cron: \"50 */7 * * *\"]\n\nenv:\n  CARGO_TERM_COLOR: always\n\njobs:\n  build_and_test:\n\n    runs-on: ubuntu-latest\n\n    steps:\n    - uses: actions/checkout@v2\n\n    - name: install specified nightly toolchain\n      run: rustup toolchain install nightly-2025-12-01\n\n    - name: install miri\n      run: rustup component add miri --toolchain nightly-2025-12-01\n      #run: rustup component add --toolchain nightly-x86_64-unknown-linux-gnu miri\n\n    - name: Run miri tests without log (tokio current thread)\n      run: cd test-suite; NIGHTLY_VERSION=nightly-2025-12-01 WORKFLOW=1 SINGLE_THREAD_RUNTIME=1 scripts/miri.sh -F tokio,time\n"
  },
  {
    "path": ".github/workflows/miri_tokio_cur_log.yml",
    "content": "name: miri-tokio-cur-log\n\non:\n  workflow_dispatch:\n  schedule: [cron: \"20 */9 * * *\"]\n\nenv:\n  CARGO_TERM_COLOR: always\n\njobs:\n  build_and_test:\n\n    runs-on: ubuntu-latest\n\n    steps:\n    - uses: actions/checkout@v2\n\n    - name: install specified nightly toolchain\n      run: rustup toolchain install nightly-2025-12-01\n\n    - name: install miri\n      run: rustup component add miri --toolchain nightly-2025-12-01\n      #run: rustup component add --toolchain nightly-x86_64-unknown-linux-gnu miri\n\n    - name: Run miri tests with log (tokio current thread)\n      run: cd test-suite;  NIGHTLY_VERSION=nightly-2025-12-01 WORKFLOW=1 SINGLE_THREAD_RUNTIME=1 scripts/miri.sh --features trace_log,tokio\n\n    - name: collect log\n      if: ${{ failure() }}\n      uses: actions/upload-artifact@v4\n      with:\n          name: crossfire_miri\n          path: /tmp/crossfire_miri.log\n"
  },
  {
    "path": ".github/workflows/miri_tokio_log.yml",
    "content": "name: miri-tokio-log\n\non:\n  workflow_dispatch:\n  schedule: [cron: \"10 */7 * * *\"]\n\nenv:\n  CARGO_TERM_COLOR: always\n\njobs:\n  build_and_test:\n\n    runs-on: ubuntu-latest\n\n    steps:\n    - uses: actions/checkout@v2\n\n    - name: install specified nightly toolchain\n      run: rustup toolchain install nightly-2025-12-01\n\n    - name: install miri\n      run: rustup component add miri --toolchain nightly-2025-12-01\n      #run:  rustup component add --toolchain nightly-x86_64-unknown-linux-gnu miri\n\n    - name: Run miri tests with log (tokio multi thread)\n      run: cd test-suite; NIGHTLY_VERSION=nightly-2025-12-01 WORKFLOW=1 scripts/miri.sh --features trace_log,tokio\n\n    - name: collect log\n      if: ${{ failure() }}\n      uses: actions/upload-artifact@v4\n      with:\n          name: crossfire_miri_tokio_multithread\n          path: /tmp/crossfire_miri.log\n"
  },
  {
    "path": ".github/workflows/pr.yml",
    "content": "name: pr-validation\n\non:\n  pull_request:\n    types:\n      - opened\n      - synchronize\n      - ready_for_review\n\nenv:\n  CARGO_TERM_COLOR: always\n\njobs:\n  build_and_test:\n\n    runs-on: ubuntu-latest\n\n    steps:\n    - uses: actions/checkout@v2\n\n    - name: Build with default\n      run: cargo build --verbose\n\n    - name: Build with tokio\n      run: cargo build -F tokio --verbose\n\n    - name: Build with async_std\n      run: cargo build -F async_std --verbose\n\n    - name: Run tests with tokio\n      run: make test\n\n    - name: Run tests with tokio\n      run: make test_async_std timeout\n"
  },
  {
    "path": ".github/workflows/typos.yml",
    "content": "name: Typo checker\non:\n  pull_request:\n    types:\n      - opened\n      - synchronize\n      - ready_for_review\n  push:\n    branches: [ \"master\" ]\n  workflow_dispatch:\n\njobs:\n  run:\n    name: Spell Check with Typos\n    runs-on: ubuntu-22.04\n    steps:\n    - name: Checkout Actions Repository\n      uses: actions/checkout@v3\n\n    - name: Check spelling of the entire repository\n      uses: crate-ci/typos@v1.33.1\n"
  },
  {
    "path": ".gitignore",
    "content": "/target\nCargo.lock\ntags\n*.sw*\n"
  },
  {
    "path": "AGENTS.md",
    "content": "# General\n- All comments and documents must be in English.\n- Omit unnecessary obvious comments during coding.\n- Documents must be concise, well organized into categories, with no duplicated topics or redundant information. Related topics should be organized in close proximity.\n- If you don't know a 3rd-party API, look it up on `https://docs.rs/<crate>`.\n- Do not run cargo clippy.\n- Always use shorter token paths by importing traits or structures.\n- Avoid importing namespaces inside functions.\n\n# Test\n\n- Because crate::spsc, mpsc, mpmc module have the same type alias, in the test just use `crossfire::*`, and distinguish the types and functions with `spsc::`, `mpsc::`, `mpmc::` prefix.\n- Run test with `make test`. In order to prevent too long output truncated by AI tool, run test with `make test <test_name>` when you have a targeted test case.\n- Do not use cargo test to run the test, always use `make test`. Test case cannot be run concurrently with cargo test default param.\n- For statement that don't expect to fail, use `expect()` rather than `unwarp()`\n"
  },
  {
    "path": "CHANGELOG.md",
    "content": "# Changelog\n\nAll notable changes to this project will be documented in this file.\n\nThe format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),\nand this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).\n\n## Unreleased\n\n### Added\n\n### Removed\n\n### Changed\n\n### Fixed\n\n## [3.1.10] - 2026-05-05\n\n### Fix\n\n- waitgroup: Avoid miri report on stack borrow rule (issue #66)\n\n## [3.1.9] - 2026-05-05\n\n### Fix\n\n- Reduce Send requirement in generic (issue #64), which makes the error prompt cleaner\n\n## [3.1.8] - 2026-05-04\n\n### Added\n\n- Add WeakTx. which can downgrade from or upgrade to MTx / MAsyncTx\n\n## [3.1.7] - 2026-03-19\n\n### Added\n\n- oneshot: Add TxOneshot::is_disconnected()\n\n## [3.1.6] - 2026-03-18\n\n### Added\n\n- waitgroup: Add WaitGroupInline (which does not allocate)\n\n## [3.1.4] - 2026-02-25\n\n### Changed\n\n- oneshot: Add Sync for TxOneshot\n\n## [3.1.2] - 2026-02-16\n\n### Changed\n\n- waitgroup: Add inner T inside, just like Arc, this break previous 3.1.0 and 3.1.1\n\n## [3.1.1] - 2026-02-15\n\n### Changed\n\n- waitgroup: Add Sync for WaitGroupGuard\n\n## [3.1.0] - 2026-02-14\n\n### Added\n\n- Add WaitGroup that support async & blocking, with custom threshold.\n\n- oneshot: Add recv_async_timeout & recv_async_with_timer\n\n### Changed\n\n- oneshot: Refactor oneshot and optimize out arc cost. 
try_recv() now require `&mut self`.\n\n- async_tx/async_rx: Refactor SendTimeoutFuture/RecvTimeoutFuture signature, to remove boxed future usage\n\n## [3.0.6] - 2026-02-11\n\n### Fixed\n\n- Fix multiplex: Ensure all message received before disconnect\n\n## [3.0.5] - 2026-02-11\n\n### Fixed\n\n- Fix msrv to 1.79 (NonZero usage)\n\n- Fix clippy warning and document\n\n## [3.0.4] - 2026-02-03\n\n### Fixed\n\n- Avoid overflow evaluation in generic code\n\n  Remove Send/'static/Unpin limit from Flavor/Queue trait and struct definition,\n  add the limit to method.\n\n- Blocking method and struct don't need Unpin.\n\n- Async recv does not need Unpin.\n\n## [3.0.3] - 2026-01-30\n\n### Fixed\n\n- Fix multiplex premature closing\n\n## [3.0.2] - 2026-01-23\n\n### Added\n\n- Add missing into_async() method for blocking tx/rc\n\n## [3.0.1] - 2026-01-22\n\n### Changed\n\n- Remove the mode setting from Multiplex (always use round-robin)\n\n- Add default custom weight for Multiplex select, optimize selection cost (throughput +20%)\n\n## [3.0.0] - 2026-01-18\n\n### Changed\n\n- Disable direct_copy to make miri happy\n\n- Simplify waker cleaning logic\n\n## [3.0.0.beta3] - 2026-01-16\n\n### Change\n\n- New implementation of ArraySpsc & ArrayMpsc, throughput +50%\n\n- New implementation of OneMpsc, minor speed up.\n\n- Change multiplex recv(), try_recv(), recv_timeout() to &self, and impl BlockingRxTrait.\n\n- Remove unused lifetime param in BlockingRxTrait.\n\n### Fixed\n\nProblems from v3 beta\n\n- Add more backoff yielding for One flavor, to ensure 8x1, 16x1 cases stable, and minor optimize.\n\n- Fix commit_waiting state wrong condition, which lead to regression in cases like 1000 async tx.\n\n- Spsc should disable direct_copy (which only safe for MP)\n\n## [3.0.0.beta2] - 2026-01-15\n\n- Fix Array visibility in flavor module\n\n- Fix AsyncTxTrait for compio (The sleep does not have Send)\n\n## [3.0.0.beta1] - 2026-01-14\n\n### Changed\n\n- Change interface to V3 generic flavor 
API\n\n- Optimize for SPSC\n\n### Added\n\n- Add One flavor for bounded size 1 case\n\n- Add Null flavor for cancellation purpose channel\n\n- Add Select API\n\n- Add Multiplex API\n\n## [2.1.10] - 2026-01-10\n\n### Added\n\n- Add `oneshot` module\n\n- Add test workflow for `compio` (by lisovskiy)\n\n### Changed\n\n- Allow Blocking/Async Tx/Rx trait to be used as trait objects\n\n## [2.1.9] - 2025-12-31\n\n- Fix speed regression on ARM (fix backoff)\n\n## [2.1.8] - 2025-11-08\n\n### Fixed\n\n- Add `#[must_use]` to hint missing await on Future (by MathisWellmann)\n\n## [2.1.7] - 2025-11-08\n\n### Changed\n\n- Depend on `futures-core` crate instead of `futures` (issue #45)\n\n## [2.1.6] - 2025-10-10\n\n### Changed\n\n- Delete the code probing tokio (to prevent an issue in cargo 1.87-1.90 triggering the code without tokio feature enable)\n\n## [2.1.5] - 2025-10-06\n\n### Fixed\n\n- Remove doc_auto_cfg because removal by rust\n\n## [2.1.4] - 2025-10-01\n\n### Changed\n\n- Adjust backoff for Arm  (increase size 1 speed)\n\n- async: Use try_change_state() to reset init instead of get_state(), (Minor improvement on x86 bounded_100_async_n_n)\n\n## [2.1.3] - 2025-09-26\n\n### Added\n\n- Add send_with_timer() and recv_with_timer() for other async runtime (eg. 
smol).\n\n## [2.1.1-2.1.2]\n\n### Changed\n\n- Minor changed to doc\n\n## [2.1.0] - 2025-9-21\n\n### Changed\n\n- Refactor to drop dependency of crossbeam-channel, the underlayering is modified version of crossbeam-queue.\n\n- Bounded channel speed receive massive boost.\n\n- AsyncTx can convert back and forth with Tx, and AsyncRx can convert back and forth with Rx.\n\n- Optimise for VM machine that only have 1 cpu.\n\n- Use MaybeUninit to optimise the moving of large blob message for bounded channel, in nearly full scenario.\n\n- Rename ReceiveFuture to RecvFuture, ReceiveTimeoutFuture to RecvTimeoutFuture.\n\n### Removed\n\n- Remove AsyncTx::send_blocking() and AsyncRx::recv_blocking(), instead, you can use type conversion into Tx/Rx.\n\n## [2.0.26] - 2025-08-30\n\n### Fixed\n\n- waker_registry: Fix hang detect by miri in cancel_waker(), issue #34\n\n## [2.0.25] - 2025-08-29\n\n### Fixed\n\n- More strict with the waker status, issue #34 (use SeqCst in reset_init)\n\n## [2.0.24] - 2025-08-26\n\n### Fixed\n\n- More strict with the waker status,  issue #34 (spurious wake up, and waker commit)\n\n## [2.0.23] - 2025-08-23\n\n### Fixed\n\n- Change is_disconnected() to SeqCst\n\n## [2.0.22] - 2025-08-21\n\n### Fixed\n\n- RegistryMulti: Fix defend against infinite loop for sink/stream, code introduced from 2.0.20.\n\n## [2.0.21] - 2025-08-21\n\n### Added\n\n- Add clone_to_vec() method in async / blocking tx/rx trait\n\n### Fixed\n\n- AsyncSink: Fix typo in clear waker on drop (Does not affect stability)\n\n## [2.0.20] - 2025-08-17\n\n### Added\n\n- AsyncTxTrait: Add Into<AsyncSink<T>>\n\n- AsyncRxTrait: Add Into<AsyncStream<T>>\n\n### Fixed\n\n- Change the behavior of AsyncSink::poll_send() and AsyncStream::poll_item(), to make sure\nstream/sink wakers are notified, preventing deadlock from happening if user wants to cancel the operation.\nAdd explanation to the document.\n\n- Defend against infinite loop when waking up all wakers, given the change of sink/stream.\n\n## 
[2.0.19] - 2025-08-13\n\n### Added\n\n- Add capacity()\n\n## [2.0.18] - 2025-08-11\n\n### Fixed\n\n- Change some atomic load ordering from Acquire to SeqCst to pass validation by Miri.\n\n## [2.0.17] - 2025-08-08\n\n### Fixed\n\n- Reuse and cleanup waker as much as possible (for idle select scenario)\n\n- Change some atomic store ordering from Release to SeqCst to avoid further trouble.\n\n## [2.0.16] - 2025-08-04\n\n### Added\n\n- Add into_blocking()\n\n- Add missing into_sink() for MAsyncTx.\n\n- Add From for AsyncSink and AsyncStream.\n\n## [2.0.15] - 2025-08-04\n\n### Added\n\n- Add missing conversion: MAsyncTx->AsyncTx and MTx->Tx\n\n## [2.0.14] - 2025-08-03\n\n### Changed\n\n- Optimise bounded size 1 speed with backoff\n\n- Updated benchmark result vs kanal to wiki\n\n## [2.0.13] - 2025-07-24\n\n### Fixed\n\n- Fix a deadlock https://github.com/frostyplanet/crossfire-rs/issues/22\n\n### Added\n\n- Allow type conversion from AsyncTx -> Tx, AsyncRx -> Rx\n\n## [2.0.12] - 2025-07-18\n\n### Fixed\n\n- Fix a possible hang in LockedQueue introduced from v2.0.5\n\n## [2.0.11] - 2025-07-18\n\n### Added\n\n- Add Deref/AsRef for sender & receiver type to ChannelShared\n\n- Add is_full(), get_tx_count(), get_rx_count()\n\n- Revert the removal of send_blocking() and recv_blocking() (will maintain through 2.0.x)\n\n### Removed\n\n- Remove DerefMut because it's no used.\n\n### Fixed\n\n- Fix send_timeout() in blocking context\n\n## [2.0.10] yanked\n\npublished with the wrong branch, do not use.\n\n## [2.0.9] - 2025-07-16\n\n### Added\n\n- Add is_disconnected() to sender and receiver type.\n\n- Add Deref for AsyncSink to AsyncTx, and AsyncStream to AsyncRx, remove duplicated code.\n\n### Fixed\n\n- Fix a rare deadlock, when only one future in async runtime (for example channel async-blocking or blocking-async).\nRuntime will spuriously wake up with changed Waker.\n\n### Removed\n\n- Remove send_blocking() & recv_blocking(), which is anti-pattern. 
(Calling function that blocks might lead to deadlock in async runtime)\n\n## [2.0.8] - 2025-07-14\n\n### Added\n\n- AsyncStream: Add try_recv(), len() & is_empty()\n\n## [2.0.7] - 2025-07-13\n\n### Added\n\n- AsyncStream: Add poll_item() for writing custom future, as a replacement to AsyncRx's poll_item(),\n but without the need of LockedWaker.\n\n- Add AsyncSink::poll_send() for writing custom future, as a replacement to AsyncTx's poll_send(),\n but without the need of LockedWaker.\n\n- Implement Debug & Display for all senders and receivers.\n\n### Remove\n\n- Hide LockedWaker, since AsyncRx::poll_item() and AsyncTx::poll_send() is hidden.\n\n### Changed\n\n- Optimise speed for SPSC & MPSC up to 60% (with WeakCell)\n\n- Add execution time log to test cases.\n\n### Fixed\n\n- Fix LockedQueue empty flag (not affecting usage, just not accurate to internal test cases)\n\n## [2.0.6] - 2025-07-10\n\n### Added\n\n- Support timeout and tested on async-std\n\n### Changed\n\n- mark make_recv_future() & make_send_future() deprecated.\n\n- Change poll_send() & poll_item() to private function.\n\n## [2.0.5] - 2025-07-09\n\n### Added\n\n- Add send_timeout() & recv_timeout() for async context\n\n### Fixed\n\n- AsyncRx: Fix rare case that message left on disconnect\n\n- Fixed document typo and improve description.\n\n### Changed\n\n- Optimise RegistryMulti, with 20%+ speed improved on MPSC / MPMC\n\n## [2.0.4] - 2025-07-08\n\n### Changed\n\n- Remove Sync marker in Tx, Rx, AsyncTx, AsyncRx to prevent misuse with Arc\n\n\n## [2.0.3] - 2025-07-07\n\n### Changed\n\n- Remove duplicated code.\n\n### Fixed\n\n- AsyncRx should not have Clone.\n\n- Protect against misuse of spsc/mpsc when user should use mpmc (avoiding deadlocks)\n\n## [2.0.2] - 2025-07-05\n\n### Added\n\n- Add channels for blocking context (which equals to crossbeam)\n\n### Changed\n\n- Remove unused Clone for LockedWaker\n\n### Fixed\n\n- spsc: Add missing unsupported size=0 overwrites\n\n\n## [2.0.1] - 
2025-07-03\n\n### Added\n\n- Add timeout API for blocking context (by Zach Schoenberger)\n\n### Changed\n\n- Set min Rust version and edition in alignment with crossbeam (by Zach Schoenberger)\n\n## [2.0.0] - 2025-06-27\n\n### Added\n\n- spsc module\n\n- Benchmark suite written with criterion.\n\n### Changed\n\n- Refactor the API design. Unify sender and receiver types.\n\n- Removal of macro rules and refactor SendWakers & RecvWakers into Enum, thus removal of generic type in Channelshared structure.\n\n- Removal of the spin lock in LockedWaker. Simplifying the logic without losing performance.\n\n- Rewrite the test cases with rstest.\n\n### Removed\n\n- Drop SelectSame module, because of hard to maintain, can be replace with future-select.\n\n## [1.1.0] - 2025-06-19\n\n### Changed\n\n- Migrate repo\n\nFrom <http://github.com/qingstor/crossfire-rs> to <https://github.com/frostyplanet/crossfire-rs>\n\n- Change rust edition to 2024, re-format the code and fix warnings.\n\n\n## [1.0.1] - 2023-08-29\n\n### Fixed\n\n- Fix atomic ordering for ARM (Have been tested on some ARM deployment)\n\n## [1.0.0] - 2022-12-03\n\n### Changed\n\n- Format all code and announcing v1.0\n\n- I decided that x86_64 stable after one year test.\n\n## [0.1.7] - 2021-08-22\n\n### Fixed\n\n- tx: Remove redundant old_waker.is_waked() on abandon\n\n## [0.1.6] - 2021-08-21\n\n### Fixed\n\n- mpsc: Fix RxFuture old_waker.abandon in poll_item\n\n## [0.1.5] - 2021-06-28\n\n### Changed\n\n- Replace deprecated compare_and_swap\n\n### Fixed\n\n- SelectSame: Fix close_handler last_index\n\n- Fix fetch_add/sub ordering for ARM  (discovered on test hang)\n"
  },
  {
    "path": "CONTRIBUTION",
    "content": "Original Author:\n- Plan (frostyplanet at gmail.com)\nThanks:\n- Zach Schoenberger\n- MathisWellmann\n- lisovskiy\n- Sherlock-Holo\n"
  },
  {
    "path": "Cargo.toml",
    "content": "[workspace]\nmembers = [\"test-suite\"]\n\n[package]\nname = \"crossfire\"\nversion = \"3.1.10\"\nauthors = [\"plan <frostyplanet@gmail.com>\"]\nedition = \"2021\"\nlicense = \"Apache-2.0\"\nhomepage = \"https://github.com/frostyplanet/crossfire-rs\"\nreadme = \"README.md\"\nrepository = \"https://github.com/frostyplanet/crossfire-rs\"\ndocumentation = \"https://docs.rs/crossfire\"\nkeywords = [\"async\", \"non-blocking\", \"lock-free\", \"channel\"]\ncategories = [\"concurrency\", \"data-structures\"]\nexclude = [\"/ci/*\", \"/bors.toml\"]\ndescription = \"channels for async and threads\"\nrust-version = \"1.79\"\n\n# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html\n\n# Because cargo-show-asm cannot match local crossfire package\n#[patch.crates-io]\n#crossfire = { path = \".\" }\n\n[dependencies]\ncrossbeam-utils = \"0.8\"\nfutures-core = \"0.3\"\nparking_lot = \"0\"\ntokio = { version = \"1\", features = [\"time\", \"rt\"], optional=true }\nasync-std = {version = \"1\", optional=true}\nlog = { version=\"0\", optional=true}\nsmallvec = \"1\"\n\n[dev-dependencies]\nlog = \"0\"\ntokio = { version = \"1\", features = [\"time\", \"sync\", \"rt-multi-thread\", \"rt\", \"macros\"] }\nsmol = \"2\"\ncaptains-log = \"0\"\n\n## For profiling symbol\n#[profile.release]\n#debug = true\n\n[features]\ndefault = []\n\n# Enable compat model for v2.x API\ncompat = []\n\n# This will enable timeout function\ntokio = [\"dep:tokio\"]\n\n# This will enable timeout function\nasync_std = [\"dep:async-std\"]\n\n# for test workflow debugging\ntrace_log = [\"dep:log\"]\n\n[package.metadata.docs.rs]\nall-features = true\n# enable features in the documentation\nrustdoc-args = [\"--cfg\", \"docsrs\"]\n\n[lints.clippy]\nnew_ret_no_self = \"allow\"\nneedless_range_loop = \"allow\"\ntype_complexity = \"allow\"\nneedless_return = \"allow\"\nmut_from_ref = \"allow\"\ntransmute_ptr_to_ref = \"allow\"\nlen_without_is_empty = 
\"allow\"\nnew_without_default = \"allow\"\nresult_unit_err = \"allow\"\n"
  },
  {
    "path": "LICENSE",
    "content": "Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived 
from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"{}\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright (C) 2023-2025 The Crossfire Project Developers\n\n   Copyright (C) 2016-2023 Yunify Inc.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "Makefile",
    "content": "PRIMARY_TARGET := $(firstword $(MAKECMDGOALS))\nARGS := $(filter-out $(PRIMARY_TARGET), $(MAKECMDGOALS))\n\nRUN_TEST_CASE = _run_test_case() {                                                  \\\n    case=\"$(filter-out $ARGS,$(MAKECMDGOALS))\";                                      \\\n    if [ -n \"$${WORKFLOW}\" ]; then \\\n        export TEST_FLAG=\" -- -q --test-threads=1\"; \\\n    else  \\\n        export TEST_FLAG=\" -- --nocapture --test-threads=1\"; \\\n        export LOG_FILE=\"/tmp/test_crossfire.log\"; \\\n    fi; \\\n\tRUST_BACKTRACE=full cargo ${NIGHTLY} test  -p crossfire-test ${ARGS} $${FEATURE_FLAG} $${TEST_FLAG};    \\\n}\n\nRUN_RELEASE_CASE = _run_test_release_case() {                                                  \\\n    case=\"$(filter-out $@,$(MAKECMDGOALS))\";                                      \\\n    if [ -n \"$${WORKFLOW}\" ]; then \\\n        export TEST_FLAG=\" --release -- -q --test-threads=1\"; \\\n    else  \\\n        export LOG_FILE=\"/tmp/test_crossfire.log\"; \\\n        export TEST_FLAG=\" --release -- --nocapture --test-threads=1\"; \\\n    fi; \\\n\tRUST_BACKTRACE=full cargo ${NIGHTLY} test -p crossfire-test ${ARGS} $${FEATURE_FLAG} $${TEST_FLAG};  \\\n}\n\nRUN_BENCH = _run_bench() { \\\n\tcd test-suite; \\\n\tcargo bench --bench ${ARGS}; \\\n}\n\nINSTALL_GITHOOKS = _install_githooks() {                \\\n    git config core.hooksPath ./git-hooks;              \\\n}\n\n.PHONY: git-hooks\ngit-hooks:\n\t@$(INSTALL_GITHOOKS); _install_githooks\n\n.PHONY: init\ninit: git-hooks\n\n.PHONY: fmt\nfmt: init\n\tcargo fmt\n\n.PHONY: doc\ndoc:\n\tRUSTDOCFLAGS=\"--cfg docsrs\" cargo +nightly doc --all-features\n\n# usage:\n#  make test\n#  make test test_async\n.PHONY: test\ntest: init\n\t@echo \"Run test\"\n\t@${RUN_TEST_CASE}; FEATURE_FLAG=\"-F tokio,time\"; _run_test_case\n\t@echo \"Done\"\n\n# test with ringfile for deadlog\n.PHONY: test_log\ntest_log: init\n\t@echo \"Run test\"\n\t@${RUN_TEST_CASE}; 
FEATURE_FLAG=\"-F tokio,time,trace_log\"; _run_test_case\n\t@echo \"Done\"\n\n.PHONY: test_async_std\ntest_async_std: init\n\t@echo \"Run test\"\n\t@${RUN_TEST_CASE}; FEATURE_FLAG=\"-F async_std,time,\"; _run_test_case\n\t@echo \"Done\"\n\n.PHONY: test_log_async_std\ntest_log_async_std: init\n\t@echo \"Run test\"\n\t@${RUN_TEST_CASE}; FEATURE_FLAG=\"-F async_std,time,trace_log\"; _run_test_case\n\t@echo \"Done\"\n\n.PHONY: test_release\ntest_release:\n\t@${RUN_RELEASE_CASE}; FEATURE_FLAG=\"-F tokio,time\"; _run_test_release_case\n\n# test with ringfile for deadlog\n.PHONY: test_log_release\ntest_log_release:\n\t@${RUN_RELEASE_CASE}; FEATURE_FLAG=\"-F tokio,time,trace_log\"; _run_test_release_case\n\n.PHONY: test_async_std_release\ntest_async_std_release:\n\t@${RUN_RELEASE_CASE}; FEATURE_FLAG=\"-F async_std,time\"; _run_test_release_case\n\n# test with ringfile for deadlog\n.PHONY: test_log_async_std_release\ntest_log_async_std_release:\n\t@${RUN_RELEASE_CASE}; FEATURE_FLAG=\"-F async_std,time,trace_log\"; _run_test_release_case\n\n.PHONY: test_smol\ntest_smol:\n\t@${RUN_TEST_CASE}; FEATURE_FLAG=\"-F smol,time\"; _run_test_case\n\n# test with ringfile for deadlog\n.PHONY: test_log_smol\ntest_log_smol:\n\t@${RUN_TEST_CASE}; FEATURE_FLAG=\"-F smol,time,trace_log\"; _run_test_case\n\n.PHONY: test_smol_release\ntest_smol_release:\n\t@${RUN_RELEASE_CASE}; FEATURE_FLAG=\"-F smol,time\"; _run_test_release_case\n\n# test with ringfile for deadlog\n.PHONY: test_log_smol_release\ntest_log_smol_release:\n\t@${RUN_RELEASE_CASE}; FEATURE_FLAG=\"-F smol,trace_log,time\"; _run_test_release_case\n\n.PHONY: test_compio\ntest_compio:\n\t@${RUN_TEST_CASE}; FEATURE_FLAG=\"-F compio\"; _run_test_case\n\n# test with ringfile for deadlog\n.PHONY: test_log_compio\ntest_log_compio:\n\t@${RUN_TEST_CASE}; FEATURE_FLAG=\"-F compio,trace_log\"; _run_test_case\n\n.PHONY: test_compio_release\ntest_compio_release:\n\t@${RUN_RELEASE_CASE}; FEATURE_FLAG=\"-F compio\"; _run_test_release_case\n\n# 
test with ringfile for deadlog\n.PHONY: test_log_compio_release\ntest_log_compio_release:\n\t@${RUN_RELEASE_CASE}; FEATURE_FLAG=\"-F compio,trace_log\"; _run_test_release_case\n\n.PHONY: test_compio_dispatcher\ntest_compio_dispatcher:\n\t@${RUN_TEST_CASE}; FEATURE_FLAG=\"-F compio_dispatcher\"; _run_test_case\n\n# test with ringfile for deadlog\n.PHONY: test_log_compio_dispatcher\ntest_log_compio_dispatcher:\n\t@${RUN_TEST_CASE}; FEATURE_FLAG=\"-F compio_dispatcher,trace_log\"; _run_test_case\n\n.PHONY: test_compio_dispatcher_release\ntest_compio_dispatcher_release:\n\t@${RUN_RELEASE_CASE}; FEATURE_FLAG=\"-F compio_dispatcher\"; _run_test_release_case\n\n# test with ringfile for deadlog\n.PHONY: test_log_compio_dispatcher_release\ntest_log_compio_dispatcher_release:\n\t@${RUN_RELEASE_CASE}; FEATURE_FLAG=\"-F compio_dispatcher,trace_log\"; _run_test_release_case\n\n# Usage: make bench crossfire bounded_100_async_1_1\n.PHONY: bench\nbench:\n\t@${RUN_BENCH}; _run_bench\n\n.PHONY: test_leak\ntest_leak: test_internal\n\t@${RUN_TEST_CASE}; NIGHTLY=\"+nightly\" RUSTFLAGS=\"-Zsanitizer=leak\"; _run_test_case\n\n.PHONY: test_internal\ntest_internal:\n\tRUSTFLAGS=\"-Zsanitizer=leak\" cargo +nightly test -F trace_log --lib -- --nocapture ${ARGS}\n\n.PHONY: build\nbuild: init\n\tcargo build\n\n.DEFAULT_GOAL = build\n\n# Target name % means that it is a rule that matches anything, @: is a recipe;\n# the : means do nothing\n%:\n\t@:\n"
  },
  {
    "path": "README.md",
    "content": "# Crossfire\n\n[![Build Status](https://github.com/frostyplanet/crossfire-rs/workflows/Rust/badge.svg)](\nhttps://github.com/frostyplanet/crossfire-rs/actions)\n[![License](https://img.shields.io/badge/license-Apache--2.0-blue.svg)](\nhttps://github.com/frostyplanet/crossfire-rs#license)\n[![Cargo](https://img.shields.io/crates/v/crossfire.svg)](\nhttps://crates.io/crates/crossfire)\n[![Documentation](https://docs.rs/crossfire/badge.svg)](\nhttps://docs.rs/crossfire)\n[![Rust 1.79+](https://img.shields.io/badge/rust-1.79+-lightgray.svg)](\nhttps://www.rust-lang.org)\n\nHigh-performance lockless spsc/mpsc/mpmc channels, algorithm derived from crossbeam with improvements.\n\nIt supports async contexts and bridges the gap between async and blocking contexts.\n\nFor the concept, please refer to the [wiki](https://github.com/frostyplanet/crossfire-rs/wiki).\n\n## Version history\n\n* v1.0: Used in production since 2022.12.\n\n* v2.0: [2025.6] Refactored the codebase and API\nby removing generic types from the ChannelShared type, which made it easier to code with.\n\n* v2.1: [2025.9] Removed the dependency on crossbeam-channel\nand implemented with [a modified version of crossbeam-queue](https://github.com/frostyplanet/crossfire-rs/wiki/crossbeam-related),\nbringing 2x performance improvements for both async and blocking contexts.\n\n* v3.0: [2026.1] Refactored API back to generic flavor interface, added [select](https://docs.rs/crossfire/latest/crossfire/select/index.html).\nDedicated optimization: Bounded SPSC +70%, MPSC +30%, one-size +20%.\nEliminate enum dispatch cost, async performance improved for another 33%.\nCheck out [compat](https://docs.rs/crossfire/latest/crossfire/compat/index.html) for migration from v2.x.\n\n\n## Performance\n\nBeing a lockless channel, crossfire outperforms other async-capable channels.\nAnd thanks to a lighter notification mechanism, in a blocking context, most cases are even\nbetter than the original 
crossbeam-channel,\n\n<img src=\"https://github.com/frostyplanet/crossfire-rs/wiki/images/benchmark-3.0.0-2026-01-18/mpsc_size_100_sync.png\" alt=\"mpsc bounded size 100 blocking context\">\n\n<img src=\"https://github.com/frostyplanet/crossfire-rs/wiki/images/benchmark-3.0.0-2026-01-18/mpmc_size_100_sync.png\" alt=\"mpmc bounded size 100 blocking context\">\n\n<img src=\"https://github.com/frostyplanet/crossfire-rs/wiki/images/benchmark-3.0.0-2026-01-18/mpsc_size_100_tokio.png\" alt=\"mpsc bounded size 100 async context\">\n\n<img src=\"https://github.com/frostyplanet/crossfire-rs/wiki/images/benchmark-3.0.0-2026-01-18/mpmc_size_100_tokio.png\" alt=\"mpmc bounded size 100 async context\">\n\nMore benchmark data is posted on [wiki](https://github.com/frostyplanet/crossfire-rs/wiki/benchmark-v3.0.0-2026%E2%80%9001%E2%80%9018).\n\nAlso, being a lockless channel, the algorithm relies on spinning and yielding. Spinning is good on\nmulti-core systems, but not friendly to single-core systems (like virtual machines).\nSo we provide a function `detect_backoff_cfg()` to detect the running platform.\nCalling it within the initialization section of your code, will get a 2x performance boost on\nVPS.\n\nThe benchmark is written in the criterion framework. You can run the benchmark by:\n\n``` shell\nmake bench crossfire\nmake bench crossfire_select\n```\n\n## APIs\n\n### Concurrency Modules\n\n- [spsc](https://docs.rs/crossfire/latest/crossfire/spsc/index.html), [mpsc](https://docs.rs/crossfire/latest/crossfire/mpsc/index.html), [mpmc](https://docs.rs/crossfire/latest/crossfire/mpmc/index.html). Each has different underlying implementation\noptimized to its concurrent model.\nThe SP or SC interface is only for non-concurrent operation. 
It's more memory-efficient in waker registration,\nand has atomic ops cost reduced in the lockless algorithm.\n\n- [oneshot](https://docs.rs/crossfire/latest/crossfire/oneshot/index.html) has its special sender/receiver type because using `Tx` / `Rx` will be too heavy.\n\n- [select](https://docs.rs/crossfire/latest/crossfire/select/index.html):\n    - [Select<'a>](https://docs.rs/crossfire/latest/crossfire/select/struct.Select.html): crossbeam-channel style type erased API, borrows receiver address and select with \"token\"\n    - [Multiplex](https://docs.rs/crossfire/latest/crossfire/select/struct.Multiplex.html): Multiplex stream that owns multiple receivers, selecting from the same type of\n    channel flavors, for the same type of message.\n\n- [waitgroup](https://docs.rs/crossfire/latest/crossfire/waitgroup/index.html) High performance WaitGroup that allows custom threshold.\n\n### Flavors\n\nThe following lockless queues are exposed in the [flavor](https://docs.rs/crossfire/latest/crossfire/flavor/index.html) module, and each one has a type alias in spsc/mpsc/mpmc:\n\n- `List` (which uses crossbeam `SegQueue`)\n- `Array` (which is an enum that wraps crossbeam `ArrayQueue`, and a `One` if init with size<=1)\n  - For a bounded channel, a 0 size case is not supported yet. (rewrite as 1 size).\n  - The implementation for spsc & mpsc is simplified from mpmc version.\n- `One` (which derives from `ArrayQueue` algorithm, but has better performance in size=1\nscenario, because it has two slots to allow reader and writer to work concurrently)\n- `Null` (See the doc [null](https://docs.rs/crossfire/latest/crossfire/null/index.html)), for cancellation purpose channel, that only wakes up on\nclosing.\n\n**NOTE** :\nAlthough the names `Array`, `List` are the same between spsc/mpsc/mpmc modules,\nthey are different type aliases local to their parent module. 
We suggest distinguish by\nnamespace when import for use.\n\n### Channel builder function\n\nAside from function `bounded_*`, `unbounded_*` which specify the sender / receiver type,\neach module has [build()](https://docs.rs/crossfire/latest/crossfire/mpmc/fn.build.html) and [new()](https://docs.rs/crossfire/latest/crossfire/mpmc/fn.new.html) function, which can apply to any channel flavors, and any async/blocking combinations.\n\n### Types\n\n<table align=\"center\" cellpadding=\"30\">\n<tr> <th rowspan=\"2\"> Context </th><th colspan=\"2\" align=\"center\"> Sender (Producer) </th> <th colspan=\"2\" align=\"center\"> Receiver (Consumer) </th> </tr>\n<tr> <td> Single </td> <td> Multiple </td><td> Single </td><td> Multiple </td></tr>\n<tr><td align=\"center\" rowspan=\"2\"> <b>Blocking</b> </td>\n<td colspan=\"2\" align=\"center\"> BlockingTxTrait </td>\n<td colspan=\"2\" align=\"center\"> BlockingRxTrait </td></tr>\n<tr>\n<td align=\"center\">Tx </td>\n<td align=\"center\">MTx</td>\n<td align=\"center\">Rx</td>\n<td align=\"center\">MRx</td>\n </tr>\n\n<tr><td><b>Weak reference</b></td><td></td><td><a>WeakTx</a></td></tr>\n\n<tr><td align=\"center\" rowspan=\"2\"><b>Async</b></td>\n<td colspan=\"2\" align=\"center\">AsyncTxTrait</td>\n<td colspan=\"2\" align=\"center\">AsyncRxTrait</td></tr>\n<tr>\n<td>AsyncTx</td>\n<td>MAsyncTx</td>\n<td>AsyncRx</td>\n<td>MAsyncRx</td></tr>\n\n</table>\n\n*Safety*: For the SP / SC version, `AsyncTx`, `AsyncRx`, `Tx`, and `Rx` are not `Clone` and without `Sync`.\nAlthough can be moved to other threads, but not allowed to use send/recv while in an Arc. 
(Refer to the compile_fail\nexamples in the type document).\n\nThe benefit of using the SP / SC API is completely lockless waker registration, in exchange for a performance boost.\n\nThe sender/receiver can use the **`From`** trait to convert between blocking and async context\ncounterparts (refer to the [example](#example) below)\n\n### Error types\n\nError types are the same as crossbeam-channel:\n\n`TrySendError`, `SendError`, `SendTimeoutError`, `TryRecvError`, `RecvError`, `RecvTimeoutError`\n\n### Async compatibility\n\nTested on tokio-1.x and async-std-1.x, crossfire is runtime-agnostic.\n\nThe following scenarios are considered:\n\n* The `AsyncTx::send()` and `AsyncRx::recv()` operations are **cancellation-safe** in an async context.\nYou can safely use the select! macro and timeout() function in tokio/futures in combination with recv().\n On cancellation, `SendFuture` and `RecvFuture` will trigger drop(), which will clean up the state of the waker,\nmaking sure there is no memory-leak and deadlock.\nBut you cannot know the true result from SendFuture, since it's dropped\nupon cancellation. Thus, we suggest using `AsyncTx::send_timeout()` instead.\n\n* When the \"tokio\" or \"async_std\" feature is enabled, we also provide two additional functions:\n\n- `AsyncTx::send_timeout()`, which will return the message that failed to be sent in\n`SendTimeoutError`. We guarantee the result is atomic. 
Alternatively, you can use\n`AsyncTx::send_with_timer()`.\n\n- `AsyncRx::recv_timeout()`, we guarantee the result is atomic.\nAlternatively, you can use `AsyncRx::recv_with_timer()`.\n\n* The waker footprint:\n\nWhen using a multi-producer and multi-consumer scenario, there's a small memory overhead to pass along a `Weak`\nreference of wakers.\nBecause we aim to be lockless, when the sending/receiving futures are canceled (like tokio::time::timeout()),\nit might trigger an immediate cleanup if the try-lock is successful, otherwise will rely on lazy cleanup.\n(This won't be an issue because weak wakers will be consumed by actual message send and recv).\nOn an idle-select scenario, like a notification for close, the waker will be reused as much as possible\nif poll() returns pending.\n\n* Handle written future:\n\nThe future object created by `AsyncTx::send()`, `AsyncTx::send_timeout()`, `AsyncRx::recv()`,\n`AsyncRx::recv_timeout()` is `Sized`. You don't need to put them in `Box`.\n\nIf you like to use poll function directly for complex behavior, you can call\n`AsyncSink::poll_send()` or `AsyncStream::poll_item()` with Context.\n\n## Usage\n\nCargo.toml:\n```toml\n[dependencies]\ncrossfire = \"3.1\"\n```\n\n### Feature flags\n\n* `compat`: Enable the compat model, which has the same API namespace struct as V2.x\n\n* `tokio`: Enable `send_timeout()`, `recv_timeout()` with tokio sleep function. (conflict\nwith `async_std` feature)\n\n* `async_std`: Enable send_timeout, recv_timeout with async-std sleep function. 
(conflict\nwith `tokio` feature)\n\n* `trace_log`: Development mode, to enable internal log while testing or benchmark, to debug deadlock issues.\n\n### Example\n\nblocking / async sender receiver mixed together\n\n```rust\n\nextern crate crossfire;\nuse crossfire::*;\n#[macro_use]\nextern crate tokio;\nuse tokio::time::{sleep, interval, Duration};\n\n#[tokio::main]\nasync fn main() {\n    let (tx, rx) = mpmc::bounded_async::<usize>(100);\n    let mut recv_counter = 0;\n    let mut co_tx = Vec::new();\n    let mut co_rx = Vec::new();\n    const ROUND: usize = 1000;\n\n    let _tx: MTx<mpmc::Array<usize>> = tx.clone().into_blocking();\n    co_tx.push(tokio::task::spawn_blocking(move || {\n        for i in 0..ROUND {\n            _tx.send(i).expect(\"send ok\");\n        }\n    }));\n    co_tx.push(tokio::spawn(async move {\n        for i in 0..ROUND {\n            tx.send(i).await.expect(\"send ok\");\n        }\n    }));\n    let _rx: MRx<mpmc::Array<usize>> = rx.clone().into_blocking();\n    co_rx.push(tokio::task::spawn_blocking(move || {\n        let mut count: usize = 0;\n        'A: loop {\n            match _rx.recv() {\n                Ok(_i) => {\n                    count += 1;\n                }\n                Err(_) => break 'A,\n            }\n        }\n        count\n    }));\n    co_rx.push(tokio::spawn(async move {\n        let mut count: usize = 0;\n        'A: loop {\n            match rx.recv().await {\n                Ok(_i) => {\n                    count += 1;\n                }\n                Err(_) => break 'A,\n            }\n        }\n        count\n    }));\n    for th in co_tx {\n        let _ = th.await.unwrap();\n    }\n    for th in co_rx {\n        recv_counter += th.await.unwrap();\n    }\n    assert_eq!(recv_counter, ROUND * 2);\n}\n```\n\n## Test status\n\n**NOTE**: Because we has push the speed to a level no one has gone before,\nit can put a pure pressure to the async runtime.\nSome hidden bug (especially atomic ops on 
weaker ordering platform) might occur:\n\nThe test is placed in test-suite directory, run with:\n\n```\nmake test\n```\n\n<table cellpadding=\"30\">\n<tr><th>arch</th><th>runtime</th><th>workflow</th><th>status</th></tr>\n<tr>\n<td align=\"center\" rowspan=\"5\">x86_64</td>\n<td>threaded</td>\n<td><a href=\"https://github.com/frostyplanet/crossfire-rs/actions/workflows/cron_master_threaded_x86.yml\">cron_master_threaded_x86</a> </td>\n<td>STABLE</td>\n</tr>\n<tr><td>tokio 1.47.1</td>\n<td><a href=\"https://github.com/frostyplanet/crossfire-rs/actions/workflows/cron_master_tokio_x86.yml\">cron_master_tokio_x86</a></td>\n<td>STABLE<br/>\n</td>\n</tr>\n<tr><td>async-std</td>\n<td><a href=\"https://github.com/frostyplanet/crossfire-rs/actions/workflows/cron_master_async_std_x86.yml\">cron_master_async_std_x86</a></td>\n<td>STABLE</td>\n</tr>\n<tr><td>smol</td>\n<td><a href=\"https://github.com/frostyplanet/crossfire-rs/actions/workflows/cron_master_smol_x86.yml\">cron_master_smol-x86</a></td>\n<td>STABLE</td>\n<tr><td>compio</td>\n<td><a href=\"https://github.com/frostyplanet/crossfire-rs/actions/workflows/cron_master_compio_x86.yml\">cron_master_compio-x86</a></td>\n<td>verifying</td>\n</tr>\n<tr><td align=\"center\" rowspan=\"5\">arm</td>\n<td>threaded</td>\n<td>\n<a href=\"https://github.com/frostyplanet/crossfire-rs/actions/workflows/cron_master_threaded_arm.yml\">cron_master_threaded_arm</a><br/>\n</td>\n<td>STABLE</td>\n</tr>\n<tr>\n<td>tokio >= 1.48 (<a href=\"https://github.com/tokio-rs/tokio/pull/7622\">tokio PR #7622</a>)\n</td>\n<td>\n<a href=\"https://github.com/frostyplanet/crossfire-rs/actions/workflows/cron_master_tokio_arm.yml\">cron_master_tokio_arm</a><br/>\n</td>\n<td> SHOULD UPGRADE tokio to 1.48<br/>\nSTABLE\n </td>\n</tr>\n<tr>\n<td>async-std</td>\n<td><a href=\"https://github.com/frostyplanet/crossfire-rs/actions/workflows/cron_master_async_std_arm.yml\">cron_master_async_std_arm</a></td>\n<td>STABLE</td>\n</tr>\n<tr>\n<td>smol</td>\n<td><a 
href=\"https://github.com/frostyplanet/crossfire-rs/actions/workflows/cron_master_smol_arm.yml\">cron_master_smol_arm</a> </td>\n<td>STABLE</td>\n</tr>\n<tr>\n<td>compio</td>\n<td><a href=\"https://github.com/frostyplanet/crossfire-rs/actions/workflows/cron_master_compio_arm.yml\">cron_master_compio_arm</a> </td>\n<td>verifying</td>\n</tr>\n<tr>\n<td rowspan=\"4\">miri (emulation)</td>\n<td>threaded</td>\n<td rowspan=\"2\"><a href=\"https://github.com/frostyplanet/crossfire-rs/actions/workflows/miri_tokio.yml\">miri_tokio</a><br />\n<a href=\"https://github.com/frostyplanet/crossfire-rs/actions/workflows/miri_tokio_cur.yml\">miri_tokio_cur</a>\n</td>\n<td>STABLE</td>\n</tr>\n<tr><td>tokio</td><td>STABLE</td>\n</tr>\n<tr><td>async-std</td><td>-</td> <td> (timerfd_create) not supported by miri </td>\n</tr>\n<tr><td>smol</td><td>-</td> <td> (timerfd_create) not supported by miri </td>\n</tr>\n\n</table>\n\n### Debugging deadlock issue\n\n**Debug locally**:\n\nUse `--features trace_log` to run the bench or test until it hangs, then press `ctrl+c` or send `SIGINT`,  there will be latest log dump to /tmp/crossfire_ring.log (refer to tests/common.rs `_setup_log()`)\n\n**Debug with github workflow**:  https://github.com/frostyplanet/crossfire-rs/issues/37\n"
  },
  {
    "path": "benches/inner.rs",
    "content": "use criterion::*;\nuse crossbeam_queue::{ArrayQueue, SegQueue};\nuse crossbeam_utils::Backoff;\nuse crossfire::collections::*;\nuse parking_lot::Mutex;\nuse std::cell::UnsafeCell;\nuse std::collections::VecDeque;\nuse std::sync::{\n    atomic::{AtomicBool, AtomicUsize, Ordering},\n    Arc, Weak,\n};\nuse std::thread;\nuse std::time::Duration;\n\nconst ONE_MILLION: usize = 1000000;\n\nstruct Foo {\n    _inner: usize,\n}\n\npub struct LockedQueue<T> {\n    empty: AtomicBool,\n    queue: Mutex<VecDeque<T>>,\n}\n\nimpl<T> LockedQueue<T> {\n    #[inline]\n    pub fn new(cap: usize) -> Self {\n        Self { empty: AtomicBool::new(true), queue: Mutex::new(VecDeque::with_capacity(cap)) }\n    }\n\n    #[inline(always)]\n    pub fn push(&self, msg: T) {\n        let mut guard = self.queue.lock();\n        if guard.is_empty() {\n            self.empty.store(false, Ordering::Release);\n        }\n        guard.push_back(msg);\n    }\n\n    #[inline(always)]\n    pub fn pop(&self) -> Option<T> {\n        if self.empty.load(Ordering::Acquire) {\n            return None;\n        }\n        let mut guard = self.queue.lock();\n        if let Some(item) = guard.pop_front() {\n            if guard.len() == 0 {\n                self.empty.store(true, Ordering::Release);\n            }\n            Some(item)\n        } else {\n            None\n        }\n    }\n\n    #[inline(always)]\n    pub fn len(&self) -> usize {\n        let guard = self.queue.lock();\n        guard.len()\n    }\n\n    #[allow(dead_code)]\n    #[inline(always)]\n    pub fn exists(&self) -> bool {\n        !self.empty.load(Ordering::Acquire)\n    }\n}\n\npub struct SpinQueue<T> {\n    lock: AtomicBool,\n    queue: UnsafeCell<VecDeque<T>>,\n}\n\nunsafe impl<T> Send for SpinQueue<T> {}\nunsafe impl<T> Sync for SpinQueue<T> {}\n\nimpl<T> SpinQueue<T> {\n    fn new(cap: usize) -> Self {\n        Self { lock: AtomicBool::new(false), queue: UnsafeCell::new(VecDeque::with_capacity(cap)) }\n    }\n\n  
  #[inline(always)]\n    fn get_queue(&self) -> &mut VecDeque<T> {\n        unsafe { std::mem::transmute(self.queue.get()) }\n    }\n\n    #[inline]\n    fn push(&self, msg: T) {\n        let backoff = Backoff::new();\n        while self.lock.swap(true, Ordering::SeqCst) {\n            backoff.spin();\n        }\n        self.get_queue().push_back(msg);\n        self.lock.store(false, Ordering::Release);\n    }\n\n    #[inline]\n    fn pop(&self) -> Option<T> {\n        let backoff = Backoff::new();\n        while self.lock.swap(true, Ordering::SeqCst) {\n            backoff.spin();\n        }\n        let r = self.get_queue().pop_front();\n        self.lock.store(false, Ordering::Release);\n        r\n    }\n}\n\nfn _bench_spin_queue(count: usize) {\n    let queue = Arc::new(SpinQueue::<Weak<Foo>>::new(10));\n    let mut th_s = Vec::new();\n    let counter = Arc::new(AtomicUsize::new(0));\n    for _ in 0..count {\n        let _queue = queue.clone();\n        let _counter = counter.clone();\n        th_s.push(thread::spawn(move || loop {\n            let i = _counter.fetch_add(1, Ordering::SeqCst);\n            if i < ONE_MILLION {\n                if let Some(weak) = _queue.pop() {\n                    let _ = weak.upgrade();\n                }\n            } else {\n                break;\n            }\n        }));\n    }\n    th_s.push(thread::spawn(move || {\n        for _ in 0..ONE_MILLION {\n            let foo = Arc::new(Foo { _inner: 1 });\n            queue.push(Arc::downgrade(&foo));\n        }\n    }));\n    for th in th_s {\n        let _ = th.join();\n    }\n}\n\nfn _bench_locked_queue(count: usize) {\n    let queue = Arc::new(LockedQueue::<Weak<Foo>>::new(10));\n    let mut th_s = Vec::new();\n    let counter = Arc::new(AtomicUsize::new(0));\n    for _ in 0..count {\n        let _queue = queue.clone();\n        let _counter = counter.clone();\n        th_s.push(thread::spawn(move || loop {\n            let i = _counter.fetch_add(1, 
Ordering::SeqCst);\n            if i < ONE_MILLION {\n                if let Some(weak) = _queue.pop() {\n                    let _ = weak.upgrade();\n                }\n            } else {\n                break;\n            }\n        }));\n    }\n    th_s.push(thread::spawn(move || {\n        for _ in 0..ONE_MILLION {\n            let foo = Arc::new(Foo { _inner: 1 });\n            queue.push(Arc::downgrade(&foo));\n        }\n    }));\n    for th in th_s {\n        let _ = th.join();\n    }\n}\n\nfn _bench_array_queue(count: usize) {\n    let queue = Arc::new(ArrayQueue::<Weak<Foo>>::new(1));\n    let mut th_s = Vec::new();\n    let counter = Arc::new(AtomicUsize::new(0));\n    for _ in 0..count {\n        let _queue = queue.clone();\n        let _counter = counter.clone();\n        th_s.push(thread::spawn(move || loop {\n            let i = _counter.fetch_add(1, Ordering::SeqCst);\n            if i < ONE_MILLION {\n                if let Some(weak) = _queue.pop() {\n                    let _ = weak.upgrade();\n                }\n            } else {\n                break;\n            }\n        }));\n    }\n    th_s.push(thread::spawn(move || {\n        for _ in 0..ONE_MILLION {\n            let foo = Arc::new(Foo { _inner: 1 });\n            queue.force_push(Arc::downgrade(&foo));\n        }\n    }));\n    for th in th_s {\n        let _ = th.join();\n    }\n}\n\nfn _bench_seg_queue(count: usize) {\n    let queue = Arc::new(SegQueue::<Weak<Foo>>::new());\n    let mut th_s = Vec::new();\n    let counter = Arc::new(AtomicUsize::new(0));\n    for _ in 0..count {\n        let _queue = queue.clone();\n        let _counter = counter.clone();\n        th_s.push(thread::spawn(move || loop {\n            let i = _counter.fetch_add(1, Ordering::SeqCst);\n            if i < ONE_MILLION {\n                if let Some(weak) = _queue.pop() {\n                    let _ = weak.upgrade();\n                }\n            } else {\n                break;\n            }\n    
    }));\n    }\n    th_s.push(thread::spawn(move || {\n        for _ in 0..ONE_MILLION {\n            let foo = Arc::new(Foo { _inner: 1 });\n            queue.push(Arc::downgrade(&foo));\n        }\n    }));\n    for th in th_s {\n        let _ = th.join();\n    }\n}\n\nfn _bench_weak_cell(count: usize) {\n    let cell = Arc::new(WeakCell::<Foo>::new());\n    let mut th_s = Vec::new();\n    let counter = Arc::new(AtomicUsize::new(0));\n    for _ in 0..count {\n        let _cell = cell.clone();\n        let _counter = counter.clone();\n        th_s.push(thread::spawn(move || loop {\n            let i = _counter.fetch_add(1, Ordering::SeqCst);\n            if i < ONE_MILLION {\n                let _ = _cell.pop();\n            } else {\n                break;\n            }\n        }));\n    }\n    th_s.push(thread::spawn(move || {\n        for _ in 0..ONE_MILLION {\n            let foo = Arc::new(Foo { _inner: 1 });\n            cell.put(Arc::downgrade(&foo));\n        }\n    }));\n    for th in th_s {\n        let _ = th.join();\n    }\n}\n\nfn _bench_empty(c: &mut Criterion) {\n    let mut group = c.benchmark_group(\"empty\");\n    group.significance_level(0.1).sample_size(50);\n    group.measurement_time(Duration::from_secs(10));\n    group.throughput(Throughput::Elements(ONE_MILLION as u64));\n    group.bench_function(\"weak_cell\", |b| {\n        b.iter(|| {\n            let cell = WeakCell::<Foo>::new();\n            for _ in 0..ONE_MILLION {\n                let _ = cell.pop();\n            }\n        })\n    });\n    group.measurement_time(Duration::from_secs(10));\n    group.throughput(Throughput::Elements(ONE_MILLION as u64));\n    group.bench_function(\"spin VecDeque\", |b| {\n        b.iter(|| {\n            let queue = SpinQueue::<Foo>::new(10);\n            for _ in 0..ONE_MILLION {\n                let _ = queue.pop();\n            }\n        })\n    });\n    group.measurement_time(Duration::from_secs(10));\n    
group.throughput(Throughput::Elements(ONE_MILLION as u64));\n    group.bench_function(\"locked VecDeque\", |b| {\n        b.iter(|| {\n            let queue = LockedQueue::<Foo>::new(10);\n            for _ in 0..ONE_MILLION {\n                let _ = queue.pop();\n            }\n        })\n    });\n\n    group.measurement_time(Duration::from_secs(10));\n    group.throughput(Throughput::Elements(ONE_MILLION as u64));\n    group.bench_function(\"array_queue\", |b| {\n        b.iter(|| {\n            let queue = ArrayQueue::<Foo>::new(1);\n            for _ in 0..ONE_MILLION {\n                let _ = queue.pop();\n            }\n        })\n    });\n    group.measurement_time(Duration::from_secs(10));\n    group.throughput(Throughput::Elements(ONE_MILLION as u64));\n    group.bench_function(\"seg_queue\", |b| {\n        b.iter(|| {\n            let queue = SegQueue::<Foo>::new();\n            for _ in 0..ONE_MILLION {\n                let _ = queue.pop();\n            }\n        })\n    });\n}\n\nfn _bench_sequence(c: &mut Criterion) {\n    let mut group = c.benchmark_group(\"sequence\");\n    group.significance_level(0.1).sample_size(50);\n    group.measurement_time(Duration::from_secs(10));\n    group.throughput(Throughput::Elements(ONE_MILLION as u64));\n    group.bench_function(\"weak_cell\", |b| {\n        b.iter(|| {\n            let cell = WeakCell::<Foo>::new();\n            for _ in 0..ONE_MILLION {\n                let foo = Arc::new(Foo { _inner: 1 });\n                cell.put(Arc::downgrade(&foo));\n                let _ = cell.pop();\n            }\n        })\n    });\n    group.measurement_time(Duration::from_secs(10));\n    group.throughput(Throughput::Elements(ONE_MILLION as u64));\n    group.bench_function(\"spin VecDeque\", |b| {\n        b.iter(|| {\n            let queue = SpinQueue::new(10);\n            for _ in 0..ONE_MILLION {\n                let foo = Arc::new(Foo { _inner: 1 });\n                let _ = 
queue.push(Arc::downgrade(&foo));\n                if let Some(w) = queue.pop() {\n                    let _ = w.upgrade();\n                }\n            }\n        })\n    });\n    group.measurement_time(Duration::from_secs(10));\n    group.throughput(Throughput::Elements(ONE_MILLION as u64));\n    group.bench_function(\"locked VecDeque\", |b| {\n        b.iter(|| {\n            let queue = LockedQueue::new(10);\n            for _ in 0..ONE_MILLION {\n                let foo = Arc::new(Foo { _inner: 1 });\n                let _ = queue.push(Arc::downgrade(&foo));\n                if let Some(w) = queue.pop() {\n                    let _ = w.upgrade();\n                }\n            }\n        })\n    });\n\n    group.measurement_time(Duration::from_secs(10));\n    group.throughput(Throughput::Elements(ONE_MILLION as u64));\n    group.bench_function(\"array_queue\", |b| {\n        b.iter(|| {\n            let queue = ArrayQueue::<Weak<Foo>>::new(1);\n            for _ in 0..ONE_MILLION {\n                let foo = Arc::new(Foo { _inner: 1 });\n                let _ = queue.push(Arc::downgrade(&foo));\n                if let Some(w) = queue.pop() {\n                    let _ = w.upgrade();\n                }\n            }\n        })\n    });\n    group.measurement_time(Duration::from_secs(10));\n    group.throughput(Throughput::Elements(ONE_MILLION as u64));\n    group.bench_function(\"seg_queue\", |b| {\n        b.iter(|| {\n            let queue = SegQueue::<Weak<Foo>>::new();\n            for _ in 0..ONE_MILLION {\n                let foo = Arc::new(Foo { _inner: 1 });\n                let _ = queue.push(Arc::downgrade(&foo));\n                if let Some(w) = queue.pop() {\n                    let _ = w.upgrade();\n                }\n            }\n        })\n    });\n}\n\nfn _bench_threads(c: &mut Criterion) {\n    let mut group = c.benchmark_group(\"threads\");\n    group.significance_level(0.1).sample_size(50);\n    
group.measurement_time(Duration::from_secs(10));\n\n    for input in n_1() {\n        group.throughput(Throughput::Elements(ONE_MILLION as u64));\n        group.bench_with_input(BenchmarkId::new(\"weak_cell\", input), &input, |b, i| {\n            b.iter(|| _bench_weak_cell(*i))\n        });\n        group.throughput(Throughput::Elements(ONE_MILLION as u64));\n        group.bench_with_input(BenchmarkId::new(\"spin VecDeque\", input), &input, |b, i| {\n            b.iter(|| _bench_spin_queue(*i))\n        });\n        group.throughput(Throughput::Elements(ONE_MILLION as u64));\n        group.bench_with_input(BenchmarkId::new(\"locked VecDeque\", input), &input, |b, i| {\n            b.iter(|| _bench_locked_queue(*i))\n        });\n        group.throughput(Throughput::Elements(ONE_MILLION as u64));\n        group.bench_with_input(BenchmarkId::new(\"array_queue\", input), &input, |b, i| {\n            b.iter(|| _bench_array_queue(*i))\n        });\n        group.throughput(Throughput::Elements(ONE_MILLION as u64));\n        group.bench_with_input(BenchmarkId::new(\"seg_queue\", input), &input, |b, i| {\n            b.iter(|| _bench_seg_queue(*i))\n        });\n    }\n}\n\ncriterion_group!(benches, _bench_empty, _bench_sequence, _bench_threads);\ncriterion_main!(benches);\n"
  },
  {
    "path": "git-hooks/pre-commit",
    "content": "#!/bin/bash\n\nmake fmt || exit 1\n\n# re-add the files, since they may have been changed by fmt\nfiles=$(git diff --cached --name-only --diff-filter=ACM | grep '\\.rs$')\nfor f in $files; do\n\techo \"git add $f\"\n\tgit add \"$f\"\ndone\nexit 0\n"
  },
  {
    "path": "rustfmt.toml",
    "content": "edition = \"2021\"\nfn_params_layout = \"Compressed\"\nnewline_style = \"Unix\"\nuse_small_heuristics = \"Max\"\nmax_width = 100\nuse_field_init_shorthand = true\n"
  },
  {
    "path": "src/async_rx.rs",
    "content": "use crate::flavor::{FlavorMC, FlavorSelect};\nuse crate::select::SelectResult;\nuse crate::stream::AsyncStream;\n#[cfg(feature = \"trace_log\")]\nuse crate::tokio_task_id;\nuse crate::{shared::*, trace_log, MRx, NotCloneable, ReceiverType, Rx};\nuse std::cell::Cell;\nuse std::fmt;\nuse std::future::Future;\nuse std::marker::PhantomData;\nuse std::ops::Deref;\nuse std::pin::Pin;\nuse std::sync::Arc;\nuse std::task::{Context, Poll};\n\n/// A single consumer (receiver) that works in an async context.\n///\n/// Additional methods in [ChannelShared] can be accessed through `Deref`.\n///\n/// `AsyncRx` can be converted into `Rx` via the `From` trait,\n/// which means you can have two types of receivers, both within async and\n/// blocking contexts, for the same channel.\n///\n/// **NOTE**: `AsyncRx` is not `Clone` or `Sync`.\n/// If you need concurrent access, use [MAsyncRx] instead.\n///\n/// `AsyncRx` has a `Send` marker and can be moved to other coroutines.\n/// The following code is OK:\n///\n/// ``` rust\n/// use crossfire::*;\n/// async fn foo() {\n///     let (tx, rx) = mpsc::bounded_async::<usize>(100);\n///     tokio::spawn(async move {\n///         let _ = rx.recv().await;\n///     });\n///     drop(tx);\n/// }\n/// ```\n///\n/// Because `AsyncRx` does not have a `Sync` marker, using `Arc<AsyncRx>` will lose the `Send` marker.\n///\n/// For your safety, the following code **should not compile**:\n///\n/// ``` compile_fail\n/// use crossfire::*;\n/// use std::sync::Arc;\n/// async fn foo() {\n///     let (tx, rx) = mpsc::bounded_async::<usize>(100);\n///     let rx = Arc::new(rx);\n///     tokio::spawn(async move {\n///         let _ = rx.recv().await;\n///     });\n///     drop(tx);\n/// }\n/// ```\npub struct AsyncRx<F: Flavor> {\n    pub(crate) shared: Arc<ChannelShared<F>>,\n    // Remove the Sync marker to prevent being put in Arc\n    _phan: PhantomData<Cell<()>>,\n}\n\nunsafe impl<F: Flavor> Send for AsyncRx<F> {}\n\nimpl<F: Flavor> 
fmt::Debug for AsyncRx<F> {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"AsyncRx{:p}\", self)\n    }\n}\n\nimpl<F: Flavor> fmt::Display for AsyncRx<F> {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"AsyncRx{:p}\", self)\n    }\n}\n\nimpl<F: Flavor> Drop for AsyncRx<F> {\n    #[inline(always)]\n    fn drop(&mut self) {\n        self.shared.close_rx();\n    }\n}\n\nimpl<F: Flavor> From<Rx<F>> for AsyncRx<F> {\n    fn from(value: Rx<F>) -> Self {\n        value.add_rx();\n        Self::new(value.shared.clone())\n    }\n}\n\nimpl<F: Flavor> AsyncRx<F> {\n    #[inline]\n    pub(crate) fn new(shared: Arc<ChannelShared<F>>) -> Self {\n        Self { shared, _phan: Default::default() }\n    }\n\n    /// Return true if the other side has closed\n    #[inline(always)]\n    pub fn is_disconnected(&self) -> bool {\n        self.shared.is_tx_closed()\n    }\n\n    #[inline]\n    pub fn into_stream(self) -> AsyncStream<F> {\n        AsyncStream::new(self)\n    }\n\n    #[inline]\n    pub fn into_blocking(self) -> Rx<F> {\n        self.into()\n    }\n}\n\nimpl<F: Flavor> AsyncRx<F> {\n    /// Receives a message from the channel. 
This method will await until a message is received or the channel is closed.\n    ///\n    /// This function is cancellation-safe, so it's safe to use with `timeout()` and the `select!` macro.\n    /// When a [RecvFuture] is dropped, no message will be received from the channel.\n    ///\n    /// For timeout scenarios, there's an alternative: [AsyncRx::recv_timeout()].\n    ///\n    /// Returns `Ok(T)` on success.\n    ///\n    /// Returns Err([RecvError]) if the sender has been dropped.\n    #[inline(always)]\n    pub fn recv<'a>(&'a self) -> RecvFuture<'a, F> {\n        RecvFuture { rx: self, waker: None }\n    }\n\n    // NOTE: we cannot use async fn recv_timeout signature because &self is not Send\n    /// Receives a message from the channel with a timeout.\n    /// Will await when channel is empty.\n    ///\n    /// The behavior is atomic: the message is either received successfully or the operation is canceled due to a timeout.\n    ///\n    /// Returns `Ok(T)` when successful.\n    ///\n    /// Returns Err([RecvTimeoutError::Timeout]) when a message could not be received because the channel is empty and the operation timed out.\n    ///\n    /// Returns Err([RecvTimeoutError::Disconnected]) if the sender has been dropped and the channel is empty.\n    #[cfg(feature = \"tokio\")]\n    #[cfg_attr(docsrs, doc(cfg(feature = \"tokio\")))]\n    #[inline]\n    pub fn recv_timeout(\n        &self, duration: std::time::Duration,\n    ) -> RecvTimeoutFuture<'_, F, tokio::time::Sleep, ()> {\n        let sleep = tokio::time::sleep(duration);\n        self.recv_with_timer(sleep)\n    }\n    #[cfg(feature = \"async_std\")]\n    #[cfg_attr(docsrs, doc(cfg(feature = \"async_std\")))]\n    #[inline]\n    pub fn recv_timeout(\n        &self, duration: std::time::Duration,\n    ) -> RecvTimeoutFuture<'_, F, impl Future<Output = ()>, ()> {\n        let sleep = async_std::task::sleep(duration);\n        self.recv_with_timer(sleep)\n    }\n\n    /// Receives a message from the 
channel with a custom timer function (from other async runtime).\n    ///\n    /// The behavior is atomic: the message is either received successfully or the operation is canceled due to a timeout.\n    ///\n    /// Returns `Ok(T)` when successful.\n    ///\n    /// Returns Err([RecvTimeoutError::Timeout]) when a message could not be received because the channel is empty and the operation timed out.\n    ///\n    /// Returns Err([RecvTimeoutError::Disconnected]) if the sender has been dropped and the channel is empty.\n    ///\n    /// # Argument:\n    ///\n    /// * `sleep`: The sleep function.\n    ///   The return value of `sleep` is ignore. We add generic `R` just in order to support smol::Timer\n    ///\n    /// # Example:\n    ///\n    /// with smol timer\n    ///\n    /// ```rust\n    /// extern crate smol;\n    /// use std::time::Duration;\n    /// use crossfire::*;\n    /// async fn foo() {\n    ///     let (tx, rx) = mpmc::bounded_async::<usize>(10);\n    ///     match rx.recv_with_timer(smol::Timer::after(Duration::from_secs(1))).await {\n    ///         Ok(_item)=>{\n    ///             println!(\"message recv\");\n    ///         }\n    ///         Err(RecvTimeoutError::Timeout)=>{\n    ///             println!(\"timeout\");\n    ///         }\n    ///         Err(RecvTimeoutError::Disconnected)=>{\n    ///             println!(\"sender-side closed\");\n    ///         }\n    ///     }\n    /// }\n    /// ```\n    #[inline]\n    pub fn recv_with_timer<'a, FR, R>(&'a self, sleep: FR) -> RecvTimeoutFuture<'a, F, FR, R>\n    where\n        FR: Future<Output = R>,\n    {\n        RecvTimeoutFuture { rx: self, waker: None, sleep }\n    }\n\n    /// Attempts to receive a message from the channel without blocking.\n    ///\n    /// Returns `Ok(T)` on successful.\n    ///\n    /// Returns Err([TryRecvError::Empty]) if the channel is empty.\n    ///\n    /// Returns Err([TryRecvError::Disconnected]) if the sender has been dropped and the channel is empty.\n    
#[inline(always)]\n    pub fn try_recv(&self) -> Result<F::Item, TryRecvError> {\n        self.shared.try_recv()\n    }\n\n    /// This method use with [select](crate::select::Select), guarantee non-blocking\n    ///\n    /// # Panics\n    ///\n    /// Panics if SelectResult from other receiver is passed.\n    #[inline(always)]\n    pub fn read_select(&self, result: SelectResult) -> Result<F::Item, RecvError>\n    where\n        F: FlavorSelect,\n    {\n        assert_eq!(\n            self as *const Self as *const u8, result.channel,\n            \"invalid use select with another channel\"\n        );\n        self.as_ref().read_with_token(result.token)\n    }\n\n    /// Internal function might change in the future. For public version, use AsyncStream::poll_item() instead\n    ///\n    /// Returns `Ok(T)` on successful.\n    ///\n    /// Return Err([TryRecvError::Empty]) for Poll::Pending case.\n    ///\n    /// Return Err([TryRecvError::Disconnected]) when all Tx dropped and channel is empty.\n    #[inline(always)]\n    pub(crate) fn poll_item<const STREAM: bool>(\n        &self, ctx: &mut Context, o_waker: &mut Option<<F::Recv as Registry>::Waker>,\n    ) -> Result<F::Item, TryRecvError> {\n        let shared = &self.shared;\n        // When the result is not TryRecvError::Empty,\n        // make sure always take the o_waker out and abandon,\n        // to skip the timeout cleaning logic in Drop.\n        macro_rules! on_recv_no_waker {\n            () => {{\n                trace_log!(\"rx{:?}: recv\", tokio_task_id!());\n            }};\n        }\n        macro_rules! on_recv_waker {\n            ($state: expr) => {{\n                trace_log!(\"rx{:?}: recv {:?} {:?}\", tokio_task_id!(), o_waker, $state);\n                shared.recvs.cancel_waker(o_waker);\n            }};\n        }\n        macro_rules! 
try_recv {\n            ($recv_func: ident => $waker_handle: block) => {\n                if let Some(item) = shared.inner.$recv_func() {\n                    shared.on_recv();\n                    $waker_handle\n                    return Ok(item);\n                }\n            };\n        }\n        loop {\n            if o_waker.is_none() {\n                try_recv!(try_recv=>{ on_recv_no_waker!()});\n                // First call\n                if let Some(mut backoff) = shared.get_async_backoff() {\n                    loop {\n                        let complete = backoff.spin();\n                        try_recv!(try_recv=>{ on_recv_no_waker!()});\n                        if complete {\n                            break;\n                        }\n                    }\n                }\n            } else {\n                try_recv!(try_recv => {on_recv_waker!(WakerState::Woken)});\n            }\n            if shared.recvs.reg_waker_async(ctx, o_waker).is_some() {\n                break;\n            }\n            // NOTE: The other side put something while reg_send and did not see the waker,\n            // should check the channel again, otherwise might incur a dead lock.\n            // NOTE: special API before we park\n            // because Miri is not happy about ArrayQueue pop ordering, which is not SeqCst\n            try_recv!(try_recv_final =>{ on_recv_waker!(WakerState::Init)});\n            if !STREAM {\n                let state = shared.recvs.commit_waiting(o_waker);\n                trace_log!(\"rx{:?}: commit_waiting {:?} {}\", tokio_task_id!(), o_waker, state);\n                if state == WakerState::Woken as u8 {\n                    continue;\n                }\n            }\n            break;\n        }\n        if shared.is_tx_closed() {\n            try_recv!(try_recv =>{ on_recv_waker!(WakerState::Closed)});\n            trace_log!(\"rx{:?}: disconnected {:?}\", tokio_task_id!(), o_waker);\n            
Err(TryRecvError::Disconnected)\n        } else {\n            Err(TryRecvError::Empty)\n        }\n    }\n}\n\n/// A fixed-sized future object constructed by [AsyncRx::recv()]\n#[must_use]\npub struct RecvFuture<'a, F: Flavor> {\n    rx: &'a AsyncRx<F>,\n    waker: Option<<F::Recv as Registry>::Waker>,\n}\n\nunsafe impl<F: Flavor> Send for RecvFuture<'_, F> {}\n\nimpl<F: Flavor> Drop for RecvFuture<'_, F> {\n    #[inline]\n    fn drop(&mut self) {\n        if let Some(waker) = self.waker.as_ref() {\n            self.rx.shared.abandon_recv_waker(waker);\n        }\n    }\n}\n\nimpl<F: Flavor> Future for RecvFuture<'_, F> {\n    type Output = Result<F::Item, RecvError>;\n\n    #[inline]\n    fn poll(self: Pin<&mut Self>, ctx: &mut Context) -> Poll<Self::Output> {\n        let mut _self = self.get_mut();\n        match _self.rx.poll_item::<false>(ctx, &mut _self.waker) {\n            Err(e) => {\n                if !e.is_empty() {\n                    let _ = _self.waker.take();\n                    Poll::Ready(Err(RecvError {}))\n                } else {\n                    Poll::Pending\n                }\n            }\n            Ok(item) => {\n                debug_assert!(_self.waker.is_none());\n                Poll::Ready(Ok(item))\n            }\n        }\n    }\n}\n\n/// A fixed-sized future object constructed by [AsyncRx::recv_timeout()]\n#[must_use]\npub struct RecvTimeoutFuture<'a, F, FR, R>\nwhere\n    F: Flavor,\n    FR: Future<Output = R>,\n{\n    rx: &'a AsyncRx<F>,\n    waker: Option<<F::Recv as Registry>::Waker>,\n    sleep: FR,\n}\n\nunsafe impl<F, FR, R> Send for RecvTimeoutFuture<'_, F, FR, R>\nwhere\n    F: Flavor,\n    FR: Future<Output = R>,\n{\n}\n\nimpl<F, FR, R> Drop for RecvTimeoutFuture<'_, F, FR, R>\nwhere\n    F: Flavor,\n    FR: Future<Output = R>,\n{\n    #[inline]\n    fn drop(&mut self) {\n        if let Some(waker) = self.waker.as_ref() {\n            self.rx.shared.abandon_recv_waker(waker);\n        }\n    }\n}\n\nimpl<F, FR, 
R> Future for RecvTimeoutFuture<'_, F, FR, R>\nwhere\n    F: Flavor,\n    FR: Future<Output = R>,\n{\n    type Output = Result<F::Item, RecvTimeoutError>;\n\n    #[inline]\n    fn poll(self: Pin<&mut Self>, ctx: &mut Context) -> Poll<Self::Output> {\n        // NOTE: we can use unchecked to bypass pin because we are not moving \"sleep\",\n        // nor is it exposed outside\n        let mut _self = unsafe { self.get_unchecked_mut() };\n        match _self.rx.poll_item::<false>(ctx, &mut _self.waker) {\n            Err(TryRecvError::Empty) => {\n                if unsafe { Pin::new_unchecked(&mut _self.sleep) }.poll(ctx).is_ready() {\n                    return Poll::Ready(Err(RecvTimeoutError::Timeout));\n                }\n                Poll::Pending\n            }\n            Err(TryRecvError::Disconnected) => Poll::Ready(Err(RecvTimeoutError::Disconnected)),\n            Ok(item) => Poll::Ready(Ok(item)),\n        }\n    }\n}\n\n/// For writing generic code with MAsyncRx & AsyncRx\npub trait AsyncRxTrait<T>: Send + 'static + fmt::Debug + fmt::Display {\n    /// Receive message, will await when channel is empty.\n    ///\n    /// Returns `Ok(T)` when successful.\n    ///\n    /// Returns Err([RecvError]) when all Tx dropped.\n    fn recv(&self) -> impl Future<Output = Result<T, RecvError>> + Send;\n\n    /// Waits for a message to be received from the channel, but only for a limited time.\n    /// Will await when channel is empty.\n    ///\n    /// The behavior is atomic, either successfully polls a message,\n    /// or operation cancelled due to timeout.\n    ///\n    /// Returns Ok(T) when successful.\n    ///\n    /// Returns Err([RecvTimeoutError::Timeout]) when a message could not be received because the channel is empty and the operation timed out.\n    ///\n    /// Returns Err([RecvTimeoutError::Disconnected]) when all Tx dropped and channel is empty.\n    #[cfg(any(feature = \"tokio\", feature = \"async_std\"))]\n    #[cfg_attr(docsrs, 
doc(cfg(any(feature = \"tokio\", feature = \"async_std\"))))]\n    fn recv_timeout(\n        &self, timeout: std::time::Duration,\n    ) -> impl Future<Output = Result<T, RecvTimeoutError>> + Send;\n\n    /// Receives a message from the channel with a custom timer function (from other async runtime).\n    ///\n    /// The behavior is atomic: the message is either received successfully or the operation is canceled due to a timeout.\n    ///\n    /// Returns `Ok(T)` when successful.\n    ///\n    /// Returns Err([RecvTimeoutError::Timeout]) when a message could not be received because the channel is empty and the operation timed out.\n    ///\n    /// Returns Err([RecvTimeoutError::Disconnected]) if the sender has been dropped and the channel is empty.\n    ///\n    /// # Argument:\n    ///\n    /// * `fut`: The sleep function. It's possible to wrap this function with cancelable handle,\n    ///   you can control when to stop polling. The return value of `fut` is ignored.\n    ///   We add generic `R` just in order to support smol::Timer.\n    fn recv_with_timer<FR, R>(\n        &self, fut: FR,\n    ) -> impl Future<Output = Result<T, RecvTimeoutError>> + Send\n    where\n        FR: Future<Output = R>;\n\n    /// Try to receive message, non-blocking.\n    ///\n    /// Returns Ok(T) when successful.\n    ///\n    /// Returns Err([TryRecvError::Empty]) when channel is empty.\n    ///\n    /// Returns Err([TryRecvError::Disconnected]) when all Tx dropped and channel is empty.\n    fn try_recv(&self) -> Result<T, TryRecvError>;\n\n    /// The number of messages in the channel at the moment\n    fn len(&self) -> usize;\n\n    /// The capacity of the channel, return None for unbounded channel.\n    fn capacity(&self) -> Option<usize>;\n\n    /// Whether channel is empty at the moment\n    fn is_empty(&self) -> bool;\n\n    /// Whether the channel is full at the moment\n    fn is_full(&self) -> bool;\n\n    /// Return true if the other side has closed\n    fn 
is_disconnected(&self) -> bool;\n\n    /// Return the number of senders\n    fn get_tx_count(&self) -> usize;\n\n    /// Return the number of receivers\n    fn get_rx_count(&self) -> usize;\n\n    fn clone_to_vec(self, count: usize) -> Vec<Self>\n    where\n        Self: Sized;\n\n    fn to_stream(self) -> Pin<Box<dyn futures_core::stream::Stream<Item = T>>>;\n\n    fn get_wakers_count(&self) -> (usize, usize);\n}\n\nimpl<F: Flavor> AsyncRxTrait<F::Item> for AsyncRx<F> {\n    #[inline(always)]\n    fn clone_to_vec(self, _count: usize) -> Vec<Self> {\n        assert_eq!(_count, 1);\n        vec![self]\n    }\n\n    #[inline(always)]\n    fn recv(&self) -> impl Future<Output = Result<F::Item, RecvError>> + Send {\n        AsyncRx::recv(self)\n    }\n\n    #[cfg(any(feature = \"tokio\", feature = \"async_std\"))]\n    #[cfg_attr(docsrs, doc(cfg(any(feature = \"tokio\", feature = \"async_std\"))))]\n    #[inline(always)]\n    fn recv_timeout(\n        &self, duration: std::time::Duration,\n    ) -> impl Future<Output = Result<F::Item, RecvTimeoutError>> + Send {\n        AsyncRx::recv_timeout(self, duration)\n    }\n\n    #[inline(always)]\n    fn recv_with_timer<FR, R>(\n        &self, sleep: FR,\n    ) -> impl Future<Output = Result<F::Item, RecvTimeoutError>> + Send\n    where\n        FR: Future<Output = R>,\n    {\n        AsyncRx::recv_with_timer(self, sleep)\n    }\n\n    #[inline(always)]\n    fn try_recv(&self) -> Result<F::Item, TryRecvError> {\n        AsyncRx::<F>::try_recv(self)\n    }\n\n    /// The number of messages in the channel at the moment\n    #[inline(always)]\n    fn len(&self) -> usize {\n        self.as_ref().len()\n    }\n\n    /// The capacity of the channel, return None for unbounded channel.\n    #[inline(always)]\n    fn capacity(&self) -> Option<usize> {\n        self.as_ref().capacity()\n    }\n\n    /// Whether channel is empty at the moment\n    #[inline(always)]\n    fn is_empty(&self) -> bool {\n        self.as_ref().is_empty()\n    
}\n\n    /// Whether the channel is full at the moment\n    #[inline(always)]\n    fn is_full(&self) -> bool {\n        self.as_ref().is_full()\n    }\n\n    /// Return true if the other side has closed\n    #[inline(always)]\n    fn is_disconnected(&self) -> bool {\n        self.as_ref().get_tx_count() == 0\n    }\n\n    #[inline(always)]\n    fn get_tx_count(&self) -> usize {\n        self.as_ref().get_tx_count()\n    }\n\n    #[inline(always)]\n    fn get_rx_count(&self) -> usize {\n        self.as_ref().get_rx_count()\n    }\n\n    #[inline(always)]\n    fn to_stream(self) -> Pin<Box<dyn futures_core::stream::Stream<Item = F::Item>>> {\n        Box::pin(self.into_stream())\n    }\n\n    fn get_wakers_count(&self) -> (usize, usize) {\n        self.as_ref().get_wakers_count()\n    }\n}\n\n/// A multi-consumer (receiver) that works in an async context.\n///\n/// Inherits from [`AsyncRx<F>`] and implements `Clone`.\n/// Additional methods in [ChannelShared] can be accessed through `Deref`.\n///\n/// You can use `into()` to convert it to `AsyncRx<F>`.\n///\n/// `MAsyncRx` can be converted into `MRx` via the `From` trait,\n/// which means you can have two types of receivers, both within async and\n/// blocking contexts, for the same channel.\npub struct MAsyncRx<F: Flavor>(pub(crate) AsyncRx<F>);\n\nimpl<F: Flavor> fmt::Debug for MAsyncRx<F> {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"MAsyncRx{:p}\", self)\n    }\n}\n\nimpl<F: Flavor> fmt::Display for MAsyncRx<F> {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"MAsyncRx{:p}\", self)\n    }\n}\n\nunsafe impl<F: Flavor> Sync for MAsyncRx<F> {}\n\nimpl<F: Flavor> Clone for MAsyncRx<F> {\n    #[inline]\n    fn clone(&self) -> Self {\n        let inner = &self.0;\n        inner.shared.add_rx();\n        Self(AsyncRx::new(inner.shared.clone()))\n    }\n}\n\nimpl<F: Flavor> From<MAsyncRx<F>> for AsyncRx<F> {\n    fn from(rx: MAsyncRx<F>) -> Self {\n    
    rx.0\n    }\n}\n\nimpl<F: Flavor + FlavorMC> MAsyncRx<F> {\n    #[inline]\n    pub(crate) fn new(shared: Arc<ChannelShared<F>>) -> Self {\n        Self(AsyncRx::new(shared))\n    }\n}\n\nimpl<F: Flavor> MAsyncRx<F> {\n    #[inline]\n    pub fn into_stream(self) -> AsyncStream<F> {\n        AsyncStream::new(self.0)\n    }\n\n    #[inline]\n    pub fn into_blocking(self) -> MRx<F> {\n        self.into()\n    }\n}\n\nimpl<F: Flavor> Deref for MAsyncRx<F> {\n    type Target = AsyncRx<F>;\n\n    /// inherit all the functions of [AsyncRx]\n    #[inline(always)]\n    fn deref(&self) -> &Self::Target {\n        &self.0\n    }\n}\n\nimpl<F: Flavor> From<MRx<F>> for MAsyncRx<F> {\n    fn from(value: MRx<F>) -> Self {\n        value.add_rx();\n        Self(AsyncRx::new(value.shared.clone()))\n    }\n}\n\nimpl<F: Flavor + FlavorMC> AsyncRxTrait<F::Item> for MAsyncRx<F> {\n    #[inline(always)]\n    fn clone_to_vec(self, count: usize) -> Vec<Self> {\n        let mut v = Vec::with_capacity(count);\n        for _ in 0..count - 1 {\n            v.push(self.clone());\n        }\n        v.push(self);\n        v\n    }\n\n    #[inline(always)]\n    fn try_recv(&self) -> Result<F::Item, TryRecvError> {\n        self.0.try_recv()\n    }\n\n    #[inline(always)]\n    fn recv(&self) -> impl Future<Output = Result<F::Item, RecvError>> + Send {\n        self.0.recv()\n    }\n\n    #[cfg(any(feature = \"tokio\", feature = \"async_std\"))]\n    #[cfg_attr(docsrs, doc(cfg(any(feature = \"tokio\", feature = \"async_std\"))))]\n    #[inline(always)]\n    fn recv_timeout(\n        &self, duration: std::time::Duration,\n    ) -> impl Future<Output = Result<F::Item, RecvTimeoutError>> + Send {\n        self.0.recv_timeout(duration)\n    }\n\n    #[inline(always)]\n    fn recv_with_timer<FR, R>(\n        &self, fut: FR,\n    ) -> impl Future<Output = Result<F::Item, RecvTimeoutError>>\n    where\n        FR: Future<Output = R>,\n    {\n        self.0.recv_with_timer(fut)\n    }\n\n    /// The 
number of messages in the channel at the moment\n    #[inline(always)]\n    fn len(&self) -> usize {\n        self.as_ref().len()\n    }\n\n    /// The capacity of the channel, return None for unbounded channel.\n    #[inline(always)]\n    fn capacity(&self) -> Option<usize> {\n        self.as_ref().capacity()\n    }\n\n    /// Whether channel is empty at the moment\n    #[inline(always)]\n    fn is_empty(&self) -> bool {\n        self.as_ref().is_empty()\n    }\n\n    /// Whether the channel is full at the moment\n    #[inline(always)]\n    fn is_full(&self) -> bool {\n        self.as_ref().is_full()\n    }\n\n    /// Return true if the other side has closed\n    #[inline(always)]\n    fn is_disconnected(&self) -> bool {\n        self.as_ref().get_tx_count() == 0\n    }\n\n    #[inline(always)]\n    fn get_tx_count(&self) -> usize {\n        self.as_ref().get_tx_count()\n    }\n\n    #[inline(always)]\n    fn get_rx_count(&self) -> usize {\n        self.as_ref().get_rx_count()\n    }\n\n    #[inline(always)]\n    fn to_stream(self) -> Pin<Box<dyn futures_core::stream::Stream<Item = F::Item>>> {\n        Box::pin(self.into_stream())\n    }\n\n    fn get_wakers_count(&self) -> (usize, usize) {\n        self.as_ref().get_wakers_count()\n    }\n}\n\nimpl<F: Flavor> Deref for AsyncRx<F> {\n    type Target = ChannelShared<F>;\n    #[inline(always)]\n    fn deref(&self) -> &ChannelShared<F> {\n        &self.shared\n    }\n}\n\nimpl<F: Flavor> AsRef<ChannelShared<F>> for AsyncRx<F> {\n    #[inline(always)]\n    fn as_ref(&self) -> &ChannelShared<F> {\n        &self.shared\n    }\n}\n\nimpl<F: Flavor> AsRef<ChannelShared<F>> for MAsyncRx<F> {\n    #[inline(always)]\n    fn as_ref(&self) -> &ChannelShared<F> {\n        &self.0.shared\n    }\n}\n\nimpl<T, F: Flavor<Item = T>> ReceiverType for AsyncRx<F> {\n    type Flavor = F;\n    #[inline(always)]\n    fn new(shared: Arc<ChannelShared<F>>) -> Self {\n        AsyncRx::new(shared)\n    }\n}\n\nimpl<F: Flavor> NotCloneable 
for AsyncRx<F> {}\n\nimpl<T, F: Flavor<Item = T> + FlavorMC> ReceiverType for MAsyncRx<F> {\n    type Flavor = F;\n    #[inline(always)]\n    fn new(shared: Arc<ChannelShared<F>>) -> Self {\n        MAsyncRx::new(shared)\n    }\n}\n"
  },
  {
    "path": "src/async_tx.rs",
    "content": "use crate::flavor::FlavorMP;\nuse crate::sink::AsyncSink;\n#[cfg(feature = \"trace_log\")]\nuse crate::tokio_task_id;\nuse crate::weak::WeakTx;\nuse crate::{shared::*, trace_log, MTx, NotCloneable, SenderType, Tx};\nuse std::cell::Cell;\nuse std::fmt;\nuse std::future::Future;\nuse std::marker::PhantomData;\nuse std::mem::{needs_drop, MaybeUninit};\nuse std::ops::Deref;\nuse std::pin::Pin;\nuse std::sync::Arc;\nuse std::task::{Context, Poll};\n\n/// A single producer (sender) that works in an async context.\n///\n/// Additional methods in [ChannelShared] can be accessed through `Deref`.\n///\n/// `AsyncTx` can be converted into `Tx` via the `From` trait.\n/// This means you can have two types of senders, both within async and blocking contexts, for the same channel.\n///\n/// **NOTE**: `AsyncTx` is not `Clone` or `Sync`.\n/// If you need concurrent access, use [MAsyncTx] instead.\n///\n/// `AsyncTx` has a `Send` marker and can be moved to other coroutines.\n/// The following code is OK:\n///\n/// ``` rust\n/// use crossfire::*;\n/// async fn foo() {\n///     let (tx, rx) = spsc::bounded_async::<usize>(100);\n///     tokio::spawn(async move {\n///          let _ = tx.send(2).await;\n///     });\n///     drop(rx);\n/// }\n/// ```\n///\n/// Because `AsyncTx` does not have a `Sync` marker, using `Arc<AsyncTx>` will lose the `Send` marker.\n///\n/// For your safety, the following code **should not compile**:\n///\n/// ``` compile_fail\n/// use crossfire::*;\n/// use std::sync::Arc;\n/// async fn foo() {\n///     let (tx, rx) = spsc::bounded_async::<usize>(100);\n///     let tx = Arc::new(tx);\n///     tokio::spawn(async move {\n///          let _ = tx.send(2).await;\n///     });\n///     drop(rx);\n/// }\n/// ```\npub struct AsyncTx<F: Flavor> {\n    pub(crate) shared: Arc<ChannelShared<F>>,\n    // Remove the Sync marker to prevent being put in Arc\n    _phan: PhantomData<Cell<()>>,\n}\n\nimpl<F: Flavor> fmt::Debug for AsyncTx<F> {\n    fn fmt(&self, f: 
&mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"AsyncTx{:p}\", self)\n    }\n}\n\nimpl<F: Flavor> fmt::Display for AsyncTx<F> {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"AsyncTx{:p}\", self)\n    }\n}\n\nunsafe impl<F: Flavor> Send for AsyncTx<F> {}\n\nimpl<F: Flavor> Drop for AsyncTx<F> {\n    #[inline(always)]\n    fn drop(&mut self) {\n        self.shared.close_tx();\n    }\n}\n\nimpl<F: Flavor> From<Tx<F>> for AsyncTx<F> {\n    fn from(value: Tx<F>) -> Self {\n        value.add_tx();\n        Self::new(value.shared.clone())\n    }\n}\n\nimpl<F: Flavor> AsyncTx<F> {\n    #[inline]\n    pub(crate) fn new(shared: Arc<ChannelShared<F>>) -> Self {\n        Self { shared, _phan: Default::default() }\n    }\n\n    #[inline]\n    pub fn into_sink(self) -> AsyncSink<F> {\n        AsyncSink::new(self)\n    }\n\n    #[inline]\n    pub fn into_blocking(self) -> Tx<F> {\n        self.into()\n    }\n\n    /// Return true if the other side has closed\n    #[inline(always)]\n    pub fn is_disconnected(&self) -> bool {\n        self.shared.is_rx_closed()\n    }\n}\n\nimpl<F: Flavor> AsyncTx<F> {\n    /// Sends a message. This method will await until the message is sent or the channel is closed.\n    ///\n    /// This function is cancellation-safe, so it's safe to use with `timeout()` and the `select!` macro.\n    /// When a [SendFuture] is dropped, no message will be sent. However, the original message\n    /// cannot be returned due to API limitations. 
For timeout scenarios, we recommend using\n    /// [AsyncTx::send_timeout()], which returns the message in a [SendTimeoutError].\n    ///\n    /// Returns `Ok(())` on success.\n    ///\n    /// Returns Err([SendError]) if the receiver has been dropped.\n    #[inline(always)]\n    pub fn send<'a>(&'a self, item: F::Item) -> SendFuture<'a, F> {\n        SendFuture { tx: self, item: MaybeUninit::new(item), waker: None }\n    }\n\n    /// Attempts to send a message without blocking.\n    ///\n    /// Returns `Ok(())` when successful.\n    ///\n    /// Returns Err([TrySendError::Full]) if the channel is full.\n    ///\n    /// Returns Err([TrySendError::Disconnected]) if the receiver has been dropped.\n    #[inline]\n    pub fn try_send(&self, item: F::Item) -> Result<(), TrySendError<F::Item>> {\n        if self.shared.is_rx_closed() {\n            return Err(TrySendError::Disconnected(item));\n        }\n        let _item = MaybeUninit::new(item);\n        if self.shared.inner.try_send(&_item) {\n            self.shared.on_send();\n            Ok(())\n        } else {\n            unsafe { Err(TrySendError::Full(_item.assume_init())) }\n        }\n    }\n\n    /// Sends a message with a timeout.\n    /// Will await when channel is full.\n    ///\n    /// The behavior is atomic: the message is either sent successfully or returned with error.\n    ///\n    /// Returns `Ok(())` when successful.\n    ///\n    /// Returns Err([SendTimeoutError::Timeout]) if the operation timed out. The error contains the message that failed to be sent.\n    ///\n    /// Returns Err([SendTimeoutError::Disconnected]) if the receiver has been dropped. 
The error contains the message that failed to be sent.\n    #[cfg(feature = \"tokio\")]\n    #[cfg_attr(docsrs, doc(cfg(feature = \"tokio\")))]\n    #[inline]\n    pub fn send_timeout(\n        &self, item: F::Item, duration: std::time::Duration,\n    ) -> SendTimeoutFuture<'_, F, tokio::time::Sleep, ()> {\n        let sleep = tokio::time::sleep(duration);\n        self.send_with_timer(item, sleep)\n    }\n    #[cfg(feature = \"async_std\")]\n    #[cfg_attr(docsrs, doc(cfg(feature = \"async_std\")))]\n    #[inline]\n    pub fn send_timeout(\n        &self, item: F::Item, duration: std::time::Duration,\n    ) -> SendTimeoutFuture<'_, F, impl Future<Output = ()>, ()> {\n        let sleep = async_std::task::sleep(duration);\n        self.send_with_timer(item, sleep)\n    }\n\n    /// Sends a message with a custom timer function (from other async runtime).\n    ///\n    /// The behavior is atomic: the message is either sent successfully or returned with error.\n    ///\n    /// Returns `Ok(())` when successful.\n    ///\n    /// Returns Err([SendTimeoutError::Timeout]) if the operation timed out. The error contains the message that failed to be sent.\n    ///\n    /// Returns Err([SendTimeoutError::Disconnected]) if the receiver has been dropped. The error contains the message that failed to be sent.\n    ///\n    /// # Argument:\n    ///\n    /// * `fut`: The sleep function. It's possible to wrap this function with cancelable handle,\n    ///   you can control when to stop polling. 
the return value of `fut` is ignore.\n    ///   We add generic `R` just in order to support smol::Timer.\n    ///\n    /// # Example:\n    ///\n    /// ```rust\n    /// extern crate smol;\n    /// use std::time::Duration;\n    /// use crossfire::*;\n    /// async fn foo() {\n    ///     let (tx, rx) = mpmc::bounded_async::<usize>(10);\n    ///     match tx.send_with_timer(1, smol::Timer::after(Duration::from_secs(1))).await {\n    ///         Ok(_)=>{\n    ///             println!(\"message sent\");\n    ///         }\n    ///         Err(SendTimeoutError::Timeout(_item))=>{\n    ///             println!(\"send timeout\");\n    ///         }\n    ///         Err(SendTimeoutError::Disconnected(_item))=>{\n    ///             println!(\"receiver-side closed\");\n    ///         }\n    ///     }\n    /// }\n    /// ```\n    #[inline]\n    pub fn send_with_timer<FR, R>(&self, item: F::Item, fut: FR) -> SendTimeoutFuture<'_, F, FR, R>\n    where\n        FR: Future<Output = R>,\n    {\n        SendTimeoutFuture { tx: self, item: MaybeUninit::new(item), waker: None, sleep: fut }\n    }\n\n    /// Internal function might change in the future. 
For public version, use AsyncSink::poll_send() instead.\n    ///\n    /// Returns `Poll::Ready(Ok(()))` on message sent.\n    ///\n    /// Returns `Poll::Pending` when the message cannot be sent yet and the waker has been registered.\n    ///\n    /// Returns `Poll::Ready(Err(()))` when all Rx dropped.\n    #[inline(always)]\n    pub(crate) fn poll_send<'a, const SINK: bool>(\n        &self, ctx: &'a mut Context, item: &MaybeUninit<F::Item>,\n        o_waker: &'a mut Option<<F::Send as Registry>::Waker>,\n    ) -> Poll<Result<(), ()>> {\n        let shared = &self.shared;\n        if shared.is_rx_closed() {\n            trace_log!(\"tx{:?}: closed {:?}\", tokio_task_id!(), o_waker);\n            return Poll::Ready(Err(()));\n        }\n        // When the result is not TrySendError::Full,\n        // make sure always take the o_waker out and abandon,\n        // to skip the timeout cleaning logic in Drop.\n        loop {\n            if shared.inner.try_send(item) {\n                shared.on_send();\n                if let Some(_waker) = o_waker.take() {\n                    trace_log!(\"tx{:?}: send {:?}\", tokio_task_id!(), _waker);\n                } else {\n                    trace_log!(\"tx{:?}: send\", tokio_task_id!());\n                }\n                return Poll::Ready(Ok(()));\n            }\n            if o_waker.is_none() {\n                if let Some(mut backoff) = shared.get_async_backoff() {\n                    loop {\n                        backoff.spin();\n                        if shared.inner.try_send(item) {\n                            shared.on_send();\n                            trace_log!(\"tx{:?}: send\", tokio_task_id!());\n                            return Poll::Ready(Ok(()));\n                        }\n                        if backoff.is_completed() {\n                            break;\n                        }\n                    }\n                }\n            }\n            match shared.senders.reg_waker_async(ctx, o_waker) {\n                Some(Poll::Pending) 
=> return Poll::Pending,\n                Some(Poll::Ready(())) => return Poll::Ready(Err(())),\n                _ => {}\n            }\n            let state = shared.sender_double_check::<SINK>(item, o_waker);\n            trace_log!(\"tx{:?}: sender_double_check {:?} {}\", tokio_task_id!(), o_waker, state);\n            if state < WakerState::Woken as u8 {\n                return Poll::Pending;\n            } else if state > WakerState::Woken as u8 {\n                if state == WakerState::Done as u8 {\n                    trace_log!(\"tx{:?}: send {:?} done\", o_waker, tokio_task_id!());\n                    let _ = o_waker.take();\n                    return Poll::Ready(Ok(()));\n                } else {\n                    debug_assert_eq!(state, WakerState::Closed as u8);\n                    trace_log!(\"tx{:?}: closed {:?}\", o_waker, tokio_task_id!());\n                    let _ = o_waker.take();\n                    return Poll::Ready(Err(()));\n                }\n            }\n            debug_assert_eq!(state, WakerState::Woken as u8);\n            continue;\n        }\n    }\n}\n\n/// A fixed-sized future object constructed by [AsyncTx::send()]\n#[must_use]\npub struct SendFuture<'a, F: Flavor> {\n    tx: &'a AsyncTx<F>,\n    item: MaybeUninit<F::Item>,\n    waker: Option<<F::Send as Registry>::Waker>,\n}\n\nunsafe impl<F: Flavor> Send for SendFuture<'_, F> where F::Item: Send {}\n\nimpl<F: Flavor> Drop for SendFuture<'_, F> {\n    #[inline]\n    fn drop(&mut self) {\n        // Cancelling the future, poll is not ready\n        if let Some(waker) = self.waker.as_ref() {\n            if self.tx.shared.abandon_send_waker(waker) && needs_drop::<F::Item>() {\n                unsafe { self.item.assume_init_drop() };\n            }\n        }\n    }\n}\n\nimpl<F: Flavor> Future for SendFuture<'_, F>\nwhere\n    F::Item: Unpin,\n{\n    type Output = Result<(), SendError<F::Item>>;\n\n    #[inline]\n    fn poll(self: Pin<&mut Self>, ctx: &mut Context) -> 
Poll<Self::Output> {\n        let mut _self = self.get_mut();\n        match _self.tx.poll_send::<false>(ctx, &_self.item, &mut _self.waker) {\n            Poll::Ready(Ok(())) => {\n                debug_assert!(_self.waker.is_none());\n                Poll::Ready(Ok(()))\n            }\n            Poll::Ready(Err(())) => {\n                let _ = _self.waker.take();\n                Poll::Ready(Err(SendError(unsafe { _self.item.assume_init_read() })))\n            }\n            Poll::Pending => Poll::Pending,\n        }\n    }\n}\n\n/// A fixed-sized future object constructed by [AsyncTx::send_timeout()]\n#[must_use]\npub struct SendTimeoutFuture<'a, F, FR, R>\nwhere\n    F: Flavor,\n    FR: Future<Output = R>,\n{\n    tx: &'a AsyncTx<F>,\n    sleep: FR,\n    item: MaybeUninit<F::Item>,\n    waker: Option<<F::Send as Registry>::Waker>,\n}\n\nunsafe impl<F, FR, R> Send for SendTimeoutFuture<'_, F, FR, R>\nwhere\n    F: Flavor,\n    FR: Future<Output = R>,\n{\n}\n\nimpl<F, FR, R> Drop for SendTimeoutFuture<'_, F, FR, R>\nwhere\n    F: Flavor,\n    FR: Future<Output = R>,\n{\n    #[inline]\n    fn drop(&mut self) {\n        if let Some(waker) = self.waker.as_ref() {\n            // Cancelling the future, poll is not ready\n            if self.tx.shared.abandon_send_waker(waker) && needs_drop::<F::Item>() {\n                unsafe { self.item.assume_init_drop() };\n            }\n        }\n    }\n}\n\nimpl<F, FR, R> Future for SendTimeoutFuture<'_, F, FR, R>\nwhere\n    F: Flavor,\n    FR: Future<Output = R>,\n    F::Item: Send + 'static + Unpin,\n{\n    type Output = Result<(), SendTimeoutError<F::Item>>;\n\n    #[inline]\n    fn poll(self: Pin<&mut Self>, ctx: &mut Context) -> Poll<Self::Output> {\n        // NOTE: we can use unchecked to bypass pin because we are not moving \"sleep\",\n        // nor is it exposed outside\n        let mut _self = unsafe { self.get_unchecked_mut() };\n        match _self.tx.poll_send::<false>(ctx, &_self.item, &mut 
_self.waker) {\n            Poll::Ready(Ok(())) => {\n                debug_assert!(_self.waker.is_none());\n                Poll::Ready(Ok(()))\n            }\n            Poll::Ready(Err(())) => {\n                let _ = _self.waker.take();\n                Poll::Ready(Err(SendTimeoutError::Disconnected(unsafe {\n                    _self.item.assume_init_read()\n                })))\n            }\n            Poll::Pending => {\n                let sleep = unsafe { Pin::new_unchecked(&mut _self.sleep) };\n                if sleep.poll(ctx).is_ready() {\n                    if _self.tx.shared.abandon_send_waker(&_self.waker.take().unwrap()) {\n                        return Poll::Ready(Err(SendTimeoutError::Timeout(unsafe {\n                            _self.item.assume_init_read()\n                        })));\n                    } else {\n                        // Message already sent in background (on_recv).\n                        return Poll::Ready(Ok(()));\n                    }\n                }\n                Poll::Pending\n            }\n        }\n    }\n}\n\n/// For writing generic code with MAsyncTx & AsyncTx\npub trait AsyncTxTrait<T>: Send + 'static + fmt::Debug + fmt::Display {\n    /// Try to send message, non-blocking\n    ///\n    /// Returns `Ok(())` when successful.\n    ///\n    /// Returns Err([TrySendError::Full]) on channel full for bounded channel.\n    ///\n    /// Returns Err([TrySendError::Disconnected]) when all Rx dropped.\n    fn try_send(&self, item: T) -> Result<(), TrySendError<T>>;\n\n    /// The number of messages in the channel at the moment\n    fn len(&self) -> usize;\n\n    /// The capacity of the channel, return None for unbounded channel.\n    fn capacity(&self) -> Option<usize>;\n\n    /// Whether channel is empty at the moment\n    fn is_empty(&self) -> bool;\n\n    /// Whether the channel is full at the moment\n    fn is_full(&self) -> bool;\n\n    /// Return true if the other side has closed\n    fn 
is_disconnected(&self) -> bool;\n\n    /// Return the number of senders\n    fn get_tx_count(&self) -> usize;\n\n    /// Return the number of receivers\n    fn get_rx_count(&self) -> usize;\n\n    fn clone_to_vec(self, count: usize) -> Vec<Self>\n    where\n        Self: Sized;\n\n    fn get_wakers_count(&self) -> (usize, usize);\n\n    /// Send message. Will await when channel is full.\n    ///\n    /// Returns `Ok(())` on successful.\n    ///\n    /// Returns Err([SendError]) when all Rx is dropped.\n    fn send(&self, item: T) -> impl Future<Output = Result<(), SendError<T>>> + Send\n    where\n        T: Send + 'static + Unpin;\n\n    /// Waits for a message to be sent into the channel, but only for a limited time.\n    /// Will await when channel is full.\n    ///\n    /// The behavior is atomic, either message sent successfully or returned on error.\n    ///\n    /// Returns `Ok(())` when successful.\n    ///\n    /// Returns Err([SendTimeoutError::Timeout]) when the operation timed out.\n    ///\n    /// Returns Err([SendTimeoutError::Disconnected]) when all Rx dropped.\n    #[cfg(any(feature = \"tokio\", feature = \"async_std\"))]\n    #[cfg_attr(docsrs, doc(cfg(any(feature = \"tokio\", feature = \"async_std\"))))]\n    fn send_timeout<'a>(\n        &'a self, item: T, duration: std::time::Duration,\n    ) -> impl Future<Output = Result<(), SendTimeoutError<T>>> + Send\n    where\n        T: Send + 'static + Unpin;\n\n    /// Sends a message with a custom timer function.\n    /// Will await when channel is full.\n    ///\n    /// The behavior is atomic: the message is either sent successfully or returned with error.\n    ///\n    /// Returns `Ok(())` when successful.\n    ///\n    /// Returns Err([SendTimeoutError::Timeout]) if the operation timed out. The error contains the message that failed to be sent.\n    ///\n    /// Returns Err([SendTimeoutError::Disconnected]) if the receiver has been dropped. 
The error contains the message that failed to be sent.\n    ///\n    /// # Argument:\n    ///\n    /// * `fut`: The sleep function. It's possible to wrap this function with cancelable handle,\n    ///   you can control when to stop polling. the return value of `fut` is ignore.\n    ///   We add generic `R` just in order to support smol::Timer\n    fn send_with_timer<FR, R>(\n        &self, item: T, fut: FR,\n    ) -> impl Future<Output = Result<(), SendTimeoutError<T>>> + Send\n    where\n        FR: Future<Output = R>,\n        T: Send + 'static + Unpin;\n}\n\nimpl<F: Flavor> AsyncTxTrait<F::Item> for AsyncTx<F> {\n    #[inline(always)]\n    fn clone_to_vec(self, count: usize) -> Vec<Self> {\n        assert_eq!(count, 1);\n        vec![self]\n    }\n\n    #[inline(always)]\n    fn try_send(&self, item: F::Item) -> Result<(), TrySendError<F::Item>> {\n        AsyncTx::try_send(self, item)\n    }\n\n    #[inline(always)]\n    fn send(&self, item: F::Item) -> impl Future<Output = Result<(), SendError<F::Item>>> + Send\n    where\n        F::Item: Send + 'static + Unpin,\n    {\n        AsyncTx::send(self, item)\n    }\n\n    #[cfg(any(feature = \"tokio\", feature = \"async_std\"))]\n    #[cfg_attr(docsrs, doc(cfg(any(feature = \"tokio\", feature = \"async_std\"))))]\n    #[inline(always)]\n    fn send_timeout<'a>(\n        &'a self, item: F::Item, duration: std::time::Duration,\n    ) -> impl Future<Output = Result<(), SendTimeoutError<F::Item>>> + Send\n    where\n        F::Item: Send + 'static + Unpin,\n    {\n        AsyncTx::send_timeout(self, item, duration)\n    }\n\n    #[inline(always)]\n    fn send_with_timer<FR, R>(\n        &self, item: F::Item, fut: FR,\n    ) -> impl Future<Output = Result<(), SendTimeoutError<F::Item>>> + Send\n    where\n        FR: Future<Output = R>,\n        F::Item: Send + 'static + Unpin,\n    {\n        AsyncTx::send_with_timer(self, item, fut)\n    }\n\n    /// The number of messages in the channel at the moment\n    
#[inline(always)]\n    fn len(&self) -> usize {\n        self.as_ref().len()\n    }\n\n    /// The capacity of the channel, return None for unbounded channel.\n    #[inline(always)]\n    fn capacity(&self) -> Option<usize> {\n        self.as_ref().capacity()\n    }\n\n    /// Whether channel is empty at the moment\n    #[inline(always)]\n    fn is_empty(&self) -> bool {\n        self.as_ref().is_empty()\n    }\n\n    /// Whether the channel is full at the moment\n    #[inline(always)]\n    fn is_full(&self) -> bool {\n        self.as_ref().is_full()\n    }\n\n    /// Return true if the other side has closed\n    #[inline(always)]\n    fn is_disconnected(&self) -> bool {\n        self.as_ref().get_rx_count() == 0\n    }\n\n    #[inline(always)]\n    fn get_tx_count(&self) -> usize {\n        self.as_ref().get_tx_count()\n    }\n\n    #[inline(always)]\n    fn get_rx_count(&self) -> usize {\n        self.as_ref().get_rx_count()\n    }\n\n    fn get_wakers_count(&self) -> (usize, usize) {\n        self.as_ref().get_wakers_count()\n    }\n}\n\n/// A multi-producer (sender) that works in an async context.\n///\n/// Inherits from [`AsyncTx<T>`] and implements `Clone`.\n/// Additional methods in [ChannelShared] can be accessed through `Deref`.\n///\n/// You can use `into()` to convert it to `AsyncTx<T>`.\n///\n/// `MAsyncTx` can be converted into `MTx` via the `From` trait,\n/// which means you can have two types of senders, both within async and\n/// blocking contexts, for the same channel.\npub struct MAsyncTx<F: Flavor>(pub(crate) AsyncTx<F>);\n\nimpl<F: Flavor> fmt::Debug for MAsyncTx<F> {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"MAsyncTx{:p}\", self)\n    }\n}\n\nimpl<F: Flavor> fmt::Display for MAsyncTx<F> {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"MAsyncTx{:p}\", self)\n    }\n}\n\nunsafe impl<F: Flavor> Sync for MAsyncTx<F> {}\n\nimpl<F: Flavor> Clone for MAsyncTx<F> {\n    #[inline]\n 
   fn clone(&self) -> Self {\n        let inner = &self.0;\n        inner.shared.add_tx();\n        Self(AsyncTx::new(inner.shared.clone()))\n    }\n}\n\nimpl<F: Flavor> From<MAsyncTx<F>> for AsyncTx<F> {\n    fn from(tx: MAsyncTx<F>) -> Self {\n        tx.0\n    }\n}\n\nimpl<F: Flavor> MAsyncTx<F> {\n    #[inline]\n    pub(crate) fn new(shared: Arc<ChannelShared<F>>) -> Self {\n        Self(AsyncTx::new(shared))\n    }\n\n    #[inline]\n    pub fn into_sink(self) -> AsyncSink<F> {\n        AsyncSink::new(self.0)\n    }\n\n    #[inline]\n    pub fn into_blocking(self) -> MTx<F> {\n        self.into()\n    }\n\n    /// Get a weak reference of sender.\n    ///\n    /// # Example\n    /// ```\n    /// use crossfire::*;\n    /// let (tx, rx) = mpsc::bounded_async::<usize>(100);\n    /// assert_eq!(tx.get_tx_count(), 1);\n    /// let weak_tx = tx.downgrade();\n    /// let tx_clone = weak_tx.upgrade::<MAsyncTx<_>>().unwrap();\n    /// assert_eq!(tx.get_tx_count(), 2);\n    /// drop(tx);\n    /// drop(tx_clone);\n    /// assert!(weak_tx.upgrade::<MAsyncTx<_>>().is_none());\n    /// assert_eq!(weak_tx.get_tx_count(), 0);\n    /// drop(rx);\n    /// ```\n    #[inline]\n    pub fn downgrade(&self) -> WeakTx<F>\n    where\n        F: FlavorMP,\n    {\n        WeakTx(self.shared.clone())\n    }\n}\n\nimpl<F: Flavor> Deref for MAsyncTx<F> {\n    type Target = AsyncTx<F>;\n\n    /// inherit all the functions of [AsyncTx]\n    #[inline(always)]\n    fn deref(&self) -> &Self::Target {\n        &self.0\n    }\n}\n\nimpl<F: Flavor> From<MTx<F>> for MAsyncTx<F> {\n    fn from(value: MTx<F>) -> Self {\n        value.add_tx();\n        Self(AsyncTx::new(value.shared.clone()))\n    }\n}\n\nimpl<F: Flavor + FlavorMP> AsyncTxTrait<F::Item> for MAsyncTx<F> {\n    #[inline(always)]\n    fn clone_to_vec(self, count: usize) -> Vec<Self> {\n        let mut v = Vec::with_capacity(count);\n        for _ in 0..count - 1 {\n            v.push(self.clone());\n        }\n        v.push(self);\n      
  v\n    }\n\n    #[inline(always)]\n    fn try_send(&self, item: F::Item) -> Result<(), TrySendError<F::Item>> {\n        self.0.try_send(item)\n    }\n\n    #[inline(always)]\n    fn send(&self, item: F::Item) -> impl Future<Output = Result<(), SendError<F::Item>>> + Send\n    where\n        F::Item: Send + 'static + Unpin,\n    {\n        self.0.send(item)\n    }\n\n    #[cfg(any(feature = \"tokio\", feature = \"async_std\"))]\n    #[cfg_attr(docsrs, doc(cfg(any(feature = \"tokio\", feature = \"async_std\"))))]\n    #[inline(always)]\n    fn send_timeout<'a>(\n        &'a self, item: F::Item, duration: std::time::Duration,\n    ) -> impl Future<Output = Result<(), SendTimeoutError<F::Item>>> + Send\n    where\n        F::Item: Send + 'static + Unpin,\n    {\n        self.0.send_timeout(item, duration)\n    }\n\n    #[inline(always)]\n    fn send_with_timer<FR, R>(\n        &self, item: F::Item, fut: FR,\n    ) -> impl Future<Output = Result<(), SendTimeoutError<F::Item>>> + Send\n    where\n        FR: Future<Output = R>,\n        F::Item: Send + 'static + Unpin,\n    {\n        self.0.send_with_timer::<FR, R>(item, fut)\n    }\n\n    /// The number of messages in the channel at the moment\n    #[inline(always)]\n    fn len(&self) -> usize {\n        self.as_ref().len()\n    }\n\n    /// The capacity of the channel, return None for unbounded channel.\n    #[inline(always)]\n    fn capacity(&self) -> Option<usize> {\n        self.as_ref().capacity()\n    }\n\n    /// Whether channel is empty at the moment\n    #[inline(always)]\n    fn is_empty(&self) -> bool {\n        self.as_ref().is_empty()\n    }\n\n    /// Whether the channel is full at the moment\n    #[inline(always)]\n    fn is_full(&self) -> bool {\n        self.as_ref().is_full()\n    }\n\n    /// Return true if the other side has closed\n    #[inline(always)]\n    fn is_disconnected(&self) -> bool {\n        self.as_ref().get_rx_count() == 0\n    }\n\n    #[inline(always)]\n    fn get_tx_count(&self) 
-> usize {\n        self.as_ref().get_tx_count()\n    }\n\n    #[inline(always)]\n    fn get_rx_count(&self) -> usize {\n        self.as_ref().get_rx_count()\n    }\n\n    fn get_wakers_count(&self) -> (usize, usize) {\n        self.as_ref().get_wakers_count()\n    }\n}\n\nimpl<F: Flavor> Deref for AsyncTx<F> {\n    type Target = ChannelShared<F>;\n    #[inline(always)]\n    fn deref(&self) -> &ChannelShared<F> {\n        &self.shared\n    }\n}\n\nimpl<F: Flavor> AsRef<ChannelShared<F>> for AsyncTx<F> {\n    #[inline(always)]\n    fn as_ref(&self) -> &ChannelShared<F> {\n        &self.shared\n    }\n}\n\nimpl<F: Flavor> AsRef<ChannelShared<F>> for MAsyncTx<F> {\n    #[inline(always)]\n    fn as_ref(&self) -> &ChannelShared<F> {\n        &self.0.shared\n    }\n}\n\nimpl<T, F: Flavor<Item = T>> SenderType for AsyncTx<F> {\n    type Flavor = F;\n    #[inline(always)]\n    fn new(shared: Arc<ChannelShared<F>>) -> Self {\n        AsyncTx::new(shared)\n    }\n}\n\nimpl<F: Flavor> NotCloneable for AsyncTx<F> {}\n\nimpl<T, F: Flavor<Item = T> + FlavorMP> SenderType for MAsyncTx<F> {\n    type Flavor = F;\n    #[inline(always)]\n    fn new(shared: Arc<ChannelShared<F>>) -> Self {\n        MAsyncTx::new(shared)\n    }\n}\n"
  },
  {
    "path": "src/backoff.rs",
    "content": "use core::num::NonZero;\nuse std::mem::transmute;\nuse std::sync::atomic::{AtomicBool, AtomicU32, Ordering};\nuse std::thread;\n\npub const SPIN_LIMIT: u16 = 6;\n\n#[cfg(target_arch = \"x86_64\")]\npub const DEFAULT_LIMIT: u16 = 6;\n#[cfg(not(target_arch = \"x86_64\"))]\npub const DEFAULT_LIMIT: u16 = 10;\npub const MAX_LIMIT: u16 = 10;\n\npub const DEFAULT_CONFIG: u32 =\n    BackoffConfig { spin_limit: SPIN_LIMIT, limit: DEFAULT_LIMIT }.to_u32();\n\nstatic DETECT_CONFIG: AtomicU32 = AtomicU32::new(DEFAULT_CONFIG);\n\nstatic _INIT: AtomicBool = AtomicBool::new(false);\n\n/// Detect cpu number and auto setting backoff config.\n///\n/// On one core system, it will be more effective (as much as 2x faster) to use yield than spinning.\n///\n/// The function need to be invoke manually in your initialization code, which does not interrupt\n/// channel operation on other thread. By saving the result to global atomic, the effect will apply after execution.\n///\n/// The result we choose not to include this in default channel initialization code, because\n/// Cpu detection process is somehow slow for benchmark standard,\n/// and `thread::available_parallelism()` might require I/O on system files, you may not\n/// like it in sandbox scenario.\npub fn detect_backoff_cfg() {\n    if _INIT.compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed).is_err() {\n        return;\n    }\n    if thread::available_parallelism().unwrap_or(NonZero::new(1).unwrap())\n        == NonZero::new(1).unwrap()\n    {\n        // For one core (like VM machine), better use yield_now instead of spin_loop.\n        DETECT_CONFIG.store(\n            BackoffConfig { spin_limit: 0, limit: DEFAULT_LIMIT }.to_u32(),\n            Ordering::Release,\n        );\n    }\n}\n\n#[derive(Debug, Clone, Copy)]\n#[repr(C)]\npub struct BackoffConfig {\n    pub spin_limit: u16,\n    pub limit: u16,\n}\n\nimpl Default for BackoffConfig {\n    #[inline(always)]\n    fn default() -> Self {\n     
   Self::from_u32(DETECT_CONFIG.load(Ordering::Relaxed))\n    }\n}\n\nimpl BackoffConfig {\n    #[inline(always)]\n    pub fn detect() -> Self {\n        Self::from_u32(DETECT_CONFIG.load(Ordering::Relaxed))\n    }\n\n    #[inline(always)]\n    pub const fn to_u32(self) -> u32 {\n        let i: u32 = unsafe { transmute(self) };\n        i\n    }\n\n    #[inline(always)]\n    pub const fn from_u32(config: u32) -> Self {\n        unsafe { transmute(config) }\n    }\n\n    #[allow(dead_code)]\n    #[inline(always)]\n    pub const fn async_limit(mut self, limit: u16) -> Self {\n        if limit < self.limit {\n            self.limit = limit;\n        }\n        self.spin_limit = limit;\n        self\n    }\n\n    #[allow(dead_code)]\n    #[inline(always)]\n    pub const fn limit(mut self, limit: u16) -> Self {\n        self.limit = limit;\n        self\n    }\n\n    #[allow(dead_code)]\n    #[inline(always)]\n    pub const fn spin(mut self, spin_limit: u16) -> Self {\n        if spin_limit < self.spin_limit {\n            self.spin_limit = spin_limit;\n        }\n        self\n    }\n}\n\npub struct Backoff {\n    step: u16,\n    pub config: BackoffConfig,\n}\n\nimpl Backoff {\n    #[inline(always)]\n    pub fn new() -> Self {\n        Self { step: 0, config: BackoffConfig::default() }\n    }\n\n    #[inline(always)]\n    pub fn from(config: BackoffConfig) -> Self {\n        Self { step: 0, config }\n    }\n\n    #[allow(dead_code)]\n    #[inline(always)]\n    pub fn spin(&mut self) -> bool {\n        for _ in 0..1 << self.step.min(SPIN_LIMIT) {\n            std::hint::spin_loop();\n        }\n        if self.step < MAX_LIMIT {\n            self.step += 1;\n            self.step > self.config.limit\n        } else {\n            true\n        }\n    }\n\n    #[inline(always)]\n    pub fn set_step(&mut self, step: u16) {\n        self.step = step;\n    }\n\n    #[inline(always)]\n    pub fn snooze(&mut self) -> bool {\n        if self.step >= self.config.limit {\n       
     return true;\n        }\n        if self.step < self.config.spin_limit {\n            for _ in 0..1 << self.step {\n                std::hint::spin_loop();\n            }\n        } else {\n            std::thread::yield_now();\n        }\n        self.step += 1;\n        false\n    }\n\n    #[allow(dead_code)]\n    #[inline(always)]\n    pub fn yield_now(&mut self) -> bool {\n        if self.step >= self.config.limit {\n            return true;\n        }\n        std::thread::yield_now();\n        self.step += 1;\n        false\n    }\n\n    #[inline(always)]\n    pub fn is_completed(&self) -> bool {\n        self.step >= self.config.limit\n    }\n\n    #[allow(dead_code)]\n    #[inline(always)]\n    pub fn step(&self) -> usize {\n        self.step as usize\n    }\n\n    #[inline(always)]\n    pub fn reset(&mut self) {\n        self.step = 0;\n    }\n}\n\n#[cfg(test)]\nmod tests {\n\n    use super::*;\n\n    #[test]\n    fn test_backoff() {\n        let backoff = Backoff::from(BackoffConfig { spin_limit: 1, limit: 0 });\n        assert!(backoff.is_completed());\n        println!(\"Option<backoff> size {}\", size_of::<Option<Backoff>>());\n        println!(\"backoff size {}\", size_of::<Backoff>());\n        println!(\"BackoffConfig size {}\", size_of::<BackoffConfig>());\n        assert_eq!(size_of::<BackoffConfig>(), size_of::<u32>());\n        let config = BackoffConfig { spin_limit: 6, limit: 7 };\n        let config_i = config.to_u32();\n        let _config = BackoffConfig::from_u32(config_i);\n        assert_eq!(config.spin_limit, _config.spin_limit);\n        assert_eq!(config.limit, _config.limit);\n\n        let mut backoff = Backoff::from(BackoffConfig { spin_limit: 2, limit: 4 });\n        assert_eq!(backoff.step, 0);\n        backoff.spin();\n        assert_eq!(backoff.step, 1);\n        backoff.snooze();\n        assert_eq!(backoff.step, 2);\n        backoff.snooze();\n        backoff.snooze();\n        backoff.snooze();\n        
backoff.snooze();\n        assert_eq!(backoff.step, 4);\n        backoff.spin();\n        assert_eq!(backoff.step, 5);\n    }\n}\n"
  },
  {
    "path": "src/blocking_rx.rs",
    "content": "use crate::backoff::*;\nuse crate::flavor::{FlavorMC, FlavorSelect};\nuse crate::select::SelectResult;\nuse crate::{shared::*, trace_log, AsyncRx, MAsyncRx, NotCloneable, ReceiverType};\nuse std::cell::Cell;\nuse std::fmt;\nuse std::marker::PhantomData;\nuse std::ops::Deref;\nuse std::sync::{atomic::Ordering, Arc};\nuse std::time::{Duration, Instant};\n\n/// A single consumer (receiver) that works in a blocking context.\n///\n/// Additional methods in [ChannelShared] can be accessed through `Deref`.\n///\n/// **NOTE**: `Rx` is not `Clone` or `Sync`.\n/// If you need concurrent access, use [MRx] instead.\n///\n/// `Rx` has a `Send` marker and can be moved to other threads.\n/// The following code is OK:\n///\n/// ``` rust\n/// use crossfire::*;\n/// let (tx, rx) = mpsc::bounded_blocking::<usize>(100);\n/// std::thread::spawn(move || {\n///     let _ = rx.recv();\n/// });\n/// drop(tx);\n/// ```\n///\n/// Because `Rx` does not have a `Sync` marker, using `Arc<Rx>` will lose the `Send` marker.\n///\n/// For your safety, the following code **should not compile**:\n///\n/// ``` compile_fail\n/// use crossfire::*;\n/// use std::sync::Arc;\n/// let (tx, rx) = mpsc::bounded_blocking(100);\n/// let rx = Arc::new(rx);\n/// std::thread::spawn(move || {\n///     let _ = rx.recv();\n/// });\n/// drop(tx);\n/// ```\npub struct Rx<F: Flavor> {\n    pub(crate) shared: Arc<ChannelShared<F>>,\n    // Remove the Sync marker to prevent being put in Arc\n    _phan: PhantomData<Cell<()>>,\n    waker_cache: WakerCache<()>,\n}\n\nunsafe impl<F: Flavor> Send for Rx<F> {}\n\nimpl<F: Flavor> fmt::Debug for Rx<F> {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"Rx{:p}\", self)\n    }\n}\n\nimpl<F: Flavor> fmt::Display for Rx<F> {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"Rx{:p}\", self)\n    }\n}\n\nimpl<F: Flavor> Drop for Rx<F> {\n    #[inline(always)]\n    fn drop(&mut self) {\n        
self.shared.close_rx();\n    }\n}\n\nimpl<F: Flavor> From<AsyncRx<F>> for Rx<F> {\n    fn from(value: AsyncRx<F>) -> Self {\n        value.add_rx();\n        Self::new(value.shared.clone())\n    }\n}\n\nimpl<F: Flavor> Rx<F> {\n    #[inline(always)]\n    pub(crate) fn new(shared: Arc<ChannelShared<F>>) -> Self {\n        Self { shared, waker_cache: WakerCache::new(), _phan: Default::default() }\n    }\n\n    #[inline(always)]\n    pub(crate) fn _recv_blocking(\n        &self, deadline: Option<Instant>,\n    ) -> Result<F::Item, RecvTimeoutError> {\n        let shared = &self.shared;\n        let mut o_waker: Option<<F::Recv as Registry>::Waker> = None;\n        macro_rules! on_recv_no_waker {\n            () => {{\n                trace_log!(\"rx: recv\");\n            }};\n        }\n        macro_rules! on_recv_waker {\n            () => {{\n                trace_log!(\"rx: recv {:?}\", o_waker);\n                self.recvs.cache_waker(o_waker, &self.waker_cache);\n            }};\n        }\n        macro_rules! 
try_recv {\n            ($handle_waker: block) => {\n                if let Some(item) = shared.inner.try_recv() {\n                    shared.on_recv();\n                    $handle_waker\n                    return Ok(item);\n                }\n            };\n        }\n        try_recv!({ on_recv_no_waker!() });\n        let mut cfg = BackoffConfig::detect().limit(shared.backoff_limit);\n        if shared.large {\n            cfg = cfg.spin(2);\n        }\n        let mut backoff = Backoff::from(cfg);\n        loop {\n            let r = backoff.snooze();\n            try_recv!({ on_recv_no_waker!() });\n            if r {\n                break;\n            }\n        }\n        let mut state;\n        'MAIN: loop {\n            shared.recvs.reg_waker_blocking(&mut o_waker, &self.waker_cache);\n            // NOTE: special API before we park\n            // because Miri is not happy about ArrayQueue pop ordering, which is not SeqCst\n            if let Some(item) = shared.inner.try_recv_final() {\n                shared.on_recv();\n                trace_log!(\"rx: recv cancel {:?} Init\", o_waker);\n                self.recvs.cancel_waker(&mut o_waker);\n                return Ok(item);\n            }\n            state = shared.recvs.commit_waiting(&o_waker);\n            trace_log!(\"rx: {:?} commit_waiting state={}\", o_waker, state);\n            if shared.is_tx_closed() {\n                break 'MAIN;\n            }\n            while state < WakerState::Woken as u8 {\n                match check_timeout(deadline) {\n                    Ok(None) => {\n                        std::thread::park();\n                    }\n                    Ok(Some(dur)) => {\n                        std::thread::park_timeout(dur);\n                    }\n                    Err(_) => {\n                        shared.abandon_recv_waker(o_waker.as_ref().unwrap());\n                        return Err(RecvTimeoutError::Timeout);\n                    }\n                }\n    
            state = self.recvs.get_waker_state(&o_waker, Ordering::SeqCst);\n                trace_log!(\"rx: after park state={}\", state);\n            }\n            if state == WakerState::Closed as u8 {\n                break 'MAIN;\n            }\n            backoff.reset();\n            loop {\n                try_recv!({ on_recv_waker!() });\n                if backoff.snooze() {\n                    break;\n                }\n            }\n        }\n        try_recv!({ on_recv_waker!() });\n        // make sure all msgs received, since we have soonze\n        Err(RecvTimeoutError::Disconnected)\n    }\n\n    /// Receives a message from the channel. This method will block until a message is received or the channel is closed.\n    ///\n    /// Returns `Ok(T)` on success.\n    ///\n    /// Returns Err([RecvError]) if the sender has been dropped.\n    #[inline]\n    pub fn recv(&self) -> Result<F::Item, RecvError> {\n        self._recv_blocking(None).map_err(|err| match err {\n            RecvTimeoutError::Disconnected => RecvError,\n            RecvTimeoutError::Timeout => unreachable!(),\n        })\n    }\n\n    /// Attempts to receive a message from the channel without blocking.\n    ///\n    /// Returns `Ok(T)` when successful.\n    ///\n    /// Returns Err([TryRecvError::Empty]) if the channel is empty.\n    ///\n    /// Returns Err([TryRecvError::Disconnected]) if the sender has been dropped and the channel is empty.\n    #[inline]\n    pub fn try_recv(&self) -> Result<F::Item, TryRecvError> {\n        self.shared.try_recv()\n    }\n\n    /// Receives a message from the channel with a timeout.\n    /// Will block when channel is empty.\n    ///\n    /// The behavior is atomic: the message is either received successfully or the operation is canceled due to a timeout.\n    ///\n    /// Returns `Ok(T)` when successful.\n    ///\n    /// Returns Err([RecvTimeoutError::Timeout]) when a message could not be received because the channel is empty and the 
operation timed out.\n    ///\n    /// Returns Err([RecvTimeoutError::Disconnected]) if the sender has been dropped and the channel is empty.\n    #[inline]\n    pub fn recv_timeout(&self, timeout: Duration) -> Result<F::Item, RecvTimeoutError> {\n        match Instant::now().checked_add(timeout) {\n            Some(deadline) => self._recv_blocking(Some(deadline)),\n            None => self.try_recv().map_err(|e| match e {\n                TryRecvError::Disconnected => RecvTimeoutError::Disconnected,\n                TryRecvError::Empty => RecvTimeoutError::Timeout,\n            }),\n        }\n    }\n\n    /// Return true if the other side has closed\n    #[inline(always)]\n    pub fn is_disconnected(&self) -> bool {\n        self.shared.is_tx_closed()\n    }\n\n    /// This method use with [select](crate::select::Select::select), guarantee non-blocking\n    /// # Panics\n    ///\n    /// Panics if SelectResult from other receiver is passed.\n    #[inline(always)]\n    pub fn read_select(&self, result: SelectResult) -> Result<F::Item, RecvError>\n    where\n        F: FlavorSelect,\n    {\n        assert_eq!(\n            self as *const Self as *const u8, result.channel,\n            \"invalid use select with another channel\"\n        );\n        self.as_ref().read_with_token(result.token)\n    }\n\n    #[inline(always)]\n    pub fn into_async(self) -> AsyncRx<F> {\n        self.into()\n    }\n}\n\n/// A multi-consumer (receiver) that works in a blocking context.\n///\n/// Inherits from [`Rx<F>`] and implements `Clone`.\n/// Additional methods can be accessed through `Deref<Target=[ChannelShared]>`.\n///\n/// You can use `into()` to convert it to `Rx<F>`.\npub struct MRx<F: Flavor>(pub(crate) Rx<F>);\n\nimpl<F: Flavor> fmt::Debug for MRx<F> {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"MRx{:p}\", self)\n    }\n}\n\nimpl<F: Flavor> fmt::Display for MRx<F> {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        
write!(f, \"MRx{:p}\", self)\n    }\n}\n\nunsafe impl<F: Flavor> Sync for MRx<F> {}\n\nimpl<F: Flavor> MRx<F>\nwhere\n    F: FlavorMC,\n{\n    #[inline(always)]\n    pub(crate) fn new(shared: Arc<ChannelShared<F>>) -> Self {\n        Self(Rx::new(shared))\n    }\n\n    #[inline(always)]\n    pub fn into_async(self) -> MAsyncRx<F> {\n        self.into()\n    }\n}\n\nimpl<F: Flavor> Clone for MRx<F> {\n    #[inline(always)]\n    fn clone(&self) -> Self {\n        let inner = &self.0;\n        inner.shared.add_rx();\n        Self(Rx::new(inner.shared.clone()))\n    }\n}\n\nimpl<F: Flavor> Deref for MRx<F> {\n    type Target = Rx<F>;\n\n    /// Inherits all the functions of [Rx].\n    #[inline(always)]\n    fn deref(&self) -> &Self::Target {\n        &self.0\n    }\n}\n\nimpl<F: Flavor> From<MRx<F>> for Rx<F> {\n    fn from(rx: MRx<F>) -> Self {\n        rx.0\n    }\n}\n\nimpl<F: Flavor> From<MAsyncRx<F>> for MRx<F> {\n    fn from(value: MAsyncRx<F>) -> Self {\n        value.add_rx();\n        Self(Rx::new(value.shared.clone()))\n    }\n}\n\n/// For writing generic code with MRx & Rx\npub trait BlockingRxTrait<T>: Send + 'static + fmt::Debug + fmt::Display {\n    /// Receives a message from the channel. 
This method will block until a message is received or the channel is closed.\n    ///\n    /// Returns `Ok(T)` on success.\n    ///\n    /// Returns Err([RecvError]) if the sender has been dropped.\n    fn recv(&self) -> Result<T, RecvError>;\n\n    /// Attempts to receive a message from the channel without blocking.\n    ///\n    /// Returns `Ok(T)` when successful.\n    ///\n    /// Returns Err([TryRecvError::Empty]) if the channel is empty.\n    ///\n    /// Returns Err([TryRecvError::Disconnected]) if the sender has been dropped and the channel is empty.\n    fn try_recv(&self) -> Result<T, TryRecvError>;\n\n    /// Receives a message from the channel with a timeout.\n    /// Will block when channel is empty.\n    ///\n    /// Returns `Ok(T)` when successful.\n    ///\n    /// Returns Err([RecvTimeoutError::Timeout]) when a message could not be received because the channel is empty and the operation timed out.\n    ///\n    /// Returns Err([RecvTimeoutError::Disconnected]) if the sender has been dropped and the channel is empty.\n    fn recv_timeout(&self, timeout: Duration) -> Result<T, RecvTimeoutError>;\n\n    /// The number of messages in the channel at the moment\n    fn len(&self) -> usize;\n\n    /// The capacity of the channel, return None for unbounded channel.\n    fn capacity(&self) -> Option<usize>;\n\n    /// Whether channel is empty at the moment\n    fn is_empty(&self) -> bool;\n\n    /// Whether the channel is full at the moment\n    fn is_full(&self) -> bool;\n\n    /// Return true if the other side has closed\n    fn is_disconnected(&self) -> bool;\n\n    /// Return the number of senders\n    fn get_tx_count(&self) -> usize;\n\n    /// Return the number of receivers\n    fn get_rx_count(&self) -> usize;\n\n    fn clone_to_vec(self, count: usize) -> Vec<Self>\n    where\n        Self: Sized;\n\n    fn get_wakers_count(&self) -> (usize, usize);\n}\n\nimpl<F: Flavor> BlockingRxTrait<F::Item> for Rx<F> {\n    #[inline(always)]\n    fn 
clone_to_vec(self, _count: usize) -> Vec<Self> {\n        assert_eq!(_count, 1);\n        vec![self]\n    }\n\n    #[inline(always)]\n    fn recv(&self) -> Result<F::Item, RecvError> {\n        Rx::recv(self)\n    }\n\n    #[inline(always)]\n    fn try_recv(&self) -> Result<F::Item, TryRecvError> {\n        Rx::try_recv(self)\n    }\n\n    #[inline(always)]\n    fn recv_timeout(&self, timeout: Duration) -> Result<F::Item, RecvTimeoutError> {\n        Rx::recv_timeout(self, timeout)\n    }\n\n    /// The number of messages in the channel at the moment\n    #[inline(always)]\n    fn len(&self) -> usize {\n        self.as_ref().len()\n    }\n\n    /// The capacity of the channel, return None for unbounded channel.\n    #[inline(always)]\n    fn capacity(&self) -> Option<usize> {\n        self.as_ref().capacity()\n    }\n\n    /// Whether channel is empty at the moment\n    #[inline(always)]\n    fn is_empty(&self) -> bool {\n        self.as_ref().is_empty()\n    }\n\n    /// Whether the channel is full at the moment\n    #[inline(always)]\n    fn is_full(&self) -> bool {\n        self.as_ref().is_full()\n    }\n\n    /// Return true if the other side has closed\n    #[inline(always)]\n    fn is_disconnected(&self) -> bool {\n        self.as_ref().is_tx_closed()\n    }\n\n    #[inline(always)]\n    fn get_tx_count(&self) -> usize {\n        self.as_ref().get_tx_count()\n    }\n\n    #[inline(always)]\n    fn get_rx_count(&self) -> usize {\n        self.as_ref().get_rx_count()\n    }\n\n    fn get_wakers_count(&self) -> (usize, usize) {\n        self.as_ref().get_wakers_count()\n    }\n}\n\nimpl<F> BlockingRxTrait<F::Item> for MRx<F>\nwhere\n    F: Flavor + FlavorMC,\n{\n    #[inline(always)]\n    fn clone_to_vec(self, count: usize) -> Vec<Self> {\n        let mut v = Vec::with_capacity(count);\n        for _ in 0..count - 1 {\n            v.push(self.clone());\n        }\n        v.push(self);\n        v\n    }\n\n    #[inline(always)]\n    fn recv(&self) -> 
Result<F::Item, RecvError> {\n        self.0.recv()\n    }\n\n    #[inline(always)]\n    fn try_recv(&self) -> Result<F::Item, TryRecvError> {\n        self.0.try_recv()\n    }\n\n    #[inline(always)]\n    fn recv_timeout(&self, timeout: Duration) -> Result<F::Item, RecvTimeoutError> {\n        self.0.recv_timeout(timeout)\n    }\n\n    /// The number of messages in the channel at the moment\n    #[inline(always)]\n    fn len(&self) -> usize {\n        self.as_ref().len()\n    }\n\n    /// The capacity of the channel, return None for unbounded channel.\n    #[inline(always)]\n    fn capacity(&self) -> Option<usize> {\n        self.as_ref().capacity()\n    }\n\n    /// Whether channel is empty at the moment\n    #[inline(always)]\n    fn is_empty(&self) -> bool {\n        self.as_ref().is_empty()\n    }\n\n    /// Whether the channel is full at the moment\n    #[inline(always)]\n    fn is_full(&self) -> bool {\n        self.as_ref().is_full()\n    }\n\n    /// Return true if the other side has closed\n    #[inline(always)]\n    fn is_disconnected(&self) -> bool {\n        self.as_ref().is_tx_closed()\n    }\n\n    #[inline(always)]\n    fn get_tx_count(&self) -> usize {\n        self.as_ref().get_tx_count()\n    }\n\n    #[inline(always)]\n    fn get_rx_count(&self) -> usize {\n        self.as_ref().get_rx_count()\n    }\n\n    fn get_wakers_count(&self) -> (usize, usize) {\n        self.as_ref().get_wakers_count()\n    }\n}\n\nimpl<F: Flavor> Deref for Rx<F> {\n    type Target = ChannelShared<F>;\n\n    #[inline(always)]\n    fn deref(&self) -> &ChannelShared<F> {\n        &self.shared\n    }\n}\n\nimpl<F: Flavor> AsRef<ChannelShared<F>> for Rx<F> {\n    #[inline(always)]\n    fn as_ref(&self) -> &ChannelShared<F> {\n        &self.shared\n    }\n}\n\nimpl<F: Flavor> AsRef<ChannelShared<F>> for MRx<F> {\n    #[inline(always)]\n    fn as_ref(&self) -> &ChannelShared<F> {\n        &self.0.shared\n    }\n}\n\nimpl<T, F: Flavor<Item = T>> ReceiverType for Rx<F> {\n    
type Flavor = F;\n    #[inline(always)]\n    fn new(shared: Arc<ChannelShared<F>>) -> Self {\n        Rx::new(shared)\n    }\n}\n\nimpl<F: Flavor> NotCloneable for Rx<F> {}\n\nimpl<F> ReceiverType for MRx<F>\nwhere\n    F: Flavor + FlavorMC,\n{\n    type Flavor = F;\n\n    #[inline(always)]\n    fn new(shared: Arc<ChannelShared<F>>) -> Self {\n        MRx::new(shared)\n    }\n}\n"
  },
  {
    "path": "src/blocking_tx.rs",
    "content": "use crate::backoff::*;\nuse crate::flavor::FlavorMP;\nuse crate::weak::WeakTx;\nuse crate::{shared::*, trace_log, AsyncTx, MAsyncTx, NotCloneable, SenderType};\nuse std::cell::Cell;\nuse std::fmt;\nuse std::marker::PhantomData;\nuse std::mem::MaybeUninit;\nuse std::ops::Deref;\nuse std::sync::atomic::Ordering;\nuse std::sync::Arc;\nuse std::time::{Duration, Instant};\n\n/// A single producer (sender) that works in a blocking context.\n///\n/// Additional methods in [ChannelShared] can be accessed through `Deref`.\n///\n/// **NOTE**: `Tx` is not `Clone` or `Sync`.\n/// If you need concurrent access, use [MTx] instead.\n///\n/// `Tx` has a `Send` marker and can be moved to other threads.\n/// The following code is OK:\n///\n/// ``` rust\n/// use crossfire::*;\n/// let (tx, rx) = spsc::bounded_blocking::<usize>(100);\n/// std::thread::spawn(move || {\n///     let _ = tx.send(1);\n/// });\n/// drop(rx);\n/// ```\n///\n/// Because `Tx` does not have a `Sync` marker, using `Arc<Tx>` will lose the `Send` marker.\n///\n/// For your safety, the following code **should not compile**:\n///\n/// ``` compile_fail\n/// use crossfire::*;\n/// use std::sync::Arc;\n/// let (tx, rx) = spsc::bounded_blocking::<usize>(100);\n/// let tx = Arc::new(tx);\n/// std::thread::spawn(move || {\n///     let _ = tx.send(1);\n/// });\n/// drop(rx);\n/// ```\npub struct Tx<F: Flavor> {\n    pub(crate) shared: Arc<ChannelShared<F>>,\n    // Remove the Sync marker to prevent being put in Arc\n    _phan: PhantomData<Cell<()>>,\n    waker_cache: WakerCache<*const F::Item>,\n}\n\nunsafe impl<F: Flavor> Send for Tx<F> {}\n\nimpl<F: Flavor> fmt::Debug for Tx<F> {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"Tx{:p}\", self)\n    }\n}\n\nimpl<F: Flavor> fmt::Display for Tx<F> {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"Tx{:p}\", self)\n    }\n}\n\nimpl<F: Flavor> Drop for Tx<F> {\n    #[inline(always)]\n    fn 
drop(&mut self) {\n        self.shared.close_tx();\n    }\n}\n\nimpl<F: Flavor> From<AsyncTx<F>> for Tx<F> {\n    fn from(value: AsyncTx<F>) -> Self {\n        value.add_tx();\n        Self::new(value.shared.clone())\n    }\n}\n\nimpl<F: Flavor> Tx<F> {\n    #[inline]\n    pub(crate) fn new(shared: Arc<ChannelShared<F>>) -> Self {\n        Self { shared, waker_cache: WakerCache::new(), _phan: Default::default() }\n    }\n\n    /// Return true if the other side has closed\n    #[inline(always)]\n    pub fn is_disconnected(&self) -> bool {\n        self.shared.is_rx_closed()\n    }\n\n    #[inline]\n    pub fn into_async(self) -> AsyncTx<F> {\n        self.into()\n    }\n}\n\nimpl<F: Flavor> Tx<F> {\n    #[inline(always)]\n    pub(crate) fn _send_bounded(\n        &self, item: &MaybeUninit<F::Item>, deadline: Option<Instant>,\n    ) -> Result<(), SendTimeoutError<F::Item>> {\n        let shared = &self.shared;\n        let large = shared.large;\n        let backoff_cfg = BackoffConfig::detect().spin(2).limit(shared.backoff_limit);\n        let mut backoff = Backoff::from(backoff_cfg);\n        let congest = shared.sender_direct_copy();\n        // disable because of issue #54\n        let direct_copy = false;\n        //        let direct_copy = deadline.is_none() && shared.sender_direct_copy();\n        if large {\n            backoff.set_step(2);\n        }\n        loop {\n            let r = if large { backoff.yield_now() } else { backoff.spin() };\n            if direct_copy && large {\n                match shared.inner.try_send_oneshot(item.as_ptr()) {\n                    Some(false) => break,\n                    None => {\n                        if r {\n                            break;\n                        }\n                        continue;\n                    }\n                    _ => {\n                        shared.on_send();\n                        trace_log!(\"tx: send\");\n                        std::thread::yield_now();\n               
         return Ok(());\n                    }\n                }\n            } else {\n                if !shared.inner.try_send(item) {\n                    if r {\n                        break;\n                    }\n                    continue;\n                }\n                shared.on_send();\n                trace_log!(\"tx: send\");\n                return Ok(());\n            }\n        }\n        let direct_copy_ptr: *const F::Item = std::ptr::null();\n        //            if direct_copy { item.as_ptr() } else { std::ptr::null() };\n\n        let mut state: u8;\n        let mut o_waker: Option<<F::Send as Registry>::Waker> = None;\n        macro_rules! return_ok {\n            () => {\n                trace_log!(\"tx: send {:?}\", o_waker);\n                if shared.is_full() {\n                    // It's for 8x1, 16x1.\n                    std::thread::yield_now();\n                    self.senders.cache_waker(o_waker, &self.waker_cache);\n                }\n                return Ok(())\n            };\n        }\n        loop {\n            self.senders.reg_waker_blocking(&mut o_waker, &self.waker_cache, direct_copy_ptr);\n            // For nx1 (more likely congest), need to reset backoff\n            // to allow more yield to receivers.\n            // For nxn (the backoff is already complete), wait a little bit.\n            state = shared.sender_double_check::<false>(item, &mut o_waker);\n            trace_log!(\"tx: sender_double_check {:?} state={}\", o_waker, state);\n            while state < WakerState::Woken as u8 {\n                if congest {\n                    state = shared.sender_snooze(&o_waker, &mut backoff);\n                }\n                if state <= WakerState::Waiting as u8 {\n                    match check_timeout(deadline) {\n                        Ok(None) => {\n                            std::thread::park();\n                        }\n                        Ok(Some(dur)) => {\n                            
std::thread::park_timeout(dur);\n                        }\n                        Err(_) => {\n                            if shared.abandon_send_waker(o_waker.as_ref().unwrap()) {\n                                return Err(SendTimeoutError::Timeout(unsafe {\n                                    item.assume_init_read()\n                                }));\n                            } else {\n                                // NOTE: Unlikely since we disable direct copy with deadline\n                                // state is WakerState::Done\n                                return Ok(());\n                            }\n                        }\n                    }\n                    state = self.senders.get_waker_state(&o_waker, Ordering::SeqCst);\n                    trace_log!(\"tx: after park state={}\", state);\n                }\n            }\n            if state == WakerState::Woken as u8 {\n                backoff.reset();\n                loop {\n                    if shared.inner.try_send(item) {\n                        shared.on_send();\n                        return_ok!();\n                    }\n                    if backoff.is_completed() {\n                        break;\n                    }\n                    backoff.snooze();\n                }\n            } else if state == WakerState::Done as u8 {\n                return_ok!();\n            } else {\n                debug_assert_eq!(state, WakerState::Closed as u8);\n                return Err(SendTimeoutError::Disconnected(unsafe { item.assume_init_read() }));\n            }\n        }\n    }\n\n    /// Sends a message. 
This method will block until the message is sent or the channel is closed.\n    ///\n    /// Returns `Ok(())` on success.\n    ///\n    /// Returns `Err(SendError)` if the receiver has been dropped.\n    ///\n    #[inline]\n    pub fn send(&self, item: F::Item) -> Result<(), SendError<F::Item>> {\n        let shared = &self.shared;\n        if shared.is_rx_closed() {\n            return Err(SendError(item));\n        }\n        let _item = MaybeUninit::new(item);\n        if shared.inner.try_send(&_item) {\n            shared.on_send();\n            return Ok(());\n        }\n        match self._send_bounded(&_item, None) {\n            Ok(_) => Ok(()),\n            Err(SendTimeoutError::Disconnected(e)) => Err(SendError(e)),\n            Err(SendTimeoutError::Timeout(_)) => unreachable!(),\n        }\n    }\n\n    /// Attempts to send a message without blocking.\n    ///\n    /// Returns `Ok(())` when successful.\n    ///\n    /// Returns Err([TrySendError::Full]) if the channel is full.\n    ///\n    /// Returns Err([TrySendError::Disconnected]) if the receiver has been dropped.\n    #[inline]\n    pub fn try_send(&self, item: F::Item) -> Result<(), TrySendError<F::Item>> {\n        let shared = &self.shared;\n        if shared.is_rx_closed() {\n            return Err(TrySendError::Disconnected(item));\n        }\n        let _item = MaybeUninit::new(item);\n        if shared.inner.try_send(&_item) {\n            shared.on_send();\n            Ok(())\n        } else {\n            Err(TrySendError::Full(unsafe { _item.assume_init_read() }))\n        }\n    }\n\n    /// Sends a message with a timeout.\n    /// Will block when channel is full.\n    ///\n    /// The behavior is atomic: the message is either sent successfully or returned on error.\n    ///\n    /// Returns `Ok(())` when successful.\n    ///\n    /// Returns Err([SendTimeoutError::Timeout]) if the operation timed out.\n    ///\n    /// Returns Err([SendTimeoutError::Disconnected]) if the receiver has 
been dropped.\n    #[inline]\n    pub fn send_timeout(\n        &self, item: F::Item, timeout: Duration,\n    ) -> Result<(), SendTimeoutError<F::Item>> {\n        let shared = &self.shared;\n        if shared.is_rx_closed() {\n            return Err(SendTimeoutError::Disconnected(item));\n        }\n        match Instant::now().checked_add(timeout) {\n            None => self.try_send(item).map_err(|e| match e {\n                TrySendError::Disconnected(t) => SendTimeoutError::Disconnected(t),\n                TrySendError::Full(t) => SendTimeoutError::Timeout(t),\n            }),\n            Some(deadline) => {\n                let _item = MaybeUninit::new(item);\n                if shared.inner.try_send(&_item) {\n                    shared.on_send();\n                    return Ok(());\n                }\n                match self._send_bounded(&_item, Some(deadline)) {\n                    Ok(_) => Ok(()),\n                    Err(e) => Err(e),\n                }\n            }\n        }\n    }\n}\n\n/// A multi-producer (sender) that works in a blocking context.\n///\n/// Inherits from [`Tx<F>`] and implements `Clone`.\n/// Additional methods can be accessed through `Deref<Target=[ChannelShared]>`.\n///\n/// You can use `into()` to convert it to `Tx<F>`.\npub struct MTx<F: Flavor>(pub(crate) Tx<F>);\n\nimpl<F: Flavor> fmt::Debug for MTx<F> {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"MTx{:p}\", self)\n    }\n}\n\nimpl<F: Flavor> fmt::Display for MTx<F> {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"MTx{:p}\", self)\n    }\n}\n\nimpl<F: Flavor> From<MTx<F>> for Tx<F> {\n    fn from(tx: MTx<F>) -> Self {\n        tx.0\n    }\n}\n\nimpl<F: Flavor> From<MAsyncTx<F>> for MTx<F> {\n    fn from(value: MAsyncTx<F>) -> Self {\n        value.add_tx();\n        Self(Tx::new(value.shared.clone()))\n    }\n}\n\nunsafe impl<F: Flavor> Sync for MTx<F> {}\n\nimpl<F: Flavor + FlavorMP> MTx<F> {\n   
 #[inline]\n    pub(crate) fn new(shared: Arc<ChannelShared<F>>) -> Self {\n        Self(Tx::new(shared))\n    }\n\n    #[inline]\n    pub fn into_async(self) -> MAsyncTx<F> {\n        self.into()\n    }\n\n    /// Get a weak reference of sender.\n    ///\n    /// # Example\n    /// ```\n    /// use crossfire::*;\n    /// let (tx, rx) = mpsc::bounded_blocking::<usize>(100);\n    /// let weak_tx = tx.downgrade();\n    /// assert_eq!(tx.get_tx_count(), 1);\n    /// let tx_clone = weak_tx.upgrade::<MTx<_>>().unwrap();\n    /// assert_eq!(tx.get_tx_count(), 2);\n    /// drop(tx);\n    /// drop(tx_clone);\n    /// assert!(weak_tx.upgrade::<MTx<_>>().is_none());\n    /// assert_eq!(weak_tx.get_tx_count(), 0);\n    /// ```\n    #[inline]\n    pub fn downgrade(&self) -> WeakTx<F> {\n        WeakTx(self.shared.clone())\n    }\n}\n\nimpl<F: Flavor> Clone for MTx<F> {\n    #[inline]\n    fn clone(&self) -> Self {\n        let inner = &self.0;\n        inner.shared.add_tx();\n        Self(Tx::new(inner.shared.clone()))\n    }\n}\n\nimpl<F: Flavor> Deref for MTx<F> {\n    type Target = Tx<F>;\n\n    /// Inherits all the functions of [Tx].\n    #[inline(always)]\n    fn deref(&self) -> &Self::Target {\n        &self.0\n    }\n}\n\n/// For writing generic code with MTx & Tx\npub trait BlockingTxTrait<T>: Send + 'static + fmt::Debug + fmt::Display {\n    /// Sends a message. 
This method will block until the message is sent or the channel is closed.\n    ///\n    /// Returns `Ok(())` on success.\n    ///\n    /// Returns Err([SendError]) if the receiver has been dropped.\n    fn send(&self, _item: T) -> Result<(), SendError<T>>;\n\n    /// Attempts to send a message without blocking.\n    ///\n    /// Returns `Ok(())` when successful.\n    ///\n    /// Returns `Err([TrySendError::Full])` if the channel is full.\n    ///\n    /// Returns Err([TrySendError::Disconnected]) if the receiver has been dropped.\n    fn try_send(&self, _item: T) -> Result<(), TrySendError<T>>;\n\n    /// Sends a message with a timeout.\n    /// Will block when channel is empty.\n    ///\n    /// Returns `Ok(())` when successful.\n    ///\n    /// Returns Err([SendTimeoutError::Timeout]) if the message could not be sent because the channel is full and the operation timed out.\n    ///\n    /// Returns Err([SendTimeoutError::Disconnected]) if the receiver has been dropped.\n    fn send_timeout(&self, item: T, timeout: Duration) -> Result<(), SendTimeoutError<T>>;\n\n    /// The number of messages in the channel at the moment\n    fn len(&self) -> usize;\n\n    /// The capacity of the channel, return None for unbounded channel.\n    fn capacity(&self) -> Option<usize>;\n\n    /// Whether channel is empty at the moment\n    fn is_empty(&self) -> bool;\n\n    /// Whether the channel is full at the moment\n    fn is_full(&self) -> bool;\n\n    /// Return true if the other side has closed\n    fn is_disconnected(&self) -> bool;\n\n    /// Return the number of senders\n    fn get_tx_count(&self) -> usize;\n\n    /// Return the number of receivers\n    fn get_rx_count(&self) -> usize;\n\n    fn clone_to_vec(self, count: usize) -> Vec<Self>\n    where\n        Self: Sized;\n\n    fn get_wakers_count(&self) -> (usize, usize);\n}\n\nimpl<F: Flavor> BlockingTxTrait<F::Item> for Tx<F> {\n    #[inline(always)]\n    fn clone_to_vec(self, _count: usize) -> Vec<Self> {\n        
assert_eq!(_count, 1);\n        vec![self]\n    }\n\n    #[inline(always)]\n    fn send(&self, item: F::Item) -> Result<(), SendError<F::Item>> {\n        Tx::send(self, item)\n    }\n\n    #[inline(always)]\n    fn try_send(&self, item: F::Item) -> Result<(), TrySendError<F::Item>> {\n        Tx::try_send(self, item)\n    }\n\n    #[inline(always)]\n    fn send_timeout(\n        &self, item: F::Item, timeout: Duration,\n    ) -> Result<(), SendTimeoutError<F::Item>> {\n        Tx::send_timeout(self, item, timeout)\n    }\n\n    /// The number of messages in the channel at the moment\n    #[inline(always)]\n    fn len(&self) -> usize {\n        self.as_ref().len()\n    }\n\n    /// The capacity of the channel, return None for unbounded channel.\n    #[inline(always)]\n    fn capacity(&self) -> Option<usize> {\n        self.as_ref().capacity()\n    }\n\n    /// Whether channel is empty at the moment\n    #[inline(always)]\n    fn is_empty(&self) -> bool {\n        self.as_ref().is_empty()\n    }\n\n    /// Whether the channel is full at the moment\n    #[inline(always)]\n    fn is_full(&self) -> bool {\n        self.as_ref().is_full()\n    }\n\n    /// Return true if the other side has closed\n    #[inline(always)]\n    fn is_disconnected(&self) -> bool {\n        self.as_ref().get_rx_count() == 0\n    }\n\n    #[inline(always)]\n    fn get_tx_count(&self) -> usize {\n        self.as_ref().get_tx_count()\n    }\n\n    #[inline(always)]\n    fn get_rx_count(&self) -> usize {\n        self.as_ref().get_rx_count()\n    }\n\n    fn get_wakers_count(&self) -> (usize, usize) {\n        self.as_ref().get_wakers_count()\n    }\n}\n\nimpl<F: Flavor + FlavorMP> BlockingTxTrait<F::Item> for MTx<F> {\n    #[inline(always)]\n    fn clone_to_vec(self, count: usize) -> Vec<Self> {\n        let mut v = Vec::with_capacity(count);\n        for _ in 0..count - 1 {\n            v.push(self.clone());\n        }\n        v.push(self);\n        v\n    }\n\n    #[inline(always)]\n    fn 
send(&self, item: F::Item) -> Result<(), SendError<F::Item>> {\n        self.0.send(item)\n    }\n\n    #[inline(always)]\n    fn try_send(&self, item: F::Item) -> Result<(), TrySendError<F::Item>> {\n        self.0.try_send(item)\n    }\n\n    #[inline(always)]\n    fn send_timeout(\n        &self, item: F::Item, timeout: Duration,\n    ) -> Result<(), SendTimeoutError<F::Item>> {\n        self.0.send_timeout(item, timeout)\n    }\n\n    /// The number of messages in the channel at the moment\n    #[inline(always)]\n    fn len(&self) -> usize {\n        self.as_ref().len()\n    }\n\n    /// The capacity of the channel, return None for unbounded channel.\n    #[inline(always)]\n    fn capacity(&self) -> Option<usize> {\n        self.as_ref().capacity()\n    }\n\n    /// Whether channel is empty at the moment\n    #[inline(always)]\n    fn is_empty(&self) -> bool {\n        self.as_ref().is_empty()\n    }\n\n    /// Whether the channel is full at the moment\n    #[inline(always)]\n    fn is_full(&self) -> bool {\n        self.as_ref().is_full()\n    }\n\n    /// Return true if the other side has closed\n    #[inline(always)]\n    fn is_disconnected(&self) -> bool {\n        self.as_ref().get_rx_count() == 0\n    }\n\n    #[inline(always)]\n    fn get_tx_count(&self) -> usize {\n        self.as_ref().get_tx_count()\n    }\n\n    #[inline(always)]\n    fn get_rx_count(&self) -> usize {\n        self.as_ref().get_rx_count()\n    }\n\n    fn get_wakers_count(&self) -> (usize, usize) {\n        self.as_ref().get_wakers_count()\n    }\n}\n\nimpl<F: Flavor> Deref for Tx<F> {\n    type Target = ChannelShared<F>;\n    #[inline(always)]\n    fn deref(&self) -> &ChannelShared<F> {\n        &self.shared\n    }\n}\n\nimpl<F: Flavor> AsRef<ChannelShared<F>> for Tx<F> {\n    #[inline(always)]\n    fn as_ref(&self) -> &ChannelShared<F> {\n        &self.shared\n    }\n}\n\nimpl<F: Flavor> AsRef<ChannelShared<F>> for MTx<F> {\n    #[inline(always)]\n    fn as_ref(&self) -> 
&ChannelShared<F> {\n        &self.0.shared\n    }\n}\n\nimpl<T, F: Flavor<Item = T>> SenderType for Tx<F> {\n    type Flavor = F;\n    #[inline(always)]\n    fn new(shared: Arc<ChannelShared<F>>) -> Self {\n        Self::new(shared)\n    }\n}\n\nimpl<F: Flavor> NotCloneable for Tx<F> {}\n\nimpl<T, F: Flavor<Item = T> + FlavorMP> SenderType for MTx<F> {\n    type Flavor = F;\n    #[inline(always)]\n    fn new(shared: Arc<ChannelShared<F>>) -> Self {\n        MTx::new(shared)\n    }\n}\n"
  },
  {
    "path": "src/collections.rs",
    "content": "use std::ptr;\nuse std::sync::{\n    atomic::{AtomicPtr, Ordering},\n    Arc, Weak,\n};\n\npub struct ArcCell<T> {\n    ptr: AtomicPtr<T>,\n}\n\nimpl<T> Drop for ArcCell<T> {\n    #[inline]\n    fn drop(&mut self) {\n        self.clear();\n    }\n}\n\nunsafe impl<T> Send for ArcCell<T> {}\nunsafe impl<T> Sync for ArcCell<T> {}\n\nimpl<T> ArcCell<T> {\n    #[inline(always)]\n    pub fn new() -> Self {\n        Self { ptr: AtomicPtr::new(ptr::null_mut()) }\n    }\n\n    #[inline(always)]\n    pub fn exists(&self) -> bool {\n        !self.ptr.load(Ordering::Acquire).is_null()\n    }\n\n    #[inline(always)]\n    pub fn pop(&self) -> Option<Arc<T>> {\n        let ptr = self.ptr.swap(ptr::null_mut(), Ordering::SeqCst);\n        if !ptr.is_null() {\n            Some(unsafe { Arc::from_raw(ptr) })\n        } else {\n            None\n        }\n    }\n\n    #[allow(dead_code)]\n    #[inline(always)]\n    pub fn clear(&self) {\n        let ptr = self.ptr.swap(ptr::null_mut(), Ordering::SeqCst);\n        if !ptr.is_null() {\n            // Convert into Weak and drop\n            let _ = unsafe { Arc::from_raw(ptr) };\n        }\n    }\n\n    #[inline(always)]\n    pub fn try_put(&self, item: Arc<T>) {\n        let item_ptr = Arc::into_raw(item) as *mut T;\n        match self.ptr.compare_exchange(\n            ptr::null_mut(),\n            item_ptr,\n            Ordering::SeqCst,\n            Ordering::Relaxed,\n        ) {\n            Ok(_) => {}\n            Err(_) => {\n                let _ = unsafe { Arc::from_raw(item_ptr) };\n            }\n        }\n    }\n}\n\n#[allow(dead_code)]\npub struct WeakCell<T> {\n    ptr: AtomicPtr<T>,\n}\n\nunsafe impl<T> Send for WeakCell<T> {}\nunsafe impl<T> Sync for WeakCell<T> {}\n\nimpl<T> Drop for WeakCell<T> {\n    #[inline]\n    fn drop(&mut self) {\n        self.clear();\n    }\n}\n\nimpl<T> WeakCell<T> {\n    #[inline(always)]\n    pub fn new() -> Self {\n        Self { ptr: AtomicPtr::new(ptr::null_mut()) }\n 
   }\n\n    #[inline(always)]\n    pub fn is_empty(&self) -> bool {\n        self.ptr.load(Ordering::SeqCst).is_null()\n    }\n\n    #[inline(always)]\n    pub fn pop(&self) -> Option<Arc<T>> {\n        let mut v = self.ptr.load(Ordering::SeqCst);\n        if v.is_null() {\n            return None;\n        }\n        loop {\n            match self.ptr.compare_exchange(v, ptr::null_mut(), Ordering::SeqCst, Ordering::Acquire)\n            {\n                Ok(_) => return unsafe { Weak::from_raw(v) }.upgrade(),\n                Err(_v) => {\n                    if _v.is_null() {\n                        return None;\n                    }\n                    v = _v;\n                }\n            }\n        }\n    }\n\n    //// it is allow to fail, with only one shot and weak Ops\n    #[inline(always)]\n    pub fn clear(&self) -> bool {\n        // Don't need accurate, it's optional\n        let v = self.ptr.load(Ordering::Acquire);\n        if v.is_null() {\n            return false;\n        }\n        match self.ptr.compare_exchange(v, ptr::null_mut(), Ordering::Release, Ordering::Relaxed) {\n            Ok(_) => {\n                let _ = unsafe { Weak::from_raw(v) };\n                true\n            }\n            Err(_v) => {\n                // We don't really have to clear this on spurious failure\n                false\n            }\n        }\n    }\n\n    #[inline(always)]\n    pub fn replace(&self, item: Weak<T>) {\n        let old_ptr = self.ptr.swap(item.into_raw() as *mut T, Ordering::SeqCst);\n        if !old_ptr.is_null() {\n            let _ = unsafe { Weak::from_raw(old_ptr) };\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n\n    #[test]\n    fn test_weak_cell() {\n        use super::*;\n        use std::sync::Arc;\n        let cell = WeakCell::new();\n        assert!(cell.is_empty());\n        let item = Arc::new(1);\n        cell.replace(Arc::downgrade(&item));\n        assert!(!cell.is_empty());\n        let _item = 
cell.pop().unwrap();\n        assert!(cell.is_empty());\n        assert!(Arc::ptr_eq(&item, &_item));\n        cell.replace(Arc::downgrade(&item));\n        assert!(!cell.is_empty());\n        // it is allow to fail under miri\n        println!(\"clear\");\n        while !cell.clear() {\n            assert!(!cell.is_empty());\n            println!(\"try clear again\");\n        }\n        assert!(cell.is_empty());\n        drop(_item);\n        assert_eq!(Arc::strong_count(&item), 1);\n        assert_eq!(Arc::weak_count(&item), 0);\n    }\n}\n"
  },
  {
    "path": "src/compat.rs",
    "content": "//! compatible layer for V2.0 API\n//!\n//! # Migration from v2.* to v3\n//!\n//! If you want to migrate to v3 API, you may add the flavor type in [MTx], [MRx], [Tx], [Rx] type,\n//! and change the channel initialization function accordingly.\n//!\n//! If you have a large project that use v2 API, and want to migrate gradually,\n//! only need to change original import from  `use crossfire::*` to `use crossfire::compat::*`.\n//! This module provides the [CompatFlavor] which erase the difference between `List` and `Array`,\n//! but registry only use RegistryMulti for spsc and mpsc for compatibility.\n//!\n//! # Compatible consideration\n//!\n//! - In the legacy API, the sender/receiver types had erased the signature between bounded or unbounded channels\n//! - The low level queue implement is for MPMC regardless of MPSC/SPSC model (which is exactly the\n//! same with V2.1)\n//! - The module structure in `crossfire::compat::*`, is exactly the same as v2.x `crossfire::*`.\n//!\n//! # Incompatible notes\n//!\n//! - keeping Into<AsyncStream<T, F>> for `AsyncRxTrait<T>` is not possible, due to `AsyncRxTrait<T>`\n//! is erased out Flavor parameter, so we add `AsyncRxTrait::to_stream()` which returns `Pin<Box<dyn futures_core::stream::Stream<Item = T>>>`.\n//!\n//! # The reason of complete API refactor\n//!\n//! I know we all hate the contagious nature of generic code, and reluctant to use trait object,\n//! it's common practice to use static dispatch like `enum-dispatch`. Originally crossfire only\n//! have 2 channel variance ([CompatFlavor]), when adding more channel flavor for specific scenario,\n//! other than common list and array, and specialized implement for spsc, mpsc, etc,\n//! I notice that when the flavor enum grow from 2 types to 4+ types,\n//! although the positive result can be observed on Arm, there was a regression in x86 async benchmark,\n//! which offset the optimization effort.\n//! 
It's impossible to erased the type while keeping the performance goal having so much types.\n//!\n//! From the aspect of compiler:\n//! - In blocking context, the compiler can eliminate the unused branch according to the context,\n//! and keeping the function calls inline, unless you put multiple variant of enum together into a\n//! collection.\n//! - In async context, the compiler is ignorance, since most of the async code is indirect calls.\n//! We can see in generated asm from cargo-show-asm, even you initialize the channel with ArrayQueue, there's still\n//! SeqQueue match branch inside the `RecvFuture::poll()`. What's worse when we have 4 types\n//! variant in the flavor enum, the compiler think the internal queue ops function no longer worth\n//! to inline (because overall flatten code will be too big), and the match branch might fallen\n//! back to a big match table instead of simple comparison. This is the reason of performance regression.\n//!\n//! From the aspect of CPU:\n//! - I had tried a manual Vtable by putting method ptr inside AsyncTx/AsyncRx, which is ok on X86,\n//! but Arm will have -50% penalty. It looks like Arm is poor on loading / caching function ptr.\n//! - Generic Arm CPU has overall poor performance (1/3 ~ 1/2) compared to mainstream x86_64, and\n//! bad at atomic CAS, a big match branch might be not so obvious than the positive effect from\n//! changing some CAS to direct load/store in the lockless algorithm.\n//!\n//! From the aspect of API usage:\n//! - There're already nice native select mechanisms on async ecology, we don't have to worry about the\n//! difference of receiver types, for flexibility.\n//! - For blocking context, it might be more common scenario to select from the same type of channels for efficiency.\n//! - The crossbeam implementation of select is decouple from channel types and message type, which\n//! 
means the API is possible for crossfire too.\n\nuse crate::flavor::{\n    flavor_dispatch, flavor_select_dispatch, queue_dispatch, Flavor, FlavorImpl, FlavorMC,\n    FlavorMP, Queue,\n};\nuse crate::shared::*;\npub use crate::{AsyncRxTrait, AsyncTxTrait, BlockingRxTrait, BlockingTxTrait};\nuse std::mem::MaybeUninit;\n\n/// Compatible flavor that wraps the Array and list type\n#[allow(clippy::large_enum_variant)]\npub enum CompatFlavor<T> {\n    Array(crate::flavor::Array<T>),\n    List(crate::flavor::List<T>),\n}\n\nmacro_rules! wrap_compat {\n    ($self: expr, $method:ident $($arg:expr)*)=>{\n        match $self {\n            Self::Array(inner) => inner.$method($($arg)*),\n            Self::List(inner) => inner.$method($($arg)*),\n        }\n    };\n}\nimpl<T> Queue for CompatFlavor<T> {\n    type Item = T;\n    queue_dispatch!(wrap_compat);\n}\n\nimpl<T> FlavorImpl for CompatFlavor<T> {\n    flavor_dispatch!(wrap_compat);\n}\n\nimpl<T> FlavorSelect for CompatFlavor<T> {\n    flavor_select_dispatch!(wrap_compat);\n}\n\nimpl<T> FlavorMP for CompatFlavor<T> {}\nimpl<T> FlavorMC for CompatFlavor<T> {}\n\n// There's not much performance difference between old RegistrySingle and RegistryMulti,\n// we just use RegistryMulti here since this is just for compatible reason.\nimpl<T: Send + 'static> Flavor for CompatFlavor<T> {\n    type Send = RegistryMultiSend<T>;\n    type Recv = RegistryMultiRecv;\n}\n\n#[inline(always)]\nfn new_list<T: Send + Unpin + 'static>() -> CompatFlavor<T> {\n    CompatFlavor::<T>::List(crate::flavor::List::new())\n}\n\n#[inline(always)]\nfn new_array<T: Send + Unpin + 'static>(mut size: usize) -> CompatFlavor<T> {\n    if size <= 1 {\n        size = 1;\n    }\n    CompatFlavor::<T>::Array(crate::flavor::Array::<T>::new(size))\n}\n\npub type Tx<T> = crate::Tx<CompatFlavor<T>>;\n\npub type MTx<T> = crate::MTx<CompatFlavor<T>>;\n\npub type Rx<T> = crate::Rx<CompatFlavor<T>>;\n\npub type MRx<T> = crate::MRx<CompatFlavor<T>>;\n\npub type AsyncTx<T> 
= crate::AsyncTx<CompatFlavor<T>>;\n\npub type MAsyncTx<T> = crate::MAsyncTx<CompatFlavor<T>>;\n\npub type AsyncRx<T> = crate::AsyncRx<CompatFlavor<T>>;\n\npub type MAsyncRx<T> = crate::MAsyncRx<CompatFlavor<T>>;\n\npub use crate::{\n    RecvError, RecvTimeoutError, SendError, SendTimeoutError, TryRecvError, TrySendError,\n};\n\npub mod sink {\n    use super::*;\n\n    pub type AsyncSink<T> = crate::sink::AsyncSink<CompatFlavor<T>>;\n}\n\npub mod stream {\n    use super::*;\n\n    pub type AsyncStream<T> = crate::stream::AsyncStream<CompatFlavor<T>>;\n}\n\npub mod spsc {\n\n    use super::*;\n\n    macro_rules! init_share {\n        ($flavor: expr) => {{\n            ChannelShared::new($flavor, RegistryMultiSend::new(), RegistryMultiRecv::new())\n        }};\n    }\n\n    /// Creates an unbounded channel for use in a blocking context.\n    ///\n    /// The sender will never block, so we use the same `Tx` for all threads.\n    pub fn unbounded_blocking<T: Unpin + Send + 'static>() -> (Tx<T>, Rx<T>) {\n        let shared = init_share!(new_list::<T>());\n        let tx = Tx::new(shared.clone());\n        let rx = Rx::new(shared);\n        (tx, rx)\n    }\n\n    /// Creates an unbounded channel for use in an async context.\n    ///\n    /// The sender will never block, so we use the same `Tx` for all threads.\n    pub fn unbounded_async<T: Unpin + Send + 'static>() -> (Tx<T>, AsyncRx<T>) {\n        let shared = init_share!(new_list::<T>());\n        let tx = Tx::new(shared.clone());\n        let rx = AsyncRx::new(shared);\n        (tx, rx)\n    }\n\n    /// Creates a bounded channel for use in a blocking context.\n    ///\n    /// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.\n    pub fn bounded_blocking<T: Unpin + Send + 'static>(size: usize) -> (Tx<T>, Rx<T>) {\n        let shared = init_share!(new_array::<T>(size));\n        let tx = Tx::new(shared.clone());\n        let rx = Rx::new(shared);\n        (tx, rx)\n  
  }\n\n    /// Creates a bounded channel where both the sender and receiver are async.\n    ///\n    /// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.\n    pub fn bounded_async<T: Unpin + Send + 'static>(size: usize) -> (AsyncTx<T>, AsyncRx<T>) {\n        let shared = init_share!(new_array::<T>(size));\n        let tx = AsyncTx::new(shared.clone());\n        let rx = AsyncRx::new(shared);\n        (tx, rx)\n    }\n\n    /// Creates a bounded channel where the sender is async and the receiver is blocking.\n    ///\n    /// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.\n    pub fn bounded_tx_async_rx_blocking<T: Unpin + Send + 'static>(\n        size: usize,\n    ) -> (AsyncTx<T>, Rx<T>) {\n        let shared = init_share!(new_array::<T>(size));\n        let tx = AsyncTx::new(shared.clone());\n        let rx = Rx::new(shared);\n        (tx, rx)\n    }\n\n    /// Creates a bounded channel where the sender is blocking and the receiver is async.\n    ///\n    /// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.\n    pub fn bounded_tx_blocking_rx_async<T: Unpin + Send + 'static>(\n        size: usize,\n    ) -> (Tx<T>, AsyncRx<T>) {\n        let shared = init_share!(new_array::<T>(size));\n        let tx = Tx::new(shared.clone());\n        let rx = AsyncRx::new(shared);\n        (tx, rx)\n    }\n}\n\npub mod mpsc {\n\n    use super::*;\n\n    macro_rules! 
init_share {\n        ($flavor: expr) => {{\n            ChannelShared::new($flavor, RegistryMultiSend::new(), RegistryMultiRecv::new())\n        }};\n    }\n\n    /// Creates an unbounded channel for use in a blocking context.\n    ///\n    /// The sender will never block, so we use the same `Tx` for all threads.\n    pub fn unbounded_blocking<T: Send + 'static + Unpin>() -> (MTx<T>, Rx<T>) {\n        let shared = init_share!(new_list::<T>());\n        let tx = MTx::new(shared.clone());\n        let rx = Rx::new(shared);\n        (tx, rx)\n    }\n\n    /// Creates an unbounded channel for use in an async context.\n    ///\n    /// Although the sender type is `MTx`, it will never block.\n    pub fn unbounded_async<T: Send + 'static + Unpin>() -> (MTx<T>, AsyncRx<T>) {\n        let shared = init_share!(new_list::<T>());\n        let tx = MTx::new(shared.clone());\n        let rx = AsyncRx::new(shared);\n        (tx, rx)\n    }\n\n    /// Creates a bounded channel for use in a blocking context.\n    ///\n    /// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.\n    pub fn bounded_blocking<T: Send + 'static + Unpin>(size: usize) -> (MTx<T>, Rx<T>) {\n        let shared = init_share!(new_array::<T>(size));\n        let tx = MTx::new(shared.clone());\n        let rx = Rx::new(shared);\n        (tx, rx)\n    }\n\n    /// Creates a bounded channel where both the sender and receiver are async.\n    ///\n    /// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.\n    pub fn bounded_async<T: Send + 'static + Unpin>(size: usize) -> (MAsyncTx<T>, AsyncRx<T>) {\n        let shared = init_share!(new_array::<T>(size));\n        let tx = MAsyncTx::new(shared.clone());\n        let rx = AsyncRx::new(shared);\n        (tx, rx)\n    }\n\n    /// Creates a bounded channel where the sender is async and the receiver is blocking.\n    ///\n    /// As a special case, a channel size of 0 is 
not supported and will be treated as a channel of size 1.\n    pub fn bounded_tx_async_rx_blocking<T: Send + 'static + Unpin>(\n        size: usize,\n    ) -> (MAsyncTx<T>, Rx<T>) {\n        let shared = init_share!(new_array::<T>(size));\n        let tx = MAsyncTx::new(shared.clone());\n        let rx = Rx::new(shared);\n        (tx, rx)\n    }\n\n    /// Creates a bounded channel where the sender is blocking and the receiver is async.\n    ///\n    /// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.\n    pub fn bounded_tx_blocking_rx_async<T: Send + 'static + Unpin>(\n        size: usize,\n    ) -> (MTx<T>, AsyncRx<T>) {\n        let shared = init_share!(new_array::<T>(size));\n        let tx = MTx::new(shared.clone());\n        let rx = AsyncRx::new(shared);\n        (tx, rx)\n    }\n}\n\npub mod mpmc {\n    //! v2 API Compatible Multiple producers, multiple consumers.\n\n    use super::*;\n\n    macro_rules! init_share {\n        ($flavor: expr) => {{\n            ChannelShared::new($flavor, RegistryMultiSend::new(), RegistryMultiRecv::new())\n        }};\n    }\n\n    /// Creates an unbounded channel for use in a blocking context.\n    ///\n    /// The sender will never block, so we use the same `Tx` for all threads.\n    pub fn unbounded_blocking<T: Send + 'static + Unpin>() -> (MTx<T>, MRx<T>) {\n        let shared = init_share!(new_list::<T>());\n        let tx = MTx::new(shared.clone());\n        let rx = MRx::new(shared);\n        (tx, rx)\n    }\n\n    /// Creates an unbounded channel for use in an async context.\n    ///\n    /// Although the sender type is `MTx`, it will never block.\n    pub fn unbounded_async<T: Send + 'static + Unpin>() -> (MTx<T>, MAsyncRx<T>) {\n        let shared = init_share!(new_list::<T>());\n        let tx = MTx::new(shared.clone());\n        let rx = MAsyncRx::new(shared);\n        (tx, rx)\n    }\n\n    /// Creates a bounded channel for use in a blocking context.\n    
///\n    /// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.\n    pub fn bounded_blocking<T: Send + 'static + Unpin>(size: usize) -> (MTx<T>, MRx<T>) {\n        let shared = init_share!(new_array::<T>(size));\n        let tx = MTx::new(shared.clone());\n        let rx = MRx::new(shared);\n        (tx, rx)\n    }\n\n    /// Creates a bounded channel for use in an async context.\n    ///\n    /// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.\n    pub fn bounded_async<T: Send + 'static + Unpin>(size: usize) -> (MAsyncTx<T>, MAsyncRx<T>) {\n        let shared = init_share!(new_array::<T>(size));\n        let tx = MAsyncTx::new(shared.clone());\n        let rx = MAsyncRx::new(shared);\n        (tx, rx)\n    }\n\n    /// Creates a bounded channel where the sender is async and the receiver is blocking.\n    ///\n    /// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.\n    pub fn bounded_tx_async_rx_blocking<T: Send + 'static + Unpin>(\n        size: usize,\n    ) -> (MAsyncTx<T>, MRx<T>) {\n        let shared = init_share!(new_array::<T>(size));\n        let tx = MAsyncTx::new(shared.clone());\n        let rx = MRx::new(shared);\n        (tx, rx)\n    }\n\n    /// Creates a bounded channel where the sender is blocking and the receiver is async.\n    ///\n    /// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.\n    pub fn bounded_tx_blocking_rx_async<T: Send + 'static + Unpin>(\n        size: usize,\n    ) -> (MTx<T>, MAsyncRx<T>) {\n        let shared = init_share!(new_array::<T>(size));\n        let tx = MTx::new(shared.clone());\n        let rx = MAsyncRx::new(shared);\n        (tx, rx)\n    }\n}\n"
  },
  {
    "path": "src/crossbeam/array_queue.rs",
    "content": "//! Modify by frostyplanet@gmail.com for the crossfire crate:\n//!\n//!   - Optimise for single consumer scenario;\n//!   - Add token interface according to crossbeam-channel\n//!   - Modified push() to push_with_ptr();\n//!   - Add try_push_oneshot() which combines the logic of push and check_full in one step;\n//!   - Remove unused functions.\n//!\n//! Fork from crossbeam-queue crate commit 5a154def002304814d50f3c7658bd30eb46b2fad\n//!\n//! The MIT License (MIT)\n//!\n//! Copyright (c) 2025, 2026 frostyplanet@gmail.com\n//!\n//! Copyright (c) 2019 The Crossbeam Project Developers\n//!\n//! Permission is hereby granted, free of charge, to any\n//! person obtaining a copy of this software and associated\n//! documentation files (the \"Software\"), to deal in the\n//! Software without restriction, including without\n//! limitation the rights to use, copy, modify, merge,\n//! publish, distribute, sublicense, and/or sell copies of\n//! the Software, and to permit persons to whom the Software\n//! is furnished to do so, subject to the following\n//! conditions:\n//!\n//! The above copyright notice and this permission notice\n//! shall be included in all copies or substantial portions\n//! of the Software.\n//!\n//! THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF\n//! ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED\n//! TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n//! PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\n//! SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n//! CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n//! OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR\n//! IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n//! DEALINGS IN THE SOFTWARE.\n//!\n//! The implementation is based on Dmitry Vyukov's bounded MPMC queue.\n//!\n//! Source:\n//!   
- <http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue>\n\nuse core::cell::UnsafeCell;\n\nuse crate::flavor::Token;\nuse core::mem::{self, MaybeUninit};\nuse core::panic::{RefUnwindSafe, UnwindSafe};\nuse core::ptr;\nuse core::sync::atomic::{self, AtomicUsize, Ordering};\nuse crossbeam_utils::{Backoff, CachePadded};\n\n/// A slot in a queue.\nstruct Slot<T> {\n    /// The current stamp.\n    ///\n    /// If the stamp equals the tail, this node will be next written to. If it equals head + 1,\n    /// this node will be next read from.\n    stamp: AtomicUsize,\n\n    /// The value in this slot.\n    value: UnsafeCell<MaybeUninit<T>>,\n}\n\n/// A bounded multi-producer multi-consumer queue.\n///\n/// This queue allocates a fixed-capacity buffer on construction, which is used to store pushed\n/// elements. The queue cannot hold more elements than the buffer allows. Attempting to push an\n/// element into a full queue will fail. Alternatively, [`force_push`] makes it possible for\n/// this queue to be used as a ring-buffer. Having a buffer allocated upfront makes this queue\n/// a bit faster than [`SegQueue`].\n///\n/// [`SegQueue`]: super::SegQueue\npub struct ArrayQueue<T, const MP: bool, const MC: bool> {\n    /// The head of the queue.\n    ///\n    /// This value is a \"stamp\" consisting of an index into the buffer and a lap, but packed into a\n    /// single `usize`. The lower bits represent the index, while the upper bits represent the lap.\n    ///\n    /// Elements are popped from the head of the queue.\n    head: CachePadded<AtomicUsize>,\n\n    /// The tail of the queue.\n    ///\n    /// This value is a \"stamp\" consisting of an index into the buffer and a lap, but packed into a\n    /// single `usize`. 
The lower bits represent the index, while the upper bits represent the lap.\n    ///\n    /// Elements are pushed into the tail of the queue.\n    tail: CachePadded<AtomicUsize>,\n\n    /// The buffer holding slots.\n    buffer: Box<[Slot<T>]>,\n\n    /// A stamp with the value of `{ lap: 1, index: 0 }`.\n    one_lap: usize,\n}\n\nunsafe impl<T, const MP: bool, const MC: bool> Sync for ArrayQueue<T, MP, MC> {}\nunsafe impl<T, const MP: bool, const MC: bool> Send for ArrayQueue<T, MP, MC> {}\n\nimpl<T, const MP: bool, const MC: bool> UnwindSafe for ArrayQueue<T, MP, MC> {}\nimpl<T, const MP: bool, const MC: bool> RefUnwindSafe for ArrayQueue<T, MP, MC> {}\n\nimpl<T, const MP: bool, const MC: bool> ArrayQueue<T, MP, MC> {\n    /// Creates a new bounded queue with the given capacity.\n    ///\n    /// # Panics\n    ///\n    /// Panics if the capacity is zero.\n    pub fn new(cap: usize) -> Self {\n        assert!(cap > 0, \"capacity must be non-zero\");\n\n        // Head is initialized to `{ lap: 0, index: 0 }`.\n        // Tail is initialized to `{ lap: 0, index: 0 }`.\n        let head = 0;\n        let tail = 0;\n\n        // Allocate a buffer of `cap` slots initialized\n        // with stamps.\n        let buffer: Box<[Slot<T>]> = (0..cap)\n            .map(|i| {\n                // Set the stamp to `{ lap: 0, index: i }`.\n                Slot { stamp: AtomicUsize::new(i), value: UnsafeCell::new(MaybeUninit::uninit()) }\n            })\n            .collect();\n\n        // One lap is the smallest power of two greater than `cap`.\n        let one_lap = (cap + 1).next_power_of_two();\n\n        Self {\n            buffer,\n            one_lap,\n            head: CachePadded::new(AtomicUsize::new(head)),\n            tail: CachePadded::new(AtomicUsize::new(tail)),\n        }\n    }\n\n    /// This function is optimised for channel suspected to be full,\n    /// It's an equal replacement to is_full(), if not try only oneshot,\n    /// return Ok(true) when push ok, 
Ok(false) when channel is full.\n    /// None when uncertain (normally needs a loop)\n    #[allow(dead_code)]\n    #[inline(always)]\n    pub unsafe fn try_push_oneshot(&self, value: *const T) -> Option<bool> {\n        // Use two SeqCst to compare tail & head, it's an equal replacement to is_full()\n        let tail = self.tail.load(Ordering::SeqCst);\n        macro_rules! check_full {\n            ($tail: expr) => {\n                let head = self.head.load(Ordering::SeqCst);\n                // If the head lags one lap behind the tail as well...\n                if head.wrapping_add(self.one_lap) == $tail {\n                    // ...then the queue is full.\n                    return Some(false);\n                }\n            };\n        }\n        check_full!(tail);\n        match self._try_push(tail, value) {\n            Ok(_) => Some(true),\n            Err((_stamp, _new_tail)) => {\n                // after the first check_full with both loads are SeqCst, this is unlikely full, but also a hot path\n                None\n            }\n        }\n    }\n\n    /// return stamp, new_tail\n    #[inline]\n    fn _try_push(&self, tail: usize, value: *const T) -> Result<bool, (usize, Option<usize>)> {\n        let cap = self.capacity();\n        // Deconstruct the tail.\n        let index = tail & (self.one_lap - 1);\n\n        // Inspect the corresponding slot.\n        debug_assert!(index < self.buffer.len());\n        let slot = unsafe { self.buffer.get_unchecked(index) };\n        let stamp = slot.stamp.load(Ordering::Acquire);\n\n        // If the tail and the stamp match, we may attempt to push.\n        if tail == stamp {\n            let new_tail = if index + 1 < cap {\n                // Same lap, incremented index.\n                // Set to `{ lap: lap, index: index + 1 }`.\n                tail + 1\n            } else {\n                let lap = tail & !(self.one_lap - 1);\n                // One lap forward, index wraps around to zero.\n          
      // Set to `{ lap: lap.wrapping_add(1), index: 0 }`.\n                lap.wrapping_add(self.one_lap)\n            };\n            if MP {\n                // Try moving the tail.\n                if let Err(t) = self.tail.compare_exchange_weak(\n                    tail,\n                    new_tail,\n                    Ordering::SeqCst,\n                    Ordering::Relaxed,\n                ) {\n                    return Err((stamp, Some(t)));\n                }\n            } else {\n                self.tail.store(new_tail, Ordering::SeqCst);\n            }\n            // Write the value into the slot and update the stamp.\n            unsafe {\n                let item: &mut MaybeUninit<T> = &mut *slot.value.get();\n                item.write(ptr::read(value));\n            }\n            slot.stamp.store(tail + 1, Ordering::Release);\n            Ok(true)\n        } else {\n            Err((stamp, None))\n        }\n    }\n\n    #[inline(always)]\n    pub unsafe fn push_with_ptr(&self, value: *const T) -> bool {\n        let backoff = Backoff::new();\n        let mut tail =\n            if MP { self.tail.load(Ordering::Relaxed) } else { self.tail.load(Ordering::Acquire) };\n        macro_rules! 
check_full {\n            ($tail: expr) => {\n                let head = if MP || MC {\n                    // NOTE: The fence is preventing live lock\n                    atomic::fence(Ordering::SeqCst);\n                    self.head.load(Ordering::Relaxed)\n                } else {\n                    self.head.load(Ordering::SeqCst)\n                };\n                // If the head lags one lap behind the tail as well...\n                if head.wrapping_add(self.one_lap) == $tail {\n                    // ...then the queue is full.\n                    return false;\n                }\n            };\n        }\n        loop {\n            match self._try_push(tail, value) {\n                Ok(res) => return res,\n                Err((stamp, new_tail)) => {\n                    if let Some(_tail) = new_tail {\n                        tail = _tail;\n                        backoff.spin();\n                        continue;\n                    }\n                    if stamp.wrapping_add(self.one_lap) == tail + 1 {\n                        check_full!(tail);\n                    }\n                    backoff.snooze();\n                    if MP {\n                        tail = self.tail.load(Ordering::Relaxed);\n                    }\n                }\n            }\n        }\n    }\n\n    #[inline]\n    pub fn start_read(&self, final_check: bool) -> Option<Token> {\n        if let Some((slot, stamp)) = self._start_read(final_check) {\n            Some(Token::new(slot as *const Slot<T> as *const u8, stamp))\n        } else {\n            None\n        }\n    }\n\n    #[inline]\n    pub fn pop(&self, final_check: bool) -> Option<T> {\n        if let Some((slot, stamp)) = self._start_read(final_check) {\n            let msg = unsafe { slot.value.get().read().assume_init() };\n            slot.stamp.store(stamp, Ordering::Release);\n            Some(msg)\n        } else {\n            None\n        }\n    }\n\n    #[inline]\n    fn _start_read(&self, 
final_check: bool) -> Option<(&Slot<T>, usize)> {\n        let mut head;\n        if final_check {\n            // because we need to check is_empty before park,\n            // use SeqCst to make Miri happy\n            head = self.head.load(Ordering::SeqCst);\n        } else {\n            let order = if MC { Ordering::Relaxed } else { Ordering::Acquire };\n            head = self.head.load(order);\n        }\n        let backoff = Backoff::new();\n        loop {\n            // Deconstruct the head.\n            let index = head & (self.one_lap - 1);\n            // Inspect the corresponding slot.\n            debug_assert!(index < self.buffer.len());\n            let slot = unsafe { self.buffer.get_unchecked(index) };\n            let stamp = slot.stamp.load(Ordering::Acquire);\n\n            // If the stamp is ahead of the head by 1, we may attempt to pop.\n            if head + 1 == stamp {\n                let new = if index + 1 < self.capacity() {\n                    // Same lap, incremented index.\n                    // Set to `{ lap: lap, index: index + 1 }`.\n                    head + 1\n                } else {\n                    let lap = head & !(self.one_lap - 1);\n                    // One lap forward, index wraps around to zero.\n                    // Set to `{ lap: lap.wrapping_add(1), index: 0 }`.\n                    lap.wrapping_add(self.one_lap)\n                };\n                if MC {\n                    // Try moving the head.\n                    if let Err(new_head) = self.head.compare_exchange_weak(\n                        head,\n                        new,\n                        Ordering::SeqCst,\n                        Ordering::Relaxed,\n                    ) {\n                        head = new_head;\n                        backoff.spin();\n                        continue;\n                    }\n                } else {\n                    self.head.store(new, Ordering::SeqCst);\n                }\n               
 let new_head = head.wrapping_add(self.one_lap);\n                return Some((slot, new_head));\n            } else {\n                if stamp == head {\n                    // Check full\n                    let tail = if MP || MC {\n                        // NOTE: The fence is preventing live lock\n                        atomic::fence(Ordering::SeqCst);\n                        self.tail.load(Ordering::Relaxed)\n                    } else {\n                        self.tail.load(Ordering::SeqCst)\n                    };\n                    // If the tail equals the head, that means the channel is empty.\n                    if tail == head {\n                        return None;\n                    }\n                    backoff.spin();\n                } else {\n                    // Snooze because we need to wait for the stamp to get updated.\n                    backoff.snooze();\n                }\n                if MC {\n                    head = self.head.load(Ordering::Relaxed);\n                }\n                continue;\n            }\n        }\n    }\n\n    #[inline(always)]\n    pub fn read(&self, token: Token) -> T {\n        let slot: &Slot<T> = unsafe { &*token.pos.cast::<Slot<T>>() };\n        let msg = unsafe { slot.value.get().read().assume_init() };\n        slot.stamp.store(token.stamp, Ordering::Release);\n        msg\n    }\n\n    /// Returns the capacity of the queue.\n    #[inline]\n    pub fn capacity(&self) -> usize {\n        self.buffer.len()\n    }\n\n    /// Returns `true` if the queue is empty.\n    #[inline(always)]\n    pub fn is_empty(&self) -> bool {\n        let head = self.head.load(Ordering::SeqCst);\n        let tail = self.tail.load(Ordering::SeqCst);\n\n        // Is the tail lagging one lap behind head?\n        // Is the tail equal to the head?\n        //\n        // Note: If the head changes just before we load the tail, that means there was a moment\n        // when the channel was not empty, so it is safe 
to just return `false`.\n        tail == head\n    }\n\n    /// Returns `true` if the queue is full.\n    #[inline(always)]\n    pub fn is_full(&self) -> bool {\n        let tail = self.tail.load(Ordering::SeqCst);\n        let head = self.head.load(Ordering::SeqCst);\n\n        // Is the head lagging one lap behind tail?\n        //\n        // Note: If the tail changes just before we load the head, that means there was a moment\n        // when the queue was not full, so it is safe to just return `false`.\n        head.wrapping_add(self.one_lap) == tail\n    }\n\n    /// Returns the number of elements in the queue.\n    #[inline]\n    pub fn len(&self) -> usize {\n        loop {\n            // Load the tail, then load the head.\n            let tail = self.tail.load(Ordering::SeqCst);\n            let head = self.head.load(Ordering::SeqCst);\n\n            // If the tail didn't change, we've got consistent values to work with.\n            if self.tail.load(Ordering::SeqCst) == tail {\n                let hix = head & (self.one_lap - 1);\n                let tix = tail & (self.one_lap - 1);\n\n                return if hix < tix {\n                    tix - hix\n                } else if hix > tix {\n                    self.capacity() - hix + tix\n                } else if tail == head {\n                    0\n                } else {\n                    self.capacity()\n                };\n            }\n        }\n    }\n}\n\nimpl<T, const MP: bool, const MC: bool> Drop for ArrayQueue<T, MP, MC> {\n    fn drop(&mut self) {\n        if mem::needs_drop::<T>() {\n            // Get the index of the head.\n            let head = *self.head.get_mut();\n            let tail = *self.tail.get_mut();\n\n            let hix = head & (self.one_lap - 1);\n            let tix = tail & (self.one_lap - 1);\n\n            let len = if hix < tix {\n                tix - hix\n            } else if hix > tix {\n                self.capacity() - hix + tix\n            } else 
if tail == head {\n                0\n            } else {\n                self.capacity()\n            };\n\n            // Loop over all slots that hold a message and drop them.\n            for i in 0..len {\n                // Compute the index of the next slot holding a message.\n                let index =\n                    if hix + i < self.capacity() { hix + i } else { hix + i - self.capacity() };\n\n                unsafe {\n                    debug_assert!(index < self.buffer.len());\n                    let slot = self.buffer.get_unchecked_mut(index);\n                    (*slot.value.get()).assume_init_drop();\n                }\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "src/crossbeam/array_queue_mpsc.rs",
    "content": "//! Modify by frostyplanet@gmail.com for the crossfire crate:\n//!\n//!   - Optimise for MPSC scenario;\n//!   - Pack head/tail for cache efficiency;\n//!   - Add token interface according to crossbeam-channel\n//!   - Modified push() to push_with_ptr();\n//!   - Add try_push_oneshot();\n//!\n//! Fork from crossbeam-queue crate commit 5a154def002304814d50f3c7658bd30eb46b2fad\n//!\n//! The MIT License (MIT)\n//!\n//! Copyright (c) 2025, 2026 frostyplanet@gmail.com\n//!\n//! Copyright (c) 2019 The Crossbeam Project Developers\n//!\n//! Permission is hereby granted, free of charge, to any\n//! person obtaining a copy of this software and associated\n//! documentation files (the \"Software\"), to deal in the\n//! Software without restriction, including without\n//! limitation the rights to use, copy, modify, merge,\n//! publish, distribute, sublicense, and/or sell copies of\n//! the Software, and to permit persons to whom the Software\n//! is furnished to do so, subject to the following\n//! conditions:\n//!\n//! The above copyright notice and this permission notice\n//! shall be included in all copies or substantial portions\n//! of the Software.\n//!\n//! THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF\n//! ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED\n//! TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n//! PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\n//! SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n//! CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n//! OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR\n//! IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n//! DEALINGS IN THE SOFTWARE.\n//!\n//! The implementation is based on Dmitry Vyukov's bounded MPMC queue.\n//!\n//! Source:\n//!   
- <http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue>\n\nuse core::cell::UnsafeCell;\n\nuse crate::flavor::Token;\nuse core::mem::{self, MaybeUninit};\nuse core::panic::{RefUnwindSafe, UnwindSafe};\nuse core::ptr;\nuse core::sync::atomic::{AtomicU64, AtomicUsize, Ordering};\nuse crossbeam_utils::{Backoff, CachePadded};\n\n/// A slot in a queue.\nstruct Slot<T> {\n    /// The current stamp.\n    stamp: AtomicUsize,\n\n    /// The value in this slot.\n    value: UnsafeCell<MaybeUninit<T>>,\n}\n\n/// A bounded multi-producer single-consumer queue.\npub struct ArrayQueueMpsc<T> {\n    /// The sender state.\n    ///\n    /// High bits: head_cached\n    /// Low bits: tail\n    sender: CachePadded<AtomicU64>,\n\n    /// The receiver state.\n    ///\n    /// High bits: tail_cached\n    /// Low bits: head\n    recv: CachePadded<AtomicU64>,\n\n    /// The buffer holding slots.\n    buffer: Box<[Slot<T>]>,\n\n    /// A stamp with the value of `{ lap: 1, index: 0 }`.\n    one_lap: u32,\n}\n\nunsafe impl<T> Sync for ArrayQueueMpsc<T> {}\nunsafe impl<T> Send for ArrayQueueMpsc<T> {}\n\nimpl<T> UnwindSafe for ArrayQueueMpsc<T> {}\nimpl<T> RefUnwindSafe for ArrayQueueMpsc<T> {}\n\nimpl<T> ArrayQueueMpsc<T> {\n    /// Creates a new bounded queue with the given capacity.\n    ///\n    /// # Panics\n    ///\n    /// Panics if the capacity is zero.\n    pub fn new(cap: usize) -> Self {\n        assert!(cap > 0, \"capacity must be non-zero\");\n        assert!(cap < (1 << 31), \"capacity too large for u32 logic\");\n\n        // Head is initialized to `{ lap: 0, index: 0 }`.\n        // Tail is initialized to `{ lap: 0, index: 0 }`.\n        let head = 0;\n        let tail = 0;\n\n        // Allocate a buffer of `cap` slots initialized\n        // with stamps.\n        let buffer: Box<[Slot<T>]> = (0..cap)\n            .map(|i| {\n                // Set the stamp to `i`.\n                Slot { stamp: AtomicUsize::new(i), value: 
UnsafeCell::new(MaybeUninit::uninit()) }\n            })\n            .collect();\n\n        // One lap is the smallest power of two greater than `cap`.\n        let one_lap = (cap + 1).next_power_of_two() as u32;\n\n        Self {\n            buffer,\n            one_lap,\n            recv: CachePadded::new(AtomicU64::new(((tail as u64) << 32) | (head as u64))),\n            sender: CachePadded::new(AtomicU64::new(((head as u64) << 32) | (tail as u64))),\n        }\n    }\n\n    #[inline(always)]\n    fn _try_push(\n        &self, sender_val: u64, tail: u32, head_cached: u32, value: *const T,\n    ) -> Result<bool, u64> {\n        let index = (tail & (self.one_lap - 1)) as usize;\n        let new_tail = if index + 1 < self.buffer.len() {\n            tail + 1\n        } else {\n            let lap = tail & !(self.one_lap - 1);\n            lap.wrapping_add(self.one_lap)\n        };\n        let new_sender_val = ((head_cached as u64) << 32) | (new_tail as u64);\n        match self.sender.compare_exchange_weak(\n            sender_val,\n            new_sender_val,\n            Ordering::SeqCst,\n            Ordering::Acquire,\n        ) {\n            Ok(_) => {\n                debug_assert!(index < self.buffer.len());\n                unsafe {\n                    let slot = self.buffer.get_unchecked(index);\n\n                    let item: &mut MaybeUninit<T> = &mut *slot.value.get();\n                    item.write(ptr::read(value));\n                    slot.stamp.store((tail as usize).wrapping_add(1), Ordering::Release);\n                }\n                Ok(true)\n            }\n            Err(current) => Err(current),\n        }\n    }\n\n    #[inline(always)]\n    pub unsafe fn push_with_ptr(&self, value: *const T) -> bool {\n        let backoff = Backoff::new();\n        let mut sender_val = self.sender.load(Ordering::Relaxed);\n        loop {\n            let tail = sender_val as u32;\n            let mut head_cached = (sender_val >> 32) as u32;\n\n    
        if head_cached.wrapping_add(self.one_lap) == tail {\n                backoff.spin();\n                let head = self.recv.load(Ordering::SeqCst) as u32;\n                if head == head_cached {\n                    return false;\n                }\n                head_cached = head;\n            }\n            match self._try_push(sender_val, tail, head_cached, value) {\n                Ok(res) => return res,\n                Err(current) => {\n                    sender_val = current;\n                    backoff.snooze();\n                }\n            }\n        }\n    }\n\n    /// This function is optimised for channel suspected to be full,\n    /// It's an equal replacement to is_full(), if not try only oneshot,\n    /// return Ok(true) when push ok, Ok(false) when channel is full.\n    /// None when uncertain (normally needs a loop)\n    #[inline(always)]\n    pub unsafe fn try_push_oneshot(&self, value: *const T) -> Option<bool> {\n        let sender_val = self.sender.load(Ordering::SeqCst);\n        let tail = sender_val as u32;\n        let mut head_cached = (sender_val >> 32) as u32;\n        if head_cached.wrapping_add(self.one_lap) == tail {\n            let head = self.recv.load(Ordering::SeqCst) as u32;\n            if head == head_cached {\n                return Some(false);\n            }\n            head_cached = head;\n        }\n        self._try_push(sender_val, tail, head_cached, value).ok()\n    }\n\n    #[inline]\n    pub fn start_read(&self, final_check: bool) -> Option<Token> {\n        if let Some((head, tail_cached)) = self._start_read::<true>(final_check) {\n            let (slot, packed_recv) = self._read(head, tail_cached);\n            Some(Token::new(slot as *const Slot<T> as *const u8, packed_recv as usize))\n        } else {\n            None\n        }\n    }\n\n    #[inline]\n    pub fn pop(&self, final_check: bool) -> Option<T> {\n        if let Some((head, tail_cached)) = self._start_read::<true>(final_check) {\n   
         let (slot, packed_recv) = self._read(head, tail_cached);\n            let msg = unsafe { slot.value.get().read().assume_init() };\n            // Update recv (which contains head) to free the slot.\n            self.recv.store(packed_recv, Ordering::SeqCst);\n            Some(msg)\n        } else {\n            None\n        }\n    }\n\n    #[inline]\n    pub fn pop_cached(&self) -> Option<T> {\n        if let Some((head, tail_cached)) = self._start_read::<false>(false) {\n            let (slot, packed_recv) = self._read(head, tail_cached);\n            let msg = unsafe { slot.value.get().read().assume_init() };\n            // Update recv (which contains head) to free the slot.\n            self.recv.store(packed_recv, Ordering::SeqCst);\n            Some(msg)\n        } else {\n            None\n        }\n    }\n\n    /// return head, tail_cached\n    #[inline]\n    fn _start_read<const SPIN: bool>(&self, _final_check: bool) -> Option<(u32, u32)> {\n        let recv_val = self.recv.load(Ordering::Relaxed);\n        let head = recv_val as u32;\n        let mut tail_cached = (recv_val >> 32) as u32;\n\n        if tail_cached == head {\n            if SPIN {\n                core::hint::spin_loop();\n                let tail = if _final_check {\n                    self.sender.load(Ordering::SeqCst) as u32\n                } else {\n                    self.sender.load(Ordering::Acquire) as u32\n                };\n                if head == tail {\n                    return None;\n                }\n                tail_cached = tail;\n            } else {\n                return None;\n            }\n        }\n        Some((head, tail_cached))\n    }\n\n    #[inline]\n    fn _read(&self, head: u32, tail_cached: u32) -> (&Slot<T>, u64) {\n        // Deconstruct the head.\n        let index = (head & (self.one_lap - 1)) as usize;\n        debug_assert!(index < self.buffer.len());\n        let slot = unsafe { self.buffer.get_unchecked(index) };\n        
// Wait for stamp update\n        let target_stamp = (head as usize).wrapping_add(1);\n        loop {\n            let stamp = slot.stamp.load(Ordering::Acquire);\n            if stamp == target_stamp {\n                break;\n            }\n            core::hint::spin_loop();\n        }\n        // Update head\n        let new_head = if index + 1 < self.buffer.len() {\n            head + 1\n        } else {\n            let lap = head & !(self.one_lap - 1);\n            lap.wrapping_add(self.one_lap)\n        };\n        (slot, ((tail_cached as u64) << 32) | (new_head as u64))\n    }\n\n    #[inline(always)]\n    pub fn read(&self, token: Token) -> T {\n        let slot: &Slot<T> = unsafe { &*token.pos.cast::<Slot<T>>() };\n        let msg = unsafe { slot.value.get().read().assume_init() };\n        // Do not update stamp\n        self.recv.store(token.stamp as u64, Ordering::SeqCst);\n        msg\n    }\n\n    /// Returns the capacity of the queue.\n    #[inline]\n    pub fn capacity(&self) -> usize {\n        self.buffer.len()\n    }\n\n    /// Returns `true` if the queue is empty.\n    #[inline(always)]\n    pub fn is_empty(&self) -> bool {\n        let head = self.recv.load(Ordering::SeqCst) as u32;\n        let tail = self.sender.load(Ordering::SeqCst) as u32;\n        tail == head\n    }\n\n    /// Returns `true` if the queue is full.\n    #[inline(always)]\n    pub fn is_full(&self) -> bool {\n        let tail = self.sender.load(Ordering::SeqCst) as u32;\n        let head = self.recv.load(Ordering::SeqCst) as u32;\n        head.wrapping_add(self.one_lap) == tail\n    }\n\n    /// Returns the number of elements in the queue.\n    #[inline]\n    pub fn len(&self) -> usize {\n        loop {\n            let tail = self.sender.load(Ordering::SeqCst) as u32;\n            let head = self.recv.load(Ordering::SeqCst) as u32;\n\n            if self.sender.load(Ordering::SeqCst) as u32 == tail {\n                let hix = head & (self.one_lap - 1);\n                
let tix = tail & (self.one_lap - 1);\n\n                return if hix < tix {\n                    (tix - hix) as usize\n                } else if hix > tix {\n                    self.capacity() - (hix - tix) as usize\n                } else if tail == head {\n                    0\n                } else {\n                    self.capacity()\n                };\n            }\n        }\n    }\n}\n\nimpl<T> Drop for ArrayQueueMpsc<T> {\n    fn drop(&mut self) {\n        if mem::needs_drop::<T>() {\n            let recv_val = *self.recv.get_mut();\n            let sender_val = *self.sender.get_mut();\n\n            let head = recv_val as u32;\n            let tail = sender_val as u32;\n\n            let hix = head & (self.one_lap - 1);\n            let tix = tail & (self.one_lap - 1);\n\n            let len = if hix < tix {\n                tix - hix\n            } else if hix > tix {\n                self.capacity() as u32 - hix + tix\n            } else if tail == head {\n                0\n            } else {\n                self.capacity() as u32\n            };\n\n            for i in 0..(len as usize) {\n                let index = if (hix as usize) + i < self.capacity() {\n                    (hix as usize) + i\n                } else {\n                    (hix as usize) + i - self.capacity()\n                };\n\n                unsafe {\n                    debug_assert!(index < self.buffer.len());\n                    let slot = self.buffer.get_unchecked_mut(index);\n                    (*slot.value.get()).assume_init_drop();\n                }\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "src/crossbeam/array_queue_spsc.rs",
    "content": "//! Modify by frostyplanet@gmail.com for the crossfire crate:\n//!\n//!   - Modify for SPSC, remove the `stamp` field\n//!   - Optimization: pack head/tail and their cached counterparts into single AtomicU64 (u32 each).\n//!   - Add token interface according to crossbeam-channel\n//!   - Modified push() to push_with_ptr();\n//!   - Add try_push_oneshot() which combines the logic of push and check_full in one step;\n//!   - Remove unused functions.\n//!\n//! Fork from crossbeam-queue crate commit 5a154def002304814d50f3c7658bd30eb46b2fad\n//!\n//! The MIT License (MIT)\n//!\n//! Copyright (c) 2025, 2026 frostyplanet@gmail.com\n//!\n//! Copyright (c) 2019 The Crossbeam Project Developers\n//!\n//! Permission is hereby granted, free of charge, to any\n//! person obtaining a copy of this software and associated\n//! documentation files (the \"Software\"), to deal in the\n//! Software without restriction, including without\n//! limitation the rights to use, copy, modify, merge,\n//! publish, distribute, sublicense, and/or sell copies of\n//! the Software, and to permit persons to whom the Software\n//! is furnished to do so, subject to the following\n//! conditions:\n//!\n//! The above copyright notice and this permission notice\n//! shall be included in all copies or substantial portions\n//! of the Software.\n//!\n//! THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF\n//! ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED\n//! TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n//! PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\n//! SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n//! CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n//! OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR\n//! IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n//! DEALINGS IN THE SOFTWARE.\n//!\n//! The implementation is based on Dmitry Vyukov's bounded MPMC queue.\n//!\n//! Source:\n//!   
- <http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue>\n\nuse core::cell::UnsafeCell;\n\nuse crate::flavor::Token;\nuse core::mem::{self, MaybeUninit};\nuse core::panic::{RefUnwindSafe, UnwindSafe};\nuse core::ptr;\nuse core::sync::atomic::{AtomicU64, Ordering};\nuse crossbeam_utils::CachePadded;\n\n/// A slot in a queue.\nstruct Slot<T> {\n    /// The value in this slot.\n    value: UnsafeCell<MaybeUninit<T>>,\n}\n\n/// A bounded single-producer single-consumer queue.\n///\n/// This queue allocates a fixed-capacity buffer on construction, which is used to store pushed\n/// elements. The queue cannot hold more elements than the buffer allows. Attempting to push an\n/// element into a full queue will fail. Having a buffer allocated upfront makes this queue\n/// a bit faster than [`SegQueue`].\n///\n/// [`SegQueue`]: super::SegQueue\npub struct ArrayQueueSpsc<T> {\n    /// The sender side of the queue.\n    ///\n    /// Packs two `u32` stamps (each an index into the buffer plus a lap) into a single `u64`.\n    ///\n    /// Elements are pushed at the tail of the queue.\n    ///\n    /// High bits: head_cached\n    /// Low bits: tail\n    sender: CachePadded<AtomicU64>,\n\n    /// The receiver side of the queue.\n    ///\n    /// This value is a \"stamp\" consisting of an index into the buffer and a lap, but packed into a\n    /// single `usize`. 
The lower bits represent the index, while the upper bits represent the lap.\n    ///\n    /// Elements are pushed into the tail of the queue.\n    ///\n    /// High bits: tail_cached\n    /// Low bits: head\n    recv: CachePadded<AtomicU64>,\n\n    /// The buffer holding slots.\n    buffer: Box<[Slot<T>]>,\n\n    /// A stamp with the value of `{ lap: 1, index: 0 }`.\n    one_lap: u32,\n}\n\nunsafe impl<T> Sync for ArrayQueueSpsc<T> {}\nunsafe impl<T> Send for ArrayQueueSpsc<T> {}\n\nimpl<T> UnwindSafe for ArrayQueueSpsc<T> {}\nimpl<T> RefUnwindSafe for ArrayQueueSpsc<T> {}\n\nimpl<T> ArrayQueueSpsc<T> {\n    /// Creates a new bounded queue with the given capacity.\n    ///\n    /// # Panics\n    ///\n    /// Panics if the capacity is zero.\n    pub fn new(cap: usize) -> Self {\n        assert!(cap > 0, \"capacity must be non-zero\");\n        assert!(cap < (1 << 31), \"capacity too large for u32 logic\");\n\n        // Head is initialized to `{ lap: 0, index: 0 }`.\n        // Tail is initialized to `{ lap: 0, index: 0 }`.\n        let head = 0;\n        let tail = 0;\n\n        // Allocate a buffer of `cap` slots initialized\n        // with stamps.\n        let buffer: Box<[Slot<T>]> =\n            (0..cap).map(|_i| Slot { value: UnsafeCell::new(MaybeUninit::uninit()) }).collect();\n\n        // One lap is the smallest power of two greater than `cap`.\n        let one_lap = (cap + 1).next_power_of_two() as u32;\n\n        Self {\n            buffer,\n            one_lap,\n            recv: CachePadded::new(AtomicU64::new(((tail as u64) << 32) | (head as u64))),\n            sender: CachePadded::new(AtomicU64::new(((head as u64) << 32) | (tail as u64))),\n        }\n    }\n\n    #[inline(always)]\n    fn _try_push(&self, order: Ordering, value: *const T) -> bool {\n        let sender_val = self.sender.load(Ordering::Relaxed);\n        let tail = sender_val as u32;\n        let mut head_cached = (sender_val >> 32) as u32;\n\n        if 
head_cached.wrapping_add(self.one_lap) == tail {\n            let head = self.recv.load(order) as u32;\n            if head == head_cached {\n                return false;\n            }\n            head_cached = head;\n        }\n\n        let cap = self.capacity();\n        // Deconstruct the tail.\n        let index = (tail & (self.one_lap - 1)) as usize;\n        // Inspect the corresponding slot.\n        debug_assert!(index < self.buffer.len());\n        let slot = unsafe { self.buffer.get_unchecked(index) };\n        let new_tail = if index + 1 < cap {\n            // Same lap, incremented index.\n            // Set to `{ lap: lap, index: index + 1 }`.\n            tail + 1\n        } else {\n            let lap = tail & !(self.one_lap - 1);\n            // One lap forward, index wraps around to zero.\n            // Set to `{ lap: lap.wrapping_add(1), index: 0 }`.\n            lap.wrapping_add(self.one_lap)\n        };\n        // Write the value into the slot.\n        unsafe {\n            let item: &mut MaybeUninit<T> = &mut *slot.value.get();\n            item.write(ptr::read(value));\n        }\n        self.sender.store(((head_cached as u64) << 32) | (new_tail as u64), Ordering::SeqCst);\n        true\n    }\n\n    #[inline(always)]\n    pub unsafe fn push_with_ptr(&self, value: *const T) -> bool {\n        self._try_push(Ordering::Acquire, value)\n    }\n\n    #[inline(always)]\n    pub unsafe fn push_with_ptr_final(&self, value: *const T) -> bool {\n        self._try_push(Ordering::SeqCst, value)\n    }\n\n    #[inline]\n    pub fn start_read(&self, final_check: bool) -> Option<Token> {\n        if let Some((head, tail_cached)) = self._start_read::<true>(final_check) {\n            let (slot, packed_recv) = self._read(head, tail_cached);\n            Some(Token::new(slot as *const Slot<T> as *const u8, packed_recv as usize))\n        } else {\n            None\n        }\n    }\n\n    #[inline]\n    pub fn pop(&self, final_check: bool) -> Option<T> 
{\n        if let Some((head, tail_cached)) = self._start_read::<true>(final_check) {\n            let (slot, packed_recv) = self._read(head, tail_cached);\n            let msg = unsafe { slot.value.get().read().assume_init() };\n            self.recv.store(packed_recv, Ordering::SeqCst);\n            Some(msg)\n        } else {\n            None\n        }\n    }\n\n    #[inline]\n    pub fn pop_cached(&self) -> Option<T> {\n        if let Some((head, tail_cached)) = self._start_read::<false>(false) {\n            let (slot, packed_recv) = self._read(head, tail_cached);\n            let msg = unsafe { slot.value.get().read().assume_init() };\n            self.recv.store(packed_recv, Ordering::SeqCst);\n            Some(msg)\n        } else {\n            None\n        }\n    }\n\n    /// return (head, tail_cached)\n    #[inline]\n    fn _start_read<const SPIN: bool>(&self, _final_check: bool) -> Option<(u32, u32)> {\n        let recv_val = self.recv.load(Ordering::Relaxed);\n        let head = recv_val as u32;\n        let mut tail_cached = (recv_val >> 32) as u32;\n\n        if tail_cached == head {\n            if SPIN {\n                // because we don't have stamp, and no spinning loop,\n                // this line is critical for performance\n                std::hint::spin_loop();\n                let tail = {\n                    if _final_check {\n                        // because we need to check is_empty before park,\n                        // use SeqCst to make Miri happy\n                        self.sender.load(Ordering::SeqCst) as u32\n                    } else {\n                        self.sender.load(Ordering::Acquire) as u32\n                    }\n                };\n                if head == tail {\n                    return None;\n                }\n                tail_cached = tail;\n            } else {\n                return None;\n            }\n        }\n        Some((head, tail_cached))\n    }\n\n    #[inline]\n    fn 
_read(&self, head: u32, tail_cached: u32) -> (&Slot<T>, u64) {\n        // Deconstruct the head.\n        let index = (head & (self.one_lap - 1)) as usize;\n        // Inspect the corresponding slot.\n        debug_assert!(index < self.buffer.len());\n        let slot = unsafe { self.buffer.get_unchecked(index) };\n        // If the stamp is ahead of the head by 1, we may attempt to pop.\n        let new_head = if index + 1 < self.capacity() {\n            // Same lap, incremented index.\n            // Set to `{ lap: lap, index: index + 1 }`.\n            head + 1\n        } else {\n            let lap = head & !(self.one_lap - 1);\n            // One lap forward, index wraps around to zero.\n            // Set to `{ lap: lap.wrapping_add(1), index: 0 }`.\n            lap.wrapping_add(self.one_lap)\n        };\n        (slot, ((tail_cached as u64) << 32) | (new_head as u64))\n    }\n\n    #[inline(always)]\n    pub fn read(&self, token: Token) -> T {\n        let slot: &Slot<T> = unsafe { &*token.pos.cast::<Slot<T>>() };\n        let msg = unsafe { slot.value.get().read().assume_init() };\n        self.recv.store(token.stamp as u64, Ordering::SeqCst);\n        msg\n    }\n\n    /// Returns the capacity of the queue.\n    #[inline]\n    pub fn capacity(&self) -> usize {\n        self.buffer.len()\n    }\n\n    /// Returns `true` if the queue is empty.\n    #[inline(always)]\n    pub fn is_empty(&self) -> bool {\n        let head = self.recv.load(Ordering::SeqCst) as u32;\n        let tail = self.sender.load(Ordering::SeqCst) as u32;\n\n        // Is the tail lagging one lap behind head?\n        // Is the tail equal to the head?\n        //\n        // Note: If the head changes just before we load the tail, that means there was a moment\n        // when the channel was not empty, so it is safe to just return `false`.\n        tail == head\n    }\n\n    /// Returns `true` if the queue is full.\n    #[inline(always)]\n    pub fn is_full(&self) -> bool {\n        let 
tail = self.sender.load(Ordering::SeqCst) as u32;\n        let head = self.recv.load(Ordering::SeqCst) as u32;\n\n        // Is the head lagging one lap behind tail?\n        //\n        // Note: If the tail changes just before we load the head, that means there was a moment\n        // when the queue was not full, so it is safe to just return `false`.\n        head.wrapping_add(self.one_lap) == tail\n    }\n\n    /// Returns the number of elements in the queue.\n    #[inline]\n    pub fn len(&self) -> usize {\n        loop {\n            // Load the tail, then load the head.\n            let tail = self.sender.load(Ordering::SeqCst) as u32;\n            let head = self.recv.load(Ordering::SeqCst) as u32;\n\n            // If the tail didn't change, we've got consistent values to work with.\n            if self.sender.load(Ordering::SeqCst) as u32 == tail {\n                let hix = head & (self.one_lap - 1);\n                let tix = tail & (self.one_lap - 1);\n\n                return if hix < tix {\n                    (tix - hix) as usize\n                } else if hix > tix {\n                    self.capacity() - (hix - tix) as usize\n                } else if tail == head {\n                    0\n                } else {\n                    self.capacity()\n                };\n            }\n        }\n    }\n}\n\nimpl<T> Drop for ArrayQueueSpsc<T> {\n    fn drop(&mut self) {\n        if mem::needs_drop::<T>() {\n            // Get the index of the head.\n            let head = (*self.recv.get_mut()) as u32;\n            let tail = (*self.sender.get_mut()) as u32;\n\n            let hix = head & (self.one_lap - 1);\n            let tix = tail & (self.one_lap - 1);\n\n            let len = if hix < tix {\n                tix - hix\n            } else if hix > tix {\n                self.capacity() as u32 - hix + tix\n            } else if tail == head {\n                0\n            } else {\n                self.capacity() as u32\n            };\n\n    
        // Loop over all slots that hold a message and drop them.\n            for i in 0..(len as usize) {\n                // Compute the index of the next slot holding a message.\n                let index = if (hix as usize) + i < self.capacity() {\n                    (hix as usize) + i\n                } else {\n                    (hix as usize) + i - self.capacity()\n                };\n\n                unsafe {\n                    debug_assert!(index < self.buffer.len());\n                    let slot = self.buffer.get_unchecked_mut(index);\n                    (*slot.value.get()).assume_init_drop();\n                }\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "src/crossbeam/err.rs",
    "content": "//! The MIT License (MIT)\n//!\n//! Copyright (c) 2019 The Crossbeam Project Developers\n//!\n//! Permission is hereby granted, free of charge, to any\n//! person obtaining a copy of this software and associated\n//! documentation files (the \"Software\"), to deal in the\n//! Software without restriction, including without\n//! limitation the rights to use, copy, modify, merge,\n//! publish, distribute, sublicense, and/or sell copies of\n//! the Software, and to permit persons to whom the Software\n//! is furnished to do so, subject to the following\n//! conditions:\n//!\n//! The above copyright notice and this permission notice\n//! shall be included in all copies or substantial portions\n//! of the Software.\n//!\n//! THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF\n//! ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED\n//! TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n//! PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\n//! SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n//! CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n//! OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR\n//! IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n//! 
DEALINGS IN THE SOFTWARE.\n\nuse std::error;\nuse std::fmt;\n\n/// An error returned from the `send` method.\n///\n/// The message could not be sent because the channel is disconnected.\n///\n/// The error contains the message so it can be recovered.\n#[derive(PartialEq, Eq, Clone, Copy)]\npub struct SendError<T>(pub T);\n\n/// An error returned from the `try_send` method.\n///\n/// The error contains the message being sent so it can be recovered.\n#[derive(PartialEq, Eq, Clone, Copy)]\npub enum TrySendError<T> {\n    /// The message could not be sent because the channel is full.\n    ///\n    /// If this is a zero-capacity channel, then the error indicates that there was no receiver\n    /// available to receive the message at the time.\n    Full(T),\n\n    /// The message could not be sent because the channel is disconnected.\n    Disconnected(T),\n}\n\n/// An error returned from the `send_timeout` method.\n///\n/// The error contains the message being sent so it can be recovered.\n#[derive(PartialEq, Eq, Clone, Copy)]\npub enum SendTimeoutError<T> {\n    /// The message could not be sent because the channel is full and the operation timed out.\n    ///\n    /// If this is a zero-capacity channel, then the error indicates that there was no receiver\n    /// available to receive the message and the operation timed out.\n    Timeout(T),\n\n    /// The message could not be sent because the channel is disconnected.\n    Disconnected(T),\n}\n\n/// An error returned from the `recv` method.\n///\n/// A message could not be received because the channel is empty and disconnected.\n#[derive(PartialEq, Eq, Clone, Copy, Debug)]\npub struct RecvError;\n\n/// An error returned from the `try_recv` method.\n#[derive(PartialEq, Eq, Clone, Copy, Debug)]\npub enum TryRecvError {\n    /// A message could not be received because the channel is empty.\n    ///\n    /// If this is a zero-capacity channel, then the error indicates that there was no sender\n    /// available to send a 
message at the time.\n    Empty,\n\n    /// The message could not be received because the channel is empty and disconnected.\n    Disconnected,\n}\n\n/// An error returned from the `recv_timeout` method.\n#[derive(PartialEq, Eq, Clone, Copy, Debug)]\npub enum RecvTimeoutError {\n    /// A message could not be received because the channel is empty and the operation timed out.\n    ///\n    /// If this is a zero-capacity channel, then the error indicates that there was no sender\n    /// available to send a message and the operation timed out.\n    Timeout,\n\n    /// The message could not be received because the channel is empty and disconnected.\n    Disconnected,\n}\n\nimpl<T> fmt::Debug for SendError<T> {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        \"SendError(..)\".fmt(f)\n    }\n}\n\nimpl<T> fmt::Display for SendError<T> {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        \"sending on a disconnected channel\".fmt(f)\n    }\n}\n\nimpl<T: Send> error::Error for SendError<T> {}\n\nimpl<T> SendError<T> {\n    /// Unwraps the message.\n    ///\n    /// # Examples\n    ///\n    /// ```\n    /// use crossfire::mpmc;\n    ///\n    /// let (s, r) = mpmc::bounded_blocking::<&str>(10);\n    /// drop(r);\n    ///\n    /// if let Err(err) = s.send(\"foo\") {\n    ///     assert_eq!(err.into_inner(), \"foo\");\n    /// }\n    /// ```\n    pub fn into_inner(self) -> T {\n        self.0\n    }\n}\n\nimpl<T> fmt::Debug for TrySendError<T> {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        match *self {\n            Self::Full(..) => \"Full(..)\".fmt(f),\n            Self::Disconnected(..) => \"Disconnected(..)\".fmt(f),\n        }\n    }\n}\n\nimpl<T> fmt::Display for TrySendError<T> {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        match *self {\n            Self::Full(..) => \"sending on a full channel\".fmt(f),\n            Self::Disconnected(..) 
=> \"sending on a disconnected channel\".fmt(f),\n        }\n    }\n}\n\nimpl<T: Send> error::Error for TrySendError<T> {}\n\nimpl<T> From<SendError<T>> for TrySendError<T> {\n    fn from(err: SendError<T>) -> Self {\n        match err {\n            SendError(t) => Self::Disconnected(t),\n        }\n    }\n}\n\nimpl<T> TrySendError<T> {\n    /// Unwraps the message.\n    ///\n    /// # Examples\n    ///\n    /// ```\n    /// use crossfire::mpmc;\n    ///\n    /// let (s, r) = mpmc::bounded_blocking::<&str>(0);\n    ///\n    /// if let Err(err) = s.try_send(\"foo\") {\n    ///     assert_eq!(err.into_inner(), \"foo\");\n    /// }\n    /// ```\n    pub fn into_inner(self) -> T {\n        match self {\n            Self::Full(v) => v,\n            Self::Disconnected(v) => v,\n        }\n    }\n\n    /// Returns `true` if the send operation failed because the channel is full.\n    pub fn is_full(&self) -> bool {\n        matches!(self, Self::Full(_))\n    }\n\n    /// Returns `true` if the send operation failed because the channel is disconnected.\n    pub fn is_disconnected(&self) -> bool {\n        matches!(self, Self::Disconnected(_))\n    }\n}\n\nimpl<T> fmt::Debug for SendTimeoutError<T> {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        \"SendTimeoutError(..)\".fmt(f)\n    }\n}\n\nimpl<T> fmt::Display for SendTimeoutError<T> {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        match *self {\n            Self::Timeout(..) => \"timed out waiting on send operation\".fmt(f),\n            Self::Disconnected(..) 
=> \"sending on a disconnected channel\".fmt(f),\n        }\n    }\n}\n\nimpl<T: Send> error::Error for SendTimeoutError<T> {}\n\nimpl<T> From<SendError<T>> for SendTimeoutError<T> {\n    fn from(err: SendError<T>) -> Self {\n        match err {\n            SendError(e) => Self::Disconnected(e),\n        }\n    }\n}\n\nimpl<T> SendTimeoutError<T> {\n    /// Unwraps the message.\n    ///\n    /// # Examples\n    ///\n    /// ```\n    /// use std::time::Duration;\n    /// use crossfire::mpmc;\n    ///\n    /// let (s, r) = mpmc::bounded_blocking::<&str>(10);\n    /// drop(r);\n    ///\n    /// if let Err(err) = s.send_timeout(\"foo\", Duration::from_secs(1)) {\n    ///     assert_eq!(err.into_inner(), \"foo\");\n    /// }\n    /// ```\n    pub fn into_inner(self) -> T {\n        match self {\n            Self::Timeout(v) => v,\n            Self::Disconnected(v) => v,\n        }\n    }\n\n    /// Returns `true` if the send operation timed out.\n    pub fn is_timeout(&self) -> bool {\n        matches!(self, Self::Timeout(_))\n    }\n\n    /// Returns `true` if the send operation failed because the channel is disconnected.\n    pub fn is_disconnected(&self) -> bool {\n        matches!(self, Self::Disconnected(_))\n    }\n}\n\nimpl fmt::Display for RecvError {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        \"receiving on an empty and disconnected channel\".fmt(f)\n    }\n}\n\nimpl error::Error for RecvError {}\n\nimpl fmt::Display for TryRecvError {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        match *self {\n            Self::Empty => \"receiving on an empty channel\".fmt(f),\n            Self::Disconnected => \"receiving on an empty and disconnected channel\".fmt(f),\n        }\n    }\n}\n\nimpl error::Error for TryRecvError {}\n\nimpl From<RecvError> for TryRecvError {\n    fn from(err: RecvError) -> Self {\n        match err {\n            RecvError => Self::Disconnected,\n        }\n    }\n}\n\nimpl TryRecvError 
{\n    /// Returns `true` if the receive operation failed because the channel is empty.\n    pub fn is_empty(&self) -> bool {\n        matches!(self, Self::Empty)\n    }\n\n    /// Returns `true` if the receive operation failed because the channel is disconnected.\n    pub fn is_disconnected(&self) -> bool {\n        matches!(self, Self::Disconnected)\n    }\n}\n\nimpl fmt::Display for RecvTimeoutError {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        match *self {\n            Self::Timeout => \"timed out waiting on receive operation\".fmt(f),\n            Self::Disconnected => \"channel is empty and disconnected\".fmt(f),\n        }\n    }\n}\n\nimpl error::Error for RecvTimeoutError {}\n\nimpl From<RecvError> for RecvTimeoutError {\n    fn from(err: RecvError) -> Self {\n        match err {\n            RecvError => Self::Disconnected,\n        }\n    }\n}\n\nimpl RecvTimeoutError {\n    /// Returns `true` if the receive operation timed out.\n    pub fn is_timeout(&self) -> bool {\n        matches!(self, Self::Timeout)\n    }\n\n    /// Returns `true` if the receive operation failed because the channel is disconnected.\n    pub fn is_disconnected(&self) -> bool {\n        matches!(self, Self::Disconnected)\n    }\n}\n"
  },
  {
    "path": "src/crossbeam/mod.rs",
    "content": "pub mod array_queue;\npub mod array_queue_mpsc;\npub mod array_queue_spsc;\npub mod err;\npub mod seg_queue;\n"
  },
  {
    "path": "src/crossbeam/seg_queue.rs",
    "content": "//! Modify by frostyplanet@gmail.com for the crossfire crate:\n//!\n//!   - Modify for select according to crossbeam-channel, but without disconnect mark bit\n//!\n//! Fork from crossbeam-queue crate commit 5a154def002304814d50f3c7658bd30eb46b2fad\n//!\n//! The MIT License (MIT)\n//!\n//! Copyright (c) 2026 frostyplanet@gmail.com\n//!\n//! Copyright (c) 2019 The Crossbeam Project Developers\n//!\n//! Permission is hereby granted, free of charge, to any\n//! person obtaining a copy of this software and associated\n//! documentation files (the \"Software\"), to deal in the\n//! Software without restriction, including without\n//! limitation the rights to use, copy, modify, merge,\n//! publish, distribute, sublicense, and/or sell copies of\n//! the Software, and to permit persons to whom the Software\n//! is furnished to do so, subject to the following\n//! conditions:\n//!\n//! The above copyright notice and this permission notice\n//! shall be included in all copies or substantial portions\n//! of the Software.\n//!\n//! THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF\n//! ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED\n//! TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n//! PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\n//! SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n//! CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n//! OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR\n//! IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n//! 
DEALINGS IN THE SOFTWARE.\n//\n\nuse crate::flavor::Token;\nuse core::cell::UnsafeCell;\nuse core::fmt;\nuse core::marker::PhantomData;\nuse core::mem::MaybeUninit;\nuse core::panic::{RefUnwindSafe, UnwindSafe};\nuse core::ptr;\nuse core::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};\nuse crossbeam_utils::{Backoff, CachePadded};\nuse std::alloc::{alloc_zeroed, handle_alloc_error, Layout};\nuse std::boxed::Box;\n\n// Bits indicating the state of a slot:\n// * If a value has been written into the slot, `WRITE` is set.\n// * If a value has been read from the slot, `READ` is set.\n// * If the block is being destroyed, `DESTROY` is set.\nconst WRITE: usize = 1;\nconst READ: usize = 2;\nconst DESTROY: usize = 4;\n\n// Each block covers one \"lap\" of indices.\nconst LAP: usize = 32;\n// The maximum number of values a block can hold.\nconst BLOCK_CAP: usize = LAP - 1;\n// How many lower bits are reserved for metadata.\nconst SHIFT: usize = 1;\n// Indicates that the block is not the last one.\nconst HAS_NEXT: usize = 1;\n\n/// A slot in a block.\nstruct Slot<T> {\n    /// The value.\n    value: UnsafeCell<MaybeUninit<T>>,\n\n    /// The state of the slot.\n    state: AtomicUsize,\n}\n\nimpl<T> Slot<T> {\n    /// Waits until a value is written into the slot.\n    fn wait_write(&self) {\n        let backoff = Backoff::new();\n        while self.state.load(Ordering::Acquire) & WRITE == 0 {\n            backoff.snooze();\n        }\n    }\n}\n\n/// A block in a linked list.\n///\n/// Each block in the list can hold up to `BLOCK_CAP` values.\nstruct Block<T> {\n    /// The next block in the linked list.\n    next: AtomicPtr<Block<T>>,\n\n    /// Slots for values.\n    slots: [Slot<T>; BLOCK_CAP],\n}\n\nimpl<T> Block<T> {\n    const LAYOUT: Layout = {\n        let layout = Layout::new::<Self>();\n        assert!(\n            layout.size() != 0,\n            \"Block should never be zero-sized, as it has an AtomicPtr field\"\n        );\n        layout\n    };\n\n    
/// Creates an empty block.\n    fn new() -> Box<Self> {\n        // SAFETY: layout is not zero-sized\n        let ptr = unsafe { alloc_zeroed(Self::LAYOUT) };\n        // Handle allocation failure\n        if ptr.is_null() {\n            handle_alloc_error(Self::LAYOUT)\n        }\n        // SAFETY: This is safe because:\n        //  [1] `Block::next` (AtomicPtr) may be safely zero initialized.\n        //  [2] `Block::slots` (Array) may be safely zero initialized because of [3, 4].\n        //  [3] `Slot::value` (UnsafeCell) may be safely zero initialized because it\n        //       holds a MaybeUninit.\n        //  [4] `Slot::state` (AtomicUsize) may be safely zero initialized.\n        // TODO: unsafe { Box::new_zeroed().assume_init() }\n        unsafe { Box::from_raw(ptr.cast()) }\n    }\n\n    /// Waits until the next pointer is set.\n    fn wait_next(&self) -> *mut Self {\n        let backoff = Backoff::new();\n        loop {\n            let next = self.next.load(Ordering::Acquire);\n            if !next.is_null() {\n                return next;\n            }\n            backoff.snooze();\n        }\n    }\n\n    /// Sets the `DESTROY` bit in slots starting from `start` and destroys the block.\n    unsafe fn destroy(this: *mut Self, start: usize) {\n        // It is not necessary to set the `DESTROY` bit in the last slot because that slot has\n        // begun destruction of the block.\n        for i in start..BLOCK_CAP - 1 {\n            let slot = unsafe { (*this).slots.get_unchecked(i) };\n\n            // Mark the `DESTROY` bit if a thread is still using the slot.\n            if slot.state.load(Ordering::Acquire) & READ == 0\n                && slot.state.fetch_or(DESTROY, Ordering::AcqRel) & READ == 0\n            {\n                // If a thread is still using the slot, it will continue destruction of the block.\n                return;\n            }\n        }\n\n        // No thread is using the block, now it is safe to destroy it.\n        
drop(unsafe { Box::from_raw(this) });\n    }\n}\n\n/// A position in a queue.\nstruct Position<T> {\n    /// The index in the queue.\n    index: AtomicUsize,\n\n    /// The block in the linked list.\n    block: AtomicPtr<Block<T>>,\n}\n\n/// An unbounded multi-producer multi-consumer queue.\n///\n/// This queue is implemented as a linked list of segments, where each segment is a small buffer\n/// that can hold a handful of elements. There is no limit to how many elements can be in the queue\n/// at a time. However, since segments need to be dynamically allocated as elements get pushed,\n/// this queue is somewhat slower than [`ArrayQueue`].\n///\n/// [`ArrayQueue`]: super::ArrayQueue\npub struct SegQueue<T> {\n    /// The head of the queue.\n    head: CachePadded<Position<T>>,\n\n    /// The tail of the queue.\n    tail: CachePadded<Position<T>>,\n\n    /// Indicates that dropping a `SegQueue<T>` may drop values of type `T`.\n    _marker: PhantomData<T>,\n}\n\nunsafe impl<T> Send for SegQueue<T> {}\nunsafe impl<T> Sync for SegQueue<T> {}\n\nimpl<T> UnwindSafe for SegQueue<T> {}\nimpl<T> RefUnwindSafe for SegQueue<T> {}\n\nimpl<T> SegQueue<T> {\n    /// Creates a new unbounded queue.\n    pub const fn new() -> Self {\n        Self {\n            head: CachePadded::new(Position {\n                block: AtomicPtr::new(ptr::null_mut()),\n                index: AtomicUsize::new(0),\n            }),\n            tail: CachePadded::new(Position {\n                block: AtomicPtr::new(ptr::null_mut()),\n                index: AtomicUsize::new(0),\n            }),\n            _marker: PhantomData,\n        }\n    }\n\n    /// Pushes back an element to the tail.\n    #[inline(always)]\n    pub fn push(&self, value: T) {\n        let backoff = Backoff::new();\n        let mut tail = self.tail.index.load(Ordering::Acquire);\n        let mut block = self.tail.block.load(Ordering::Acquire);\n        let mut next_block = None;\n\n        loop {\n            // Calculate the 
offset of the index into the block.\n            let offset = (tail >> SHIFT) % LAP;\n\n            // If we reached the end of the block, wait until the next one is installed.\n            if offset == BLOCK_CAP {\n                backoff.snooze();\n                tail = self.tail.index.load(Ordering::Acquire);\n                block = self.tail.block.load(Ordering::Acquire);\n                continue;\n            }\n\n            // If we're going to have to install the next block, allocate it in advance in order to\n            // make the wait for other threads as short as possible.\n            if offset + 1 == BLOCK_CAP && next_block.is_none() {\n                next_block = Some(Block::<T>::new());\n            }\n\n            // If this is the first push operation, we need to allocate the first block.\n            if block.is_null() {\n                let new = Box::into_raw(Block::<T>::new());\n\n                if self\n                    .tail\n                    .block\n                    .compare_exchange(block, new, Ordering::Release, Ordering::Relaxed)\n                    .is_ok()\n                {\n                    self.head.block.store(new, Ordering::Release);\n                    block = new;\n                } else {\n                    next_block = unsafe { Some(Box::from_raw(new)) };\n                    tail = self.tail.index.load(Ordering::Acquire);\n                    block = self.tail.block.load(Ordering::Acquire);\n                    continue;\n                }\n            }\n\n            let new_tail = tail + (1 << SHIFT);\n\n            // Try advancing the tail forward.\n            match self.tail.index.compare_exchange_weak(\n                tail,\n                new_tail,\n                Ordering::SeqCst,\n                Ordering::Acquire,\n            ) {\n                Ok(_) => unsafe {\n                    // If we've reached the end of the block, install the next one.\n                    if offset + 1 == 
BLOCK_CAP {\n                        let next_block = Box::into_raw(next_block.unwrap());\n                        let next_index = new_tail.wrapping_add(1 << SHIFT);\n\n                        self.tail.block.store(next_block, Ordering::Release);\n                        self.tail.index.store(next_index, Ordering::Release);\n                        (*block).next.store(next_block, Ordering::Release);\n                    }\n\n                    // Write the value into the slot.\n                    let slot = (*block).slots.get_unchecked(offset);\n                    slot.value.get().write(MaybeUninit::new(value));\n                    slot.state.fetch_or(WRITE, Ordering::Release);\n\n                    return;\n                },\n                Err(t) => {\n                    tail = t;\n                    block = self.tail.block.load(Ordering::Acquire);\n                    backoff.spin();\n                }\n            }\n        }\n    }\n\n    #[inline(always)]\n    pub fn start_read(&self) -> Option<Token> {\n        if let Some((block, offset)) = self._pop::<false>() {\n            Some(Token::new(block as *const u8, offset))\n        } else {\n            None\n        }\n    }\n\n    #[inline(always)]\n    pub fn pop<const FINAL: bool>(&self) -> Option<T> {\n        if let Some((block, offset)) = self._pop::<FINAL>() {\n            Some(self._read(block, offset))\n        } else {\n            None\n        }\n    }\n\n    #[inline(always)]\n    fn _pop<const FINAL: bool>(&self) -> Option<(*mut Block<T>, usize)> {\n        let backoff = Backoff::new();\n        let mut head;\n        if FINAL {\n            head = self.head.index.load(Ordering::SeqCst);\n            let tail = self.tail.index.load(Ordering::SeqCst);\n            if head >> SHIFT == tail >> SHIFT {\n                return None;\n            }\n        } else {\n            head = self.head.index.load(Ordering::Acquire);\n        }\n        let mut block = 
self.head.block.load(Ordering::Acquire);\n\n        loop {\n            // Calculate the offset of the index into the block.\n            let offset = (head >> SHIFT) % LAP;\n\n            // If we reached the end of the block, wait until the next one is installed.\n            if offset == BLOCK_CAP {\n                backoff.snooze();\n                head = self.head.index.load(Ordering::Acquire);\n                block = self.head.block.load(Ordering::Acquire);\n                continue;\n            }\n\n            let mut new_head = head + (1 << SHIFT);\n\n            if new_head & HAS_NEXT == 0 {\n                atomic::fence(Ordering::SeqCst);\n                let tail = self.tail.index.load(Ordering::Relaxed);\n\n                // If the tail equals the head, that means the queue is empty.\n                if head >> SHIFT == tail >> SHIFT {\n                    return None;\n                }\n\n                // If head and tail are not in the same block, set `HAS_NEXT` in head.\n                if (head >> SHIFT) / LAP != (tail >> SHIFT) / LAP {\n                    new_head |= HAS_NEXT;\n                }\n            }\n\n            // The block can be null here only if the first push operation is in progress. 
In that\n            // case, just wait until it gets initialized.\n            if block.is_null() {\n                backoff.snooze();\n                head = self.head.index.load(Ordering::Acquire);\n                block = self.head.block.load(Ordering::Acquire);\n                continue;\n            }\n\n            // Try moving the head index forward.\n            match self.head.index.compare_exchange_weak(\n                head,\n                new_head,\n                Ordering::SeqCst,\n                Ordering::Acquire,\n            ) {\n                Ok(_) => unsafe {\n                    // If we've reached the end of the block, move to the next one.\n                    if offset + 1 == BLOCK_CAP {\n                        let next = (*block).wait_next();\n                        let mut next_index = (new_head & !HAS_NEXT).wrapping_add(1 << SHIFT);\n                        if !(*next).next.load(Ordering::Relaxed).is_null() {\n                            next_index |= HAS_NEXT;\n                        }\n\n                        self.head.block.store(next, Ordering::Release);\n                        self.head.index.store(next_index, Ordering::Release);\n                    }\n                    return Some((block, offset));\n                },\n                Err(h) => {\n                    head = h;\n                    block = self.head.block.load(Ordering::Acquire);\n                    backoff.spin();\n                }\n            }\n        }\n    }\n\n    #[inline(always)]\n    pub fn read(&self, token: Token) -> T {\n        let block = token.pos as *mut Block<T>;\n        let offset = token.stamp;\n        self._read(block, offset)\n    }\n\n    #[inline(always)]\n    fn _read(&self, block: *mut Block<T>, offset: usize) -> T {\n        unsafe {\n            let slot = (*block).slots.get_unchecked(offset);\n            // Read the value.\n            slot.wait_write();\n            let value = 
slot.value.get().read().assume_init();\n            // Destroy the block if we've reached the end, or if another thread wanted to\n            // destroy but couldn't because we were busy reading from the slot.\n            if offset + 1 == BLOCK_CAP {\n                Block::destroy(block, 0);\n            } else if slot.state.fetch_or(READ, Ordering::AcqRel) & DESTROY != 0 {\n                Block::destroy(block, offset + 1);\n            }\n            value\n        }\n    }\n\n    /// Returns `true` if the queue is empty.\n    #[inline(always)]\n    pub fn is_empty(&self) -> bool {\n        let head = self.head.index.load(Ordering::SeqCst);\n        let tail = self.tail.index.load(Ordering::SeqCst);\n        head >> SHIFT == tail >> SHIFT\n    }\n\n    /// Returns the number of elements in the queue.\n    pub fn len(&self) -> usize {\n        loop {\n            // Load the tail index, then load the head index.\n            let mut tail = self.tail.index.load(Ordering::SeqCst);\n            let mut head = self.head.index.load(Ordering::SeqCst);\n\n            // If the tail index didn't change, we've got consistent indices to work with.\n            if self.tail.index.load(Ordering::SeqCst) == tail {\n                // Erase the lower bits.\n                tail &= !((1 << SHIFT) - 1);\n                head &= !((1 << SHIFT) - 1);\n\n                // Fix up indices if they fall onto block ends.\n                if (tail >> SHIFT) & (LAP - 1) == LAP - 1 {\n                    tail = tail.wrapping_add(1 << SHIFT);\n                }\n                if (head >> SHIFT) & (LAP - 1) == LAP - 1 {\n                    head = head.wrapping_add(1 << SHIFT);\n                }\n\n                // Rotate indices so that head falls into the first block.\n                let lap = (head >> SHIFT) / LAP;\n                tail = tail.wrapping_sub((lap * LAP) << SHIFT);\n                head = head.wrapping_sub((lap * LAP) << SHIFT);\n\n                // Remove the 
lower bits.\n                tail >>= SHIFT;\n                head >>= SHIFT;\n\n                // Return the difference minus the number of blocks between tail and head.\n                return tail - head - tail / LAP;\n            }\n        }\n    }\n}\n\nimpl<T> Drop for SegQueue<T> {\n    #[inline]\n    fn drop(&mut self) {\n        let mut head = *self.head.index.get_mut();\n        let mut tail = *self.tail.index.get_mut();\n        let mut block = *self.head.block.get_mut();\n\n        // Erase the lower bits.\n        head &= !((1 << SHIFT) - 1);\n        tail &= !((1 << SHIFT) - 1);\n\n        unsafe {\n            // Drop all values between `head` and `tail` and deallocate the heap-allocated blocks.\n            while head != tail {\n                let offset = (head >> SHIFT) % LAP;\n\n                if offset < BLOCK_CAP {\n                    // Drop the value in the slot.\n                    let slot = (*block).slots.get_unchecked(offset);\n                    (*slot.value.get()).assume_init_drop();\n                } else {\n                    // Deallocate the block and move to the next one.\n                    let next = *(*block).next.get_mut();\n                    drop(Box::from_raw(block));\n                    block = next;\n                }\n\n                head = head.wrapping_add(1 << SHIFT);\n            }\n\n            // Deallocate the last remaining block.\n            if !block.is_null() {\n                drop(Box::from_raw(block));\n            }\n        }\n    }\n}\n\nimpl<T> fmt::Debug for SegQueue<T> {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        f.pad(\"SegQueue { .. 
}\")\n    }\n}\n\nimpl<T> Default for SegQueue<T> {\n    fn default() -> Self {\n        Self::new()\n    }\n}\n\nimpl<T> IntoIterator for SegQueue<T> {\n    type Item = T;\n\n    type IntoIter = IntoIter<T>;\n\n    fn into_iter(self) -> Self::IntoIter {\n        IntoIter { value: self }\n    }\n}\n\n#[derive(Debug)]\npub struct IntoIter<T> {\n    value: SegQueue<T>,\n}\n\nimpl<T> Iterator for IntoIter<T> {\n    type Item = T;\n\n    fn next(&mut self) -> Option<Self::Item> {\n        let value = &mut self.value;\n        let head = *value.head.index.get_mut();\n        let tail = *value.tail.index.get_mut();\n        if head >> SHIFT == tail >> SHIFT {\n            None\n        } else {\n            let block = *value.head.block.get_mut();\n            let offset = (head >> SHIFT) % LAP;\n\n            // SAFETY: We have mutable access to this, so we can read without\n            // worrying about concurrency. Furthermore, we know this is\n            // initialized because it is the value pointed at by `value.head`\n            // and this is a non-empty queue.\n            let item = unsafe {\n                let slot = (*block).slots.get_unchecked(offset);\n                slot.value.get().read().assume_init()\n            };\n            if offset + 1 == BLOCK_CAP {\n                // Deallocate the block and move to the next one.\n                // SAFETY: The block is initialized because we've been reading\n                // from it this entire time. 
We can drop it b/c everything has\n                // been read out of it, so nothing is pointing to it anymore.\n                unsafe {\n                    let next = *(*block).next.get_mut();\n                    drop(Box::from_raw(block));\n                    *value.head.block.get_mut() = next;\n                }\n                // The last value in a block is empty, so skip it\n                *value.head.index.get_mut() = head.wrapping_add(2 << SHIFT);\n                // Double-check that we're pointing to the first item in a block.\n                debug_assert_eq!((*value.head.index.get_mut() >> SHIFT) % LAP, 0);\n            } else {\n                *value.head.index.get_mut() = head.wrapping_add(1 << SHIFT);\n            }\n            Some(item)\n        }\n    }\n}\n"
  },
  {
    "path": "src/flavor/array.rs",
    "content": "use super::{FlavorBounded, FlavorImpl, FlavorSelect, Queue, Token};\nuse crate::crossbeam::array_queue::ArrayQueue;\nuse std::mem::MaybeUninit;\n\n/// Which Equals to crossbeam_queue::ArrayQueue\npub type Array<T> = _Array<T, true, true>;\n\npub struct _Array<T, const MP: bool, const MC: bool>(ArrayQueue<T, MP, MC>);\n\nimpl<T, const MP: bool, const MC: bool> _Array<T, MP, MC> {\n    pub fn new(mut bound: usize) -> Self {\n        assert!(bound <= u32::MAX as usize);\n        if bound == 0 {\n            bound = 1;\n        }\n        Self(ArrayQueue::<T, MP, MC>::new(bound))\n    }\n}\n\nimpl<T, const MP: bool, const MC: bool> Queue for _Array<T, MP, MC> {\n    type Item = T;\n\n    #[inline(always)]\n    fn pop(&self) -> Option<T> {\n        self.0.pop(true)\n    }\n\n    #[inline(always)]\n    fn push(&self, item: T) -> Result<(), T> {\n        let _item = MaybeUninit::new(item);\n        if unsafe { self.0.push_with_ptr(_item.as_ptr()) } {\n            Ok(())\n        } else {\n            Err(unsafe { _item.assume_init_read() })\n        }\n    }\n\n    #[inline(always)]\n    fn is_full(&self) -> bool {\n        self.0.is_full()\n    }\n\n    #[inline(always)]\n    fn is_empty(&self) -> bool {\n        self.0.is_empty()\n    }\n\n    #[inline(always)]\n    fn len(&self) -> usize {\n        self.0.len()\n    }\n\n    #[inline(always)]\n    fn capacity(&self) -> Option<usize> {\n        Some(self.0.capacity())\n    }\n}\n\nimpl<T, const MP: bool, const MC: bool> FlavorImpl for _Array<T, MP, MC> {\n    #[inline(always)]\n    fn try_send(&self, item: &MaybeUninit<T>) -> bool {\n        unsafe { self.0.push_with_ptr(item.as_ptr()) }\n    }\n\n    #[inline(always)]\n    fn try_send_oneshot(&self, item: *const T) -> Option<bool> {\n        unsafe { self.0.try_push_oneshot(item) }\n    }\n\n    #[inline]\n    fn try_recv(&self) -> Option<T> {\n        self.0.pop(false)\n    }\n\n    #[inline]\n    fn try_recv_final(&self) -> Option<T> {\n        
self.0.pop(true)\n    }\n\n    #[inline]\n    fn backoff_limit(&self) -> u16 {\n        if self.0.capacity() > 10 {\n            crate::backoff::DEFAULT_LIMIT\n        } else {\n            #[cfg(target_arch = \"x86_64\")]\n            {\n                crate::backoff::DEFAULT_LIMIT\n            }\n            #[cfg(not(target_arch = \"x86_64\"))]\n            {\n                crate::backoff::MAX_LIMIT\n            }\n        }\n    }\n\n    #[inline]\n    fn may_direct_copy(&self) -> bool {\n        if MP {\n            true\n        } else {\n            // sender has no CAS, not safe to direct copy\n            false\n        }\n    }\n}\n\nimpl<T, const MP: bool, const MC: bool> FlavorSelect for _Array<T, MP, MC> {\n    #[inline]\n    fn try_select(&self, final_check: bool) -> Option<Token> {\n        self.0.start_read(final_check)\n    }\n\n    #[inline(always)]\n    fn read_with_token(&self, token: Token) -> T {\n        self.0.read(token)\n    }\n}\n\nimpl<T, const MP: bool, const MC: bool> FlavorBounded for _Array<T, MP, MC> {\n    #[inline(always)]\n    fn new_with_bound(size: usize) -> Self {\n        Self::new(size)\n    }\n}\n"
  },
  {
    "path": "src/flavor/array_mpsc.rs",
    "content": "use super::{FlavorBounded, FlavorImpl, FlavorSelect, Queue, Token};\nuse crate::crossbeam::array_queue_mpsc::ArrayQueueMpsc;\nuse std::mem::MaybeUninit;\n\n/// Simplified ArrayQueue tweaks for MPSC\n///\n/// Push and pop fast path reduced one atomic ops compared to its MPMC version (only 3 ops instead\n/// of 4),\n/// and it's faster to detect the empty / full condition (2 ops instead of 3).\npub struct ArrayMpsc<T>(ArrayQueueMpsc<T>);\n\nimpl<T> ArrayMpsc<T> {\n    pub fn new(mut bound: usize) -> Self {\n        assert!(bound <= u32::MAX as usize);\n        if bound == 0 {\n            bound = 1;\n        }\n        Self(ArrayQueueMpsc::<T>::new(bound))\n    }\n}\n\nimpl<T> Queue for ArrayMpsc<T> {\n    type Item = T;\n\n    #[inline(always)]\n    fn pop(&self) -> Option<T> {\n        self.0.pop(true)\n    }\n\n    #[inline(always)]\n    fn push(&self, item: T) -> Result<(), T> {\n        let _item = MaybeUninit::new(item);\n        if unsafe { self.0.push_with_ptr(_item.as_ptr()) } {\n            Ok(())\n        } else {\n            Err(unsafe { _item.assume_init_read() })\n        }\n    }\n\n    #[inline(always)]\n    fn is_full(&self) -> bool {\n        self.0.is_full()\n    }\n\n    #[inline(always)]\n    fn is_empty(&self) -> bool {\n        self.0.is_empty()\n    }\n\n    #[inline(always)]\n    fn len(&self) -> usize {\n        self.0.len()\n    }\n\n    #[inline(always)]\n    fn capacity(&self) -> Option<usize> {\n        Some(self.0.capacity())\n    }\n}\n\nimpl<T> FlavorImpl for ArrayMpsc<T> {\n    #[inline(always)]\n    fn try_send(&self, item: &MaybeUninit<T>) -> bool {\n        unsafe { self.0.push_with_ptr(item.as_ptr()) }\n    }\n\n    #[inline(always)]\n    fn try_send_oneshot(&self, item: *const T) -> Option<bool> {\n        unsafe { self.0.try_push_oneshot(item) }\n    }\n\n    #[inline(always)]\n    fn try_recv_cached(&self) -> Option<T> {\n        self.0.pop_cached()\n    }\n\n    #[inline]\n    fn try_recv(&self) -> Option<T> 
{\n        self.0.pop(false)\n    }\n\n    #[inline]\n    fn try_recv_final(&self) -> Option<T> {\n        self.0.pop(true)\n    }\n\n    #[inline]\n    fn backoff_limit(&self) -> u16 {\n        if self.0.capacity() > 10 {\n            crate::backoff::DEFAULT_LIMIT\n        } else {\n            #[cfg(target_arch = \"x86_64\")]\n            {\n                crate::backoff::DEFAULT_LIMIT\n            }\n            #[cfg(not(target_arch = \"x86_64\"))]\n            {\n                crate::backoff::MAX_LIMIT\n            }\n        }\n    }\n\n    #[inline]\n    fn may_direct_copy(&self) -> bool {\n        true\n    }\n}\n\nimpl<T> FlavorSelect for ArrayMpsc<T> {\n    #[inline]\n    fn try_select(&self, final_check: bool) -> Option<Token> {\n        self.0.start_read(final_check)\n    }\n\n    #[inline(always)]\n    fn read_with_token(&self, token: Token) -> T {\n        self.0.read(token)\n    }\n}\n\nimpl<T> FlavorBounded for ArrayMpsc<T> {\n    #[inline(always)]\n    fn new_with_bound(size: usize) -> Self {\n        Self::new(size)\n    }\n}\n"
  },
  {
    "path": "src/flavor/array_spsc.rs",
    "content": "use super::{FlavorBounded, FlavorImpl, FlavorSelect, Queue, Token};\nuse crate::crossbeam::array_queue_spsc::ArrayQueueSpsc;\nuse std::mem::MaybeUninit;\n\n/// Ultra light-weight bounded SPSC\n///\n/// which derives from ArrayQueue, but without stamp.\n/// With only two atomics for cache affinity, the fastpath only require two ops to one atomic.\n///\npub struct ArraySpsc<T>(ArrayQueueSpsc<T>);\n\nimpl<T> ArraySpsc<T> {\n    pub fn new(mut bound: usize) -> Self {\n        assert!(bound <= u32::MAX as usize);\n        if bound == 0 {\n            bound = 1;\n        }\n        Self(ArrayQueueSpsc::<T>::new(bound))\n    }\n}\n\nimpl<T> Queue for ArraySpsc<T> {\n    type Item = T;\n\n    #[inline(always)]\n    fn pop(&self) -> Option<T> {\n        self.0.pop(true)\n    }\n\n    #[inline(always)]\n    fn push(&self, item: T) -> Result<(), T> {\n        let _item = MaybeUninit::new(item);\n        if unsafe { self.0.push_with_ptr_final(_item.as_ptr()) } {\n            Ok(())\n        } else {\n            Err(unsafe { _item.assume_init_read() })\n        }\n    }\n\n    #[inline(always)]\n    fn is_full(&self) -> bool {\n        self.0.is_full()\n    }\n\n    #[inline(always)]\n    fn is_empty(&self) -> bool {\n        self.0.is_empty()\n    }\n\n    #[inline(always)]\n    fn len(&self) -> usize {\n        self.0.len()\n    }\n\n    #[inline(always)]\n    fn capacity(&self) -> Option<usize> {\n        Some(self.0.capacity())\n    }\n}\n\nimpl<T> FlavorImpl for ArraySpsc<T> {\n    #[inline(always)]\n    fn try_send(&self, item: &MaybeUninit<T>) -> bool {\n        unsafe { self.0.push_with_ptr(item.as_ptr()) }\n    }\n\n    #[inline(always)]\n    fn try_send_oneshot(&self, item: *const T) -> Option<bool> {\n        Some(unsafe { self.0.push_with_ptr_final(item) })\n    }\n\n    #[inline]\n    fn try_recv_cached(&self) -> Option<T> {\n        self.0.pop_cached()\n    }\n\n    #[inline]\n    fn try_recv(&self) -> Option<T> {\n        self.0.pop(false)\n    
}\n\n    #[inline]\n    fn try_recv_final(&self) -> Option<T> {\n        self.0.pop(true)\n    }\n\n    #[inline]\n    fn backoff_limit(&self) -> u16 {\n        crate::backoff::MAX_LIMIT\n    }\n\n    #[inline]\n    fn may_direct_copy(&self) -> bool {\n        // NOTE:\n        // The spsc is not safe for direct copy,\n        // because it has no cas, consumer cannot touch the producers pointer\n        false\n    }\n}\n\nimpl<T> FlavorSelect for ArraySpsc<T> {\n    #[inline]\n    fn try_select(&self, final_check: bool) -> Option<Token> {\n        self.0.start_read(final_check)\n    }\n\n    #[inline(always)]\n    fn read_with_token(&self, token: Token) -> T {\n        self.0.read(token)\n    }\n}\n\nimpl<T> FlavorBounded for ArraySpsc<T> {\n    #[inline(always)]\n    fn new_with_bound(size: usize) -> Self {\n        Self::new(size)\n    }\n}\n"
  },
  {
    "path": "src/flavor/list.rs",
    "content": "use super::{FlavorImpl, FlavorNew, FlavorSelect, Queue, Token};\nuse crate::crossbeam::seg_queue::SegQueue;\nuse std::mem::MaybeUninit;\n\n/// Which equals to crossbeam_queue::SegQueue\npub struct List<T>(SegQueue<T>);\n\nimpl<T> List<T> {\n    #[inline(always)]\n    pub fn new() -> Self {\n        Self(SegQueue::<T>::new())\n    }\n}\n\nimpl<T> Queue for List<T> {\n    type Item = T;\n\n    #[inline(always)]\n    fn pop(&self) -> Option<T> {\n        self.0.pop::<false>()\n    }\n\n    #[inline(always)]\n    fn push(&self, item: T) -> Result<(), T> {\n        self.0.push(item);\n        Ok(())\n    }\n\n    #[inline(always)]\n    fn len(&self) -> usize {\n        self.0.len()\n    }\n\n    #[inline(always)]\n    fn capacity(&self) -> Option<usize> {\n        None\n    }\n\n    #[inline(always)]\n    fn is_full(&self) -> bool {\n        false\n    }\n\n    #[inline(always)]\n    fn is_empty(&self) -> bool {\n        self.0.is_empty()\n    }\n}\n\nimpl<T> FlavorImpl for List<T> {\n    #[inline(always)]\n    fn try_send(&self, item: &MaybeUninit<T>) -> bool {\n        self.0.push(unsafe { item.assume_init_read() });\n        true\n    }\n\n    #[inline]\n    fn try_recv(&self) -> Option<T> {\n        self.0.pop::<false>()\n    }\n\n    #[inline]\n    fn try_recv_final(&self) -> Option<T> {\n        self.0.pop::<true>()\n    }\n\n    #[inline]\n    fn backoff_limit(&self) -> u16 {\n        crate::backoff::DEFAULT_LIMIT\n    }\n\n    #[inline]\n    fn may_direct_copy(&self) -> bool {\n        false\n    }\n}\n\nimpl<T> FlavorNew for List<T> {\n    #[inline]\n    fn new() -> Self {\n        List::new()\n    }\n}\n\nimpl<T> FlavorSelect for List<T> {\n    #[inline]\n    fn try_select(&self, final_check: bool) -> Option<Token> {\n        if final_check && self.0.is_empty() {\n            return None;\n        }\n        self.0.start_read()\n    }\n\n    #[inline(always)]\n    fn read_with_token(&self, token: Token) -> T {\n        self.0.read(token)\n    
}\n}\n"
  },
  {
    "path": "src/flavor/mod.rs",
    "content": "use crate::waker_registry::*;\nuse std::marker::PhantomData;\nuse std::mem::MaybeUninit;\nuse std::ops::Deref;\n\npub mod array;\npub use array::Array;\nmod array_mpsc;\npub use array_mpsc::ArrayMpsc;\nmod array_spsc;\npub use array_spsc::ArraySpsc;\nmod list;\npub use list::*;\nmod one;\npub use one::*;\nmod one_mpsc;\npub use one_mpsc::OneMpsc;\nmod one_spmc;\npub use one_spmc::OneSpsc;\n\n/// Essential struct for select and read interface\npub(crate) struct Token {\n    pub(crate) pos: *const u8,\n    pub(crate) stamp: usize,\n}\n\nimpl Token {\n    #[inline]\n    pub(crate) fn new(pos: *const u8, stamp: usize) -> Self {\n        Self { pos, stamp }\n    }\n}\n\nimpl Default for Token {\n    #[inline]\n    fn default() -> Self {\n        Self { pos: std::ptr::null_mut(), stamp: 0 }\n    }\n}\n\n// The queue trait should be public because AsyncStream, AsyncRx ... all use its associated type `Item`\n/// Trait for lockless queue, it's safe to use if you don't want the channel mechanisms\npub trait Queue {\n    type Item;\n\n    fn pop(&self) -> Option<Self::Item>;\n\n    fn push(&self, item: Self::Item) -> Result<(), Self::Item>;\n\n    fn len(&self) -> usize;\n\n    fn capacity(&self) -> Option<usize>;\n\n    fn is_full(&self) -> bool;\n\n    fn is_empty(&self) -> bool;\n}\n\n/// Internal flavor interface\npub(crate) trait FlavorImpl: Queue {\n    fn try_send(&self, item: &MaybeUninit<Self::Item>) -> bool;\n\n    #[inline]\n    fn try_send_oneshot(&self, _item: *const Self::Item) -> Option<bool> {\n        unimplemented!()\n    }\n\n    /// For multiplex, only using cached value\n    ///\n    /// (without spin and loading sender value)\n    #[inline]\n    fn try_recv_cached(&self) -> Option<Self::Item> {\n        self.try_recv()\n    }\n\n    fn try_recv(&self) -> Option<Self::Item>;\n\n    fn try_recv_final(&self) -> Option<Self::Item>;\n\n    fn backoff_limit(&self) -> u16;\n\n    #[inline(always)]\n    fn may_direct_copy(&self) -> bool {\n       
 false\n    }\n}\n\npub(crate) trait FlavorSelect: Queue {\n    /// Note: this is internal function, it does not check if the token has other result\n    fn try_select(&self, final_check: bool) -> Option<Token>;\n\n    /// Note: this is internal function, it does not check if the token is valid\n    fn read_with_token(&self, token: Token) -> Self::Item;\n}\n\n// because enum_dispatch does not support associate type\nmacro_rules! queue_dispatch {\n    ($wrap_method: ident)=>{\n        #[inline(always)]\n        fn pop(&self) -> Option<Self::Item> {\n            $wrap_method!(self, pop)\n        }\n\n        #[inline(always)]\n        fn push(&self, item: Self::Item) -> Result<(), Self::Item> {\n            $wrap_method!(self, push item)\n        }\n\n        #[inline(always)]\n        fn len(&self) -> usize {\n            $wrap_method!(self, len)\n        }\n\n        #[inline(always)]\n        fn capacity(&self) -> Option<usize> {\n            $wrap_method!(self, capacity)\n        }\n\n        #[inline(always)]\n        fn is_full(&self) -> bool {\n            $wrap_method!(self, is_full)\n        }\n\n        #[inline(always)]\n        fn is_empty(&self) -> bool {\n            $wrap_method!(self, is_empty)\n        }\n    };\n}\npub(super) use queue_dispatch;\n\n// because enum_dispatch does not support associate type\nmacro_rules! 
flavor_dispatch {\n    ($wrap_method: ident)=>{\n        #[inline(always)]\n        fn try_send(&self, item: &MaybeUninit<Self::Item>) -> bool {\n            $wrap_method!(self, try_send item)\n        }\n\n        #[inline]\n        fn try_send_oneshot(&self, _item: *const Self::Item) -> Option<bool> {\n            $wrap_method!(self, try_send_oneshot _item)\n        }\n\n        #[inline(always)]\n        fn try_recv_cached(&self) -> Option<Self::Item> {\n            $wrap_method!(self, try_recv_cached)\n        }\n\n        #[inline(always)]\n        fn try_recv(&self) -> Option<Self::Item> {\n            $wrap_method!(self, try_recv)\n        }\n\n        #[inline(always)]\n        fn try_recv_final(&self) -> Option<Self::Item> {\n            $wrap_method!(self, try_recv_final)\n        }\n\n        #[inline(always)]\n        fn backoff_limit(&self) -> u16 {\n            $wrap_method!(self, backoff_limit)\n        }\n\n        #[inline(always)]\n        fn may_direct_copy(&self) -> bool {\n            $wrap_method!(self, may_direct_copy)\n        }\n    };\n}\npub(super) use flavor_dispatch;\n\n// because enum_dispatch does not support associate type\nmacro_rules! 
flavor_select_dispatch {\n    ($wrap_method: ident) => {\n        #[inline(always)]\n        fn try_select(&self, final_check: bool) -> Option<Token> {\n            $wrap_method!(self, try_select final_check)\n        }\n\n        #[inline(always)]\n        fn read_with_token(&self, token: Token) -> Self::Item {\n            $wrap_method!(self, read_with_token token)\n        }\n    };\n}\n#[allow(unused_imports)]\npub(super) use flavor_select_dispatch;\n\npub trait Flavor: Send + 'static + FlavorImpl {\n    type Send: RegistrySend<Self::Item>;\n    type Recv: RegistryRecv;\n}\n\npub trait FlavorMP {}\npub trait FlavorMC {}\n\npub trait FlavorNew {\n    fn new() -> Self;\n}\n\npub trait FlavorBounded {\n    fn new_with_bound(size: usize) -> Self;\n}\n\n/// A type wrapper for channel flavor\npub struct FlavorWrap<F, S, R> {\n    inner: F,\n    _phan: PhantomData<fn(&S, &R)>,\n}\n\n/// break evaluation overflow of F\nunsafe impl<F, S, R> Send for FlavorWrap<F, S, R> {}\n\nimpl<F, S, R> FlavorWrap<F, S, R>\nwhere\n    F: FlavorImpl,\n    S: RegistrySend<F::Item>,\n    R: RegistryRecv,\n{\n    #[inline(always)]\n    pub fn new() -> Self\n    where\n        F: FlavorNew,\n    {\n        Self::from_inner(<F as FlavorNew>::new())\n    }\n\n    #[inline(always)]\n    pub(crate) fn from_inner(f: F) -> Self {\n        Self { inner: f, _phan: Default::default() }\n    }\n}\n\nimpl<F, S, R> FlavorNew for FlavorWrap<F, S, R>\nwhere\n    F: FlavorImpl + FlavorNew,\n    S: RegistrySend<F::Item>,\n    R: RegistryRecv,\n{\n    #[inline(always)]\n    fn new() -> Self {\n        Self::from_inner(<F as FlavorNew>::new())\n    }\n}\n\nimpl<F, S, R> FlavorBounded for FlavorWrap<F, S, R>\nwhere\n    F: FlavorImpl + FlavorBounded,\n    S: RegistrySend<F::Item>,\n    R: RegistryRecv,\n{\n    #[inline(always)]\n    fn new_with_bound(size: usize) -> Self {\n        Self::from_inner(<F as FlavorBounded>::new_with_bound(size))\n    }\n}\n\nimpl<F, S, R> Flavor for FlavorWrap<F, S, R>\nwhere\n  
  F: FlavorImpl + 'static,\n    S: RegistrySend<F::Item>,\n    R: RegistryRecv,\n{\n    type Send = S;\n    type Recv = R;\n}\n\nimpl<F, S, R> Deref for FlavorWrap<F, S, R>\nwhere\n    F: FlavorImpl,\n    S: RegistrySend<F::Item>,\n    R: RegistryRecv,\n{\n    type Target = F;\n\n    #[inline(always)]\n    fn deref(&self) -> &F {\n        &self.inner\n    }\n}\n\nimpl<F, R> FlavorMP for FlavorWrap<F, RegistryDummy, R>\nwhere\n    F: FlavorImpl,\n    R: RegistryRecv,\n{\n}\nimpl<T, F, R> FlavorMP for FlavorWrap<F, RegistryMultiSend<T>, R>\nwhere\n    F: FlavorImpl,\n    R: RegistryRecv,\n{\n}\n\nimpl<F: FlavorImpl, S> FlavorMC for FlavorWrap<F, S, RegistryMultiRecv> {}\n\nmacro_rules! wrap_new_type {\n    ($self: expr, $method:ident $($arg:expr)*)=>{\n        $self.inner.$method($($arg)*)\n    };\n}\n\nimpl<F, S, R> Queue for FlavorWrap<F, S, R>\nwhere\n    F: FlavorImpl,\n    S: RegistrySend<F::Item>,\n    R: RegistryRecv,\n{\n    type Item = F::Item;\n    queue_dispatch!(wrap_new_type);\n}\n\nimpl<F, S, R> FlavorImpl for FlavorWrap<F, S, R>\nwhere\n    F: FlavorImpl,\n    S: RegistrySend<F::Item>,\n    R: RegistryRecv,\n{\n    flavor_dispatch!(wrap_new_type);\n}\n\nimpl<F, S, R> FlavorSelect for FlavorWrap<F, S, R>\nwhere\n    F: FlavorImpl + FlavorSelect,\n    S: RegistrySend<F::Item>,\n    R: RegistryRecv,\n{\n    flavor_select_dispatch!(wrap_new_type);\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use std::mem::size_of;\n\n    #[test]\n    fn print_flavor_size() {\n        //        println!(\"Flavor size {}\", size_of::<Flavor<usize>>());\n        println!(\"one size {}\", size_of::<One<usize>>());\n        println!(\"array size {}\", size_of::<Array<usize>>());\n        println!(\"list size {}\", size_of::<List<usize>>());\n    }\n}\n"
  },
  {
    "path": "src/flavor/one.rs",
    "content": "use super::{FlavorImpl, FlavorNew, FlavorSelect, Queue, Token};\nuse crate::backoff::*;\nuse core::cell::UnsafeCell;\nuse core::mem::{needs_drop, MaybeUninit};\nuse core::ptr;\nuse core::sync::atomic::{\n    compiler_fence, AtomicU16, AtomicU32,\n    Ordering::{self, Acquire, Relaxed, Release, SeqCst},\n};\nuse crossbeam_utils::CachePadded;\n\n/// A simplify ArrayQueue specialized for size=1\n///\n/// It contains two slots, allow sender and receiver works truly concurrent,\n/// while the buffer capacity is still 1.\n/// For one-sized queue, contention are higher than larger ArrayQueue, so it's better to use one atomic,\n/// which packs head & tail, to reduce the operation cost, and the stamps in the slot are guards to\n/// access the slot.\npub struct One<T> {\n    pos: CachePadded<AtomicU32>,\n\n    /// The value in this slot.\n    slots: [Slot<T>; 2],\n}\n\nunsafe impl<T> Sync for One<T> {}\nunsafe impl<T> Send for One<T> {}\n\nimpl<T> Queue for One<T> {\n    type Item = T;\n\n    #[inline(always)]\n    fn pop(&self) -> Option<T> {\n        self._pop(Ordering::SeqCst)\n    }\n\n    #[inline(always)]\n    fn push(&self, item: T) -> Result<(), T> {\n        let _item = MaybeUninit::new(item);\n        if unsafe { self._try_push(SeqCst, _item.as_ptr(), Acquire).is_ok() } {\n            Ok(())\n        } else {\n            Err(unsafe { _item.assume_init_read() })\n        }\n    }\n\n    #[inline(always)]\n    fn len(&self) -> usize {\n        if self.is_full() {\n            1\n        } else {\n            0\n        }\n    }\n\n    #[inline(always)]\n    fn capacity(&self) -> Option<usize> {\n        Some(1)\n    }\n\n    #[inline(always)]\n    fn is_full(&self) -> bool {\n        !self.is_empty()\n    }\n\n    #[inline(always)]\n    fn is_empty(&self) -> bool {\n        let pos = self.pos.load(SeqCst);\n        let (head, tail) = Self::unpack(pos);\n        head == tail\n    }\n}\n\nimpl<T> One<T> {\n    #[inline]\n    pub fn new() -> Self {\n    
    Self { pos: CachePadded::new(AtomicU32::new(0)), slots: [Slot::init(0), Slot::init(1)] }\n    }\n\n    #[inline(always)]\n    fn unpack(pos: u32) -> (u16, u16) {\n        let head = (pos >> 16) as u16;\n        let tail = pos as u16;\n        (head, tail)\n    }\n\n    #[inline(always)]\n    fn pack(head: u16, tail: u16) -> u32 {\n        ((head as u32) << 16) | (tail as u32)\n    }\n\n    /// return Ok(true) on ok, Ok(false) on full, Err(()) to spin\n    #[inline(always)]\n    unsafe fn _try_push(\n        &self, order: Ordering, value: *const T, failure: Ordering,\n    ) -> Result<(), ()> {\n        let mut pos = self.pos.load(order);\n        compiler_fence(Acquire);\n        loop {\n            let (head, tail) = Self::unpack(pos);\n            if head == tail {\n                let new_pos = Self::pack(head, tail.wrapping_add(1));\n                match self.pos.compare_exchange_weak(pos, new_pos, SeqCst, failure) {\n                    Ok(_) => {\n                        let index = tail & 0x1;\n                        self.slots[index as usize].write(tail, value);\n                        return Ok(());\n                    }\n                    Err(_pos) => {\n                        pos = _pos;\n                    }\n                }\n            } else {\n                return Err(());\n            }\n        }\n    }\n\n    #[inline(always)]\n    fn _start_read(&self, order: Ordering) -> Option<(u16, u16)> {\n        let mut pos = self.pos.load(order);\n        compiler_fence(Acquire);\n        loop {\n            let (head, tail) = Self::unpack(pos);\n            if head == tail {\n                return None;\n            }\n            let new_pos = Self::pack(tail, tail);\n            match self.pos.compare_exchange_weak(pos, new_pos, SeqCst, Acquire) {\n                Err(_pos) => {\n                    pos = _pos;\n                }\n                Ok(_) => {\n                    let index = head & 0x1;\n                    return 
Some((index, tail));\n                }\n            }\n        }\n    }\n\n    #[inline(always)]\n    fn _pop(&self, order: Ordering) -> Option<T> {\n        if let Some((index, new_head)) = self._start_read(order) {\n            Some(self.slots[index as usize].read(new_head))\n        } else {\n            None\n        }\n    }\n}\n\nstruct Slot<T> {\n    value: UnsafeCell<MaybeUninit<T>>,\n    stamp: AtomicU16,\n}\n\nimpl<T> Slot<T> {\n    #[inline]\n    fn init(i: u16) -> Self {\n        Self { value: UnsafeCell::new(MaybeUninit::uninit()), stamp: AtomicU16::new(i) }\n    }\n\n    #[inline(always)]\n    fn write(&self, tail: u16, value: *const T) {\n        let mut stamp = self.stamp.load(Acquire);\n        if stamp != tail {\n            let mut backoff = Backoff::new();\n            loop {\n                backoff.spin();\n                stamp = self.stamp.load(Acquire);\n                if stamp == tail {\n                    break;\n                }\n            }\n        }\n        unsafe { (*self.value.get()).write(ptr::read(value)) };\n        self.stamp.store(tail.wrapping_add(1), Release);\n    }\n\n    #[inline(always)]\n    fn read(&self, head: u16) -> T {\n        let mut stamp = self.stamp.load(Acquire);\n        if stamp != head {\n            let mut backoff = Backoff::new();\n            loop {\n                backoff.spin();\n                stamp = self.stamp.load(Acquire);\n                if stamp == head {\n                    break;\n                }\n            }\n        }\n        let msg = unsafe { self.value.get().read().assume_init() };\n        // there might be slow reader, update the stamp to allow writer reuse the slot\n        self.stamp.store(head.wrapping_add(1), Release);\n        msg\n    }\n\n    #[inline(always)]\n    fn drop(&self) {\n        unsafe { self.value.get().read().assume_init_drop() };\n    }\n}\n\nimpl<T> Drop for One<T> {\n    fn drop(&mut self) {\n        if needs_drop::<T>() {\n            let pos = 
*self.pos.get_mut();\n            let (head, tail) = Self::unpack(pos);\n            if head != tail {\n                let index = head & 0x1;\n                self.slots[index as usize].drop();\n            }\n        }\n    }\n}\n\nimpl<T> FlavorImpl for One<T> {\n    #[inline(always)]\n    fn try_send(&self, item: &MaybeUninit<T>) -> bool {\n        // Will always double-check with is_full or try_send_oneshot()\n        unsafe { self._try_push(Relaxed, item.as_ptr(), Relaxed).is_ok() }\n    }\n\n    #[inline(always)]\n    fn try_send_oneshot(&self, item: *const T) -> Option<bool> {\n        Some(unsafe { self._try_push(SeqCst, item, Acquire).is_ok() })\n    }\n\n    #[inline(always)]\n    fn try_recv(&self) -> Option<T> {\n        self._pop(Relaxed)\n    }\n\n    #[inline(always)]\n    fn try_recv_final(&self) -> Option<T> {\n        self._pop(SeqCst)\n    }\n\n    #[inline]\n    fn backoff_limit(&self) -> u16 {\n        // Due to bound is too small,\n        // yield with MAX_LIMIT to prevent collapse in high contention\n        crate::backoff::MAX_LIMIT\n    }\n\n    #[inline]\n    fn may_direct_copy(&self) -> bool {\n        true\n    }\n}\n\nimpl<T> FlavorNew for One<T> {\n    #[inline]\n    fn new() -> Self {\n        One::new()\n    }\n}\n\nimpl<T> FlavorSelect for One<T> {\n    #[inline]\n    fn try_select(&self, final_check: bool) -> Option<Token> {\n        if let Some((index, head)) =\n            self._start_read(if final_check { Ordering::SeqCst } else { Ordering::Acquire })\n        {\n            Some(Token::new(\n                &self.slots[index as usize] as *const Slot<T> as *const u8,\n                head as usize,\n            ))\n        } else {\n            None\n        }\n    }\n\n    #[inline(always)]\n    fn read_with_token(&self, token: Token) -> T {\n        let slot: &Slot<T> = unsafe { &*token.pos.cast::<Slot<T>>() };\n        slot.read(token.stamp as u16)\n    }\n}\n"
  },
  {
    "path": "src/flavor/one_mpsc.rs",
    "content": "use super::{FlavorImpl, FlavorNew, FlavorSelect, Queue, Token};\nuse crate::backoff::*;\nuse core::cell::UnsafeCell;\nuse core::mem::{needs_drop, MaybeUninit};\nuse core::ptr;\nuse core::sync::atomic::{\n    AtomicU16, AtomicU32,\n    Ordering::{self, Acquire, Release, SeqCst},\n};\nuse crossbeam_utils::CachePadded;\n\n/// A simplify ArrayQueue specialized for size=1\npub struct OneMpsc<T> {\n    pos: CachePadded<AtomicU32>,\n\n    /// The value in this slot.\n    slots: [Slot<T>; 2],\n}\n\nunsafe impl<T> Sync for OneMpsc<T> {}\nunsafe impl<T> Send for OneMpsc<T> {}\n\nimpl<T> Queue for OneMpsc<T> {\n    type Item = T;\n\n    #[inline(always)]\n    fn pop(&self) -> Option<T> {\n        self._pop(Ordering::SeqCst)\n    }\n\n    #[inline(always)]\n    fn push(&self, item: T) -> Result<(), T> {\n        let _item = MaybeUninit::new(item);\n        if unsafe { self._try_push(SeqCst, _item.as_ptr(), Acquire).is_ok() } {\n            Ok(())\n        } else {\n            Err(unsafe { _item.assume_init_read() })\n        }\n    }\n\n    #[inline(always)]\n    fn len(&self) -> usize {\n        if self.is_full() {\n            1\n        } else {\n            0\n        }\n    }\n\n    #[inline(always)]\n    fn capacity(&self) -> Option<usize> {\n        Some(1)\n    }\n\n    #[inline(always)]\n    fn is_full(&self) -> bool {\n        !self.is_empty()\n    }\n\n    #[inline(always)]\n    fn is_empty(&self) -> bool {\n        let pos = self.pos.load(SeqCst);\n        let (head, tail) = Self::unpack(pos);\n        head == tail\n    }\n}\n\nimpl<T> OneMpsc<T> {\n    #[inline]\n    pub fn new() -> Self {\n        Self { pos: CachePadded::new(AtomicU32::new(0)), slots: [Slot::init(0), Slot::init(1)] }\n    }\n\n    #[inline(always)]\n    fn unpack(pos: u32) -> (u16, u16) {\n        let head = (pos >> 16) as u16;\n        let tail = pos as u16;\n        (head, tail)\n    }\n\n    #[inline(always)]\n    fn pack(head: u16, tail: u16) -> u32 {\n        ((head as u32) 
<< 16) | (tail as u32)\n    }\n\n    /// return Ok(true) on ok, Ok(false) on full, Err(()) to spin\n    #[inline(always)]\n    unsafe fn _try_push(\n        &self, order: Ordering, value: *const T, failure: Ordering,\n    ) -> Result<(), ()> {\n        let mut pos = self.pos.load(order);\n        loop {\n            let (head, tail) = Self::unpack(pos);\n            if head == tail {\n                let new_pos = Self::pack(head, tail.wrapping_add(1));\n                match self.pos.compare_exchange_weak(pos, new_pos, SeqCst, failure) {\n                    Ok(_) => {\n                        let index = tail & 0x1;\n                        self.slots[index as usize].write(tail, value);\n                        return Ok(());\n                    }\n                    Err(_pos) => {\n                        pos = _pos;\n                    }\n                }\n            } else {\n                return Err(());\n            }\n        }\n    }\n\n    #[inline(always)]\n    fn _start_read(&self, order: Ordering) -> Option<(u16, u16)> {\n        let pos = self.pos.load(order);\n        let (head, tail) = Self::unpack(pos);\n        if head == tail {\n            return None;\n        }\n        let index = head & 0x1;\n        Some((index, tail))\n    }\n\n    #[inline(always)]\n    fn _read(&self, slot: &Slot<T>, next_head: u16) -> T {\n        let new_pos = Self::pack(next_head, next_head);\n        // Because we have two slot, the sender will write to next index,\n        // it's safe to update the pos before we read, so that sender may begin to write\n        self.pos.store(new_pos, SeqCst);\n\n        slot.read(next_head)\n    }\n\n    #[inline(always)]\n    fn _pop(&self, order: Ordering) -> Option<T> {\n        if let Some((index, new_head)) = self._start_read(order) {\n            Some(self._read(&self.slots[index as usize], new_head))\n        } else {\n            None\n        }\n    }\n}\n\nstruct Slot<T> {\n    value: UnsafeCell<MaybeUninit<T>>,\n  
  stamp: CachePadded<AtomicU16>,\n}\n\nimpl<T> Slot<T> {\n    #[inline]\n    fn init(i: u16) -> Self {\n        Self {\n            value: UnsafeCell::new(MaybeUninit::uninit()),\n            stamp: CachePadded::new(AtomicU16::new(i)),\n        }\n    }\n\n    #[inline(always)]\n    fn write(&self, tail: u16, value: *const T) {\n        unsafe { (*self.value.get()).write(ptr::read(value)) };\n        self.stamp.store(tail.wrapping_add(1), Release);\n    }\n\n    #[inline(always)]\n    fn read(&self, head: u16) -> T {\n        let mut stamp = self.stamp.load(Acquire);\n        if stamp != head {\n            let mut backoff = Backoff::new();\n            loop {\n                backoff.snooze();\n                stamp = self.stamp.load(Acquire);\n                if stamp == head {\n                    break;\n                }\n            }\n        }\n\n        unsafe { self.value.get().read().assume_init() }\n    }\n\n    #[inline(always)]\n    fn drop(&self) {\n        unsafe { self.value.get().read().assume_init_drop() };\n    }\n}\n\nimpl<T> Drop for OneMpsc<T> {\n    #[inline(always)]\n    fn drop(&mut self) {\n        if needs_drop::<T>() {\n            let pos = *self.pos.get_mut();\n            let (head, tail) = Self::unpack(pos);\n            if head != tail {\n                let index = head & 0x1;\n                self.slots[index as usize].drop();\n            }\n        }\n    }\n}\n\nimpl<T> FlavorImpl for OneMpsc<T> {\n    #[inline(always)]\n    fn try_send(&self, item: &MaybeUninit<T>) -> bool {\n        // Will always double-check with is_full or try_send_oneshot()\n        unsafe { self._try_push(Acquire, item.as_ptr(), Acquire).is_ok() }\n    }\n\n    #[inline(always)]\n    fn try_send_oneshot(&self, item: *const T) -> Option<bool> {\n        Some(unsafe { self._try_push(SeqCst, item, Acquire).is_ok() })\n    }\n\n    #[inline(always)]\n    fn try_recv(&self) -> Option<T> {\n        self._pop(Acquire)\n    }\n\n    #[inline(always)]\n    fn 
try_recv_final(&self) -> Option<T> {\n        self._pop(SeqCst)\n    }\n\n    #[inline]\n    fn backoff_limit(&self) -> u16 {\n        // Due to bound is too small,\n        // yield with MAX_LIMIT to prevent collapse in high contention\n        crate::backoff::MAX_LIMIT\n    }\n\n    #[inline]\n    fn may_direct_copy(&self) -> bool {\n        true\n    }\n}\n\nimpl<T> FlavorNew for OneMpsc<T> {\n    #[inline]\n    fn new() -> Self {\n        OneMpsc::new()\n    }\n}\n\nimpl<T> FlavorSelect for OneMpsc<T> {\n    #[inline]\n    fn try_select(&self, final_check: bool) -> Option<Token> {\n        if let Some((index, head)) =\n            self._start_read(if final_check { Ordering::SeqCst } else { Ordering::Acquire })\n        {\n            Some(Token::new(\n                &self.slots[index as usize] as *const Slot<T> as *const u8,\n                head as usize,\n            ))\n        } else {\n            None\n        }\n    }\n\n    #[inline(always)]\n    fn read_with_token(&self, token: Token) -> T {\n        let slot: &Slot<T> = unsafe { &*token.pos.cast::<Slot<T>>() };\n        self._read(slot, token.stamp as u16)\n    }\n}\n"
  },
  {
    "path": "src/flavor/one_spmc.rs",
    "content": "use super::{FlavorImpl, FlavorNew, FlavorSelect, Queue, Token};\nuse core::cell::UnsafeCell;\nuse core::mem::{needs_drop, MaybeUninit};\nuse crossbeam_utils::CachePadded;\nuse std::ptr;\nuse std::sync::atomic::{\n    AtomicU64,\n    Ordering::{self, Acquire, SeqCst},\n};\n\n/// This is a spsc version of `One` without stamp.\n///\n/// The sender side allow to push and drop it's own previous value, if receivers had not consumed it.\npub type OneSpsc<T> = OneSp<T, false>;\n\n///// This is a spmc version of `One` without stamp, allow replace() on the sender side.\n/////\n///// The sender side allow to push and drop it's own previous value, if receivers had not consumed it.\n/////\n///// NOTE: use lockless technique inspired by the OFLIT paper, miri will probably report data racing issue,\n///// but it's intentional.\n///// This module cannot not separate pop into start_read/read interface,\n///// so it cannot implement Flavor interface.\n//type OneSpmc<T> = OneSp<T, true>;\n\npub struct OneSp<T, const MC: bool> {\n    pos: CachePadded<AtomicU64>,\n\n    /// The value in this slot.\n    slots: [Slot<T>; 2],\n}\n\nunsafe impl<T, const MC: bool> Sync for OneSp<T, MC> {}\nunsafe impl<T, const MC: bool> Send for OneSp<T, MC> {}\n\nimpl<T, const MC: bool> OneSp<T, MC> {\n    #[inline]\n    pub fn new() -> Self {\n        Self { pos: CachePadded::new(AtomicU64::new(0)), slots: [Slot::init(), Slot::init()] }\n    }\n\n    #[inline(always)]\n    fn unpack(pos: u64) -> (u32, u32) {\n        let head = (pos >> 32) as u32;\n        let tail = pos as u32;\n        (head, tail)\n    }\n\n    #[inline(always)]\n    fn pack(head: u32, tail: u32) -> u64 {\n        ((head as u64) << 32) | (tail as u64)\n    }\n\n    #[inline(always)]\n    pub fn is_empty(&self) -> bool {\n        let pos = self.pos.load(SeqCst);\n        let (head, tail) = Self::unpack(pos);\n        head == tail\n    }\n\n    #[inline(always)]\n    pub fn len(&self) -> usize {\n        if 
self.is_empty() {\n            0\n        } else {\n            1\n        }\n    }\n\n    #[inline]\n    fn try_push(&self, value: *const T, order: Ordering) -> bool {\n        let pos = self.pos.load(order);\n        let (head, tail) = Self::unpack(pos);\n        if head == tail {\n            let new_tail = tail.wrapping_add(1);\n            let index = new_tail & 0x1;\n            self.slots[index as usize].write(value);\n            let new_pos = Self::pack(head, new_tail);\n            self.pos.store(new_pos, Ordering::SeqCst);\n            true\n        } else {\n            false\n        }\n    }\n}\n\nimpl<T, const MC: bool> Drop for OneSp<T, MC> {\n    fn drop(&mut self) {\n        if needs_drop::<T>() {\n            let pos = *self.pos.get_mut();\n            let (head, tail) = Self::unpack(pos);\n            if head != tail {\n                let index = tail & 0x1;\n                self.slots[index as usize].drop();\n            }\n        }\n    }\n}\n\nimpl<T> OneSpsc<T> {\n    #[inline(always)]\n    fn _read(&self, slot: &Slot<T>, next_head: u32) -> T {\n        // NOTE: This is only valid for SPSC (not for Spmc)\n        // Because we have two slot, the sender will write to next index,\n        // it's safe to update the pos before we read, so that sender may begin to write\n        let new_pos = Self::pack(next_head, next_head);\n        self.pos.store(new_pos, SeqCst);\n        slot.read()\n    }\n\n    #[inline(always)]\n    fn _pop(&self, order: Ordering) -> Option<T> {\n        if let Some(tail) = self.start_read(order) {\n            let index = (tail & 0x1) as usize;\n            Some(self._read(&self.slots[index], tail))\n        } else {\n            None\n        }\n    }\n\n    #[inline(always)]\n    fn start_read(&self, order: Ordering) -> Option<u32> {\n        let pos = self.pos.load(order);\n        let (head, tail) = Self::unpack(pos);\n        if head == tail {\n            None\n        } else {\n            
debug_assert_eq!(head.wrapping_add(1), tail);\n            Some(tail)\n        }\n    }\n}\n\nstruct Slot<T> {\n    value: UnsafeCell<MaybeUninit<T>>,\n}\n\nimpl<T> Slot<T> {\n    #[inline]\n    fn init() -> Self {\n        Self { value: UnsafeCell::new(MaybeUninit::uninit()) }\n    }\n\n    #[inline(always)]\n    fn write(&self, value: *const T) {\n        unsafe { (*self.value.get()).write(ptr::read(value)) };\n    }\n\n    //    #[inline(always)]\n    //    fn read_into(&self, dest: *mut T) {\n    //        unsafe {\n    //            let src_ptr = (*self.value.get()).as_ptr();\n    //            ptr::copy_nonoverlapping(src_ptr, dest, 1);\n    //        }\n    //    }\n\n    #[inline(always)]\n    fn read(&self) -> T {\n        unsafe { self.value.get().read().assume_init() }\n    }\n\n    #[inline(always)]\n    fn drop(&self) {\n        unsafe { self.value.get().read().assume_init_drop() };\n    }\n}\n\n/*\nimpl<T> OneSpmc<T> {\n    #[inline]\n    pub fn replace(&self, value: T) {\n        let item = MaybeUninit::new(value);\n        self._replace(item.as_ptr());\n    }\n\n    /// return Ok(true) on ok, Ok(false) on full, Err(()) to spin\n    #[inline(always)]\n    fn _replace(&self, value: *const T) {\n        // No one will advance tail except me\n        let mut pos = self.pos.load(Acquire);\n        let (mut head, tail) = Self::unpack(pos);\n        let new_tail = tail.wrapping_add(1);\n        let index = new_tail & 0x1;\n        self.slots[index as usize].write(value);\n        loop {\n            if head == tail {\n                let new_pos = Self::pack(head, new_tail);\n                self.pos.store(new_pos, Ordering::SeqCst);\n                return;\n            } else {\n                debug_assert_eq!(head.wrapping_add(1), tail);\n                let new_pos = Self::pack(tail, new_tail);\n                match self.pos.compare_exchange_weak(pos, new_pos, SeqCst, Acquire) {\n                    Ok(_) => {\n                        let index = 
tail & 0x1;\n                        self.slots[index as usize].drop();\n                        return;\n                    }\n                    Err(_pos) => {\n                        if pos != _pos {\n                            pos = _pos;\n                            let _tail;\n                            (head, _tail) = Self::unpack(_pos);\n                            debug_assert_eq!(_tail, tail);\n                        }\n                        continue;\n                    }\n                }\n            }\n        }\n    }\n\n    #[inline(always)]\n    fn _pop(&self, order: Ordering) -> Option<T> {\n        let mut pos = self.pos.load(order);\n        let mut value_copy: MaybeUninit<T> = MaybeUninit::uninit();\n        loop {\n            let (head, tail) = Self::unpack(pos);\n            if head == tail {\n                return None;\n            }\n            let index = tail & 0x1;\n            self.slots[index as usize].read_into(value_copy.as_mut_ptr());\n            debug_assert_eq!(head.wrapping_add(1), tail);\n            let new_pos = Self::pack(tail, tail);\n            match self.pos.compare_exchange_weak(pos, new_pos, SeqCst, order) {\n                Err(_pos) => {\n                    // Other might read the value, or send might use replace to cancel the value,\n                    // should be cas suc to confirm\n                    pos = _pos;\n                }\n                Ok(_) => {\n                    return Some(unsafe { value_copy.assume_init_read() });\n                }\n            }\n        }\n    }\n}\n\nimpl<T> Queue for OneSpmc<T> {\n    type Item = T;\n\n    #[inline(always)]\n    fn len(&self) -> usize {\n        if self.is_empty() {\n            0\n        } else {\n            1\n        }\n    }\n\n    #[inline(always)]\n    fn is_empty(&self) -> bool {\n        Self::is_empty(self)\n    }\n\n    #[inline(always)]\n    fn capacity(&self) -> Option<usize> {\n        Some(1)\n    }\n\n    
#[inline(always)]\n    fn is_full(&self) -> bool {\n        !Self::is_empty(self)\n    }\n\n    #[inline(always)]\n    fn pop(&self) -> Option<T> where T: Send {\n        self._pop(Ordering::SeqCst)\n    }\n\n    #[inline]\n    fn push(&self, value: T) -> Result<(), T> where T: Send {\n        let item = MaybeUninit::new(value);\n        if self.try_push(item.as_ptr(), Ordering::SeqCst) {\n            Ok(())\n        } else {\n            Err(unsafe { item.assume_init_read() })\n        }\n    }\n}\n*/\n\nimpl<T> Queue for OneSpsc<T> {\n    type Item = T;\n\n    #[inline(always)]\n    fn len(&self) -> usize {\n        if self.is_empty() {\n            0\n        } else {\n            1\n        }\n    }\n\n    #[inline(always)]\n    fn is_empty(&self) -> bool {\n        Self::is_empty(self)\n    }\n\n    #[inline(always)]\n    fn capacity(&self) -> Option<usize> {\n        Some(1)\n    }\n\n    #[inline(always)]\n    fn is_full(&self) -> bool {\n        !Self::is_empty(self)\n    }\n\n    #[inline(always)]\n    fn pop(&self) -> Option<T> {\n        self._pop(Ordering::SeqCst)\n    }\n\n    #[inline]\n    fn push(&self, value: T) -> Result<(), T> {\n        let item = MaybeUninit::new(value);\n        if self.try_push(item.as_ptr(), Ordering::SeqCst) {\n            Ok(())\n        } else {\n            Err(unsafe { item.assume_init_read() })\n        }\n    }\n}\n\nimpl<T> FlavorImpl for OneSpsc<T> {\n    #[inline(always)]\n    fn try_send(&self, item: &MaybeUninit<T>) -> bool {\n        self.try_push(item.as_ptr(), Acquire)\n    }\n\n    #[inline(always)]\n    fn try_send_oneshot(&self, item: *const T) -> Option<bool> {\n        Some(self.try_push(item, SeqCst))\n    }\n\n    #[inline(always)]\n    fn try_recv(&self) -> Option<T> {\n        self._pop(Ordering::Acquire)\n    }\n\n    #[inline]\n    fn try_recv_final(&self) -> Option<T> {\n        self._pop(Ordering::SeqCst)\n    }\n\n    #[inline]\n    fn backoff_limit(&self) -> u16 {\n        // Due to bound is too 
small,\n        // yield with MAX_LIMIT to prevent collapse in high contention\n        crate::backoff::MAX_LIMIT\n    }\n\n    #[inline]\n    fn may_direct_copy(&self) -> bool {\n        // NOTE sender has no CAS, not safe to direct copy\n        false\n    }\n}\n\nimpl<T> FlavorNew for OneSpsc<T> {\n    #[inline]\n    fn new() -> Self {\n        OneSpsc::new()\n    }\n}\n\nimpl<T> FlavorSelect for OneSpsc<T> {\n    #[inline]\n    fn try_select(&self, final_check: bool) -> Option<Token> {\n        if let Some(tail) =\n            self.start_read(if final_check { Ordering::SeqCst } else { Ordering::Acquire })\n        {\n            let index = (tail & 0x1) as usize;\n            Some(Token::new(&self.slots[index] as *const Slot<T> as *const u8, tail as usize))\n        } else {\n            None\n        }\n    }\n\n    #[inline(always)]\n    fn read_with_token(&self, token: Token) -> T {\n        let slot: &Slot<T> = unsafe { &*token.pos.cast::<Slot<T>>() };\n        self._read(slot, token.stamp as u32)\n    }\n}\n"
  },
  {
    "path": "src/lib.rs",
    "content": "#![cfg_attr(docsrs, feature(doc_cfg))]\n#![cfg_attr(docsrs, allow(unused_attributes))]\n\n//! # Crossfire\n//!\n//! High-performance lockless spsc/mpsc/mpmc channels, algorithm derives crossbeam with improvements.\n//!\n//! It supports async contexts and bridges the gap between async and blocking contexts.\n//!\n//! For the concept, please refer to the [wiki](https://github.com/frostyplanet/crossfire-rs/wiki).\n//!\n//! ## Version history\n//!\n//! * v1.0: Used in production since 2022.12.\n//!\n//! * v2.0: [2025.6] Refactored the codebase and API\n//!   by removing generic types from the ChannelShared type, which made it easier to code with.\n//!\n//! * v2.1: [2025.9] Removed the dependency on crossbeam-channel\n//!   and implemented with [a modified version of crossbeam-queue](https://github.com/frostyplanet/crossfire-rs/wiki/crossbeam-related),\n//!   brings 2x performance improvements for both async and blocking contexts.\n//!\n//! * v3.0: [2026.1] Refactored API back to generic flavor interface, added [select].\n//!   Dedicated optimization: Bounded SPSC +70%, MPSC +30%, one-size +20%.\n//!   Eliminate enum dispatch cost, async performance improved for another 33%. Checkout [compat] for migiration from v2.x.\n//!\n//! ## Test status\n//!\n//! Refer to the [README](https://github.com/frostyplanet/crossfire-rs?tab=readme-ov-file#test-status) page for known issue on specified platform and runtime.\n//!\n//! ## Performance\n//!\n//! Being a lockless channel, crossfire outperforms other async-capable channels.\n//! And thanks to a lighter notification mechanism, most cases in blocking context are even\n//! better than the original crossbeam-channel,\n//!\n//! benchmark data is posted on [wiki](https://github.com/frostyplanet/crossfire-rs/wiki/benchmark-v3.0.0-2026%E2%80%9001%E2%80%9018).\n//!\n//! Also, being a lockless channel, the algorithm relies on spinning and yielding. Spinning is good on\n//! 
multi-core systems, but not friendly to single-core systems (like virtual machines).\n//! So we provide a function [detect_backoff_cfg()] to detect the running platform.\n//! Calling it within the initialization section of your code, will get a 2x performance boost on\n//! VPS.\n//!\n//! The benchmark is written in the criterion framework. You can run the benchmark by:\n//!\n//! ``` shell\n//! make bench crossfire\n//! make bench crossfire_select\n//! ```\n//!\n//! ## APIs\n//!\n//! ### Concurrency Modules\n//!\n//! - [spsc], [mpsc], [mpmc]. Each has different underlying implementation\n//!   optimized to its concurrent model.\n//!   The SP or SC interface is only for non-concurrent operation. It's more memory-efficient in waker registration,\n//!   and has atomic ops cost reduced in the lockless algorithm.\n//!\n//! - [oneshot] has its special sender/receiver type because using `Tx` / `Rx` will be too heavy.\n//!\n//! - [select]:\n//!     - [Select<'a>](crate::select::Select): crossbeam-channel style type erased API, borrows receiver address and select with \"token\"\n//!     - [Multiplex](crate::select::Multiplex): Multiplex stream that owns multiple receiver, select from the same type of\n//!       channel flavors, for the same type of message.\n//!\n//! - [waitgroup]: High performance WaitGroup which allows custom threshold\n//!\n//! ### Flavors\n//!\n//! The following lockless queues are expose in [flavor] module, and each one have type alias in spsc/mpsc/mpmc:\n//!\n//! - `List` (which use crossbeam `SegQueue`)\n//! - `Array` (which is an enum that wraps crossbeam `ArrayQueue`, and a `One` if init with size<=1)\n//!   - For a bounded channel, a 0 size case is not supported yet. (rewrite as 1 size).\n//!   - The implementation for spsc & mpsc is simplified from mpmc version.\n//! - `One` (which derives from `ArrayQueue` algorithm, but have better performance in size=1\n//!   
scenario, because it have two slots to allow reader and writer works concurrently)\n//! - `Null` (See the doc [crate::null]), for cancellation purpose channel, that only wakeup on\n//!   closing.\n//!\n//! **NOTE** :\n//! Although the name [Array](crate::mpmc::Array), [List](crate::mpmc::List) are the same between spsc/mpsc/mpmc module,\n//! they are different type alias local to its parent module. We suggest distinguish by\n//! namespace when import for use.\n//!\n//! ### Channel builder function\n//!\n//! Aside from function `bounded_*`, `unbounded_*` which specify the sender / receiver type,\n//! each module has [build()](crate::mpmc::build()) and [new()](crate::mpmc::new()) function, which can apply to any channel flavors, and any async/blocking combinations.\n//!\n//!\n//! ### Types\n//!\n//! <table align=\"center\" cellpadding=\"20\">\n//! <tr> <th rowspan=\"2\"> Context</th><th colspan=\"2\" align=\"center\">Sender (Producer)</th> <th colspan=\"2\" align=\"center\">Receiver (Consumer)</th> </tr>\n//! <tr> <td>Single</td> <td>Multiple</td><td>Single</td><td>Multiple</td></tr>\n//! <tr><td align=\"center\" rowspan=\"2\"><b>Blocking</b></td><td colspan=\"2\" align=\"center\"><a href=\"trait.BlockingTxTrait.html\">BlockingTxTrait</a></td>\n//! <td colspan=\"2\" align=\"center\"><a href=\"trait.BlockingRxTrait.html\">BlockingRxTrait</a></td></tr>\n//! <tr>\n//! <td align=\"center\"><a href=\"struct.Tx.html\">Tx</a></td>\n//! <td align=\"center\"><a href=\"struct.MTx.html\">MTx</a></td>\n//! <td align=\"center\"><a href=\"struct.Rx.html\">Rx</a></td>\n//! <td align=\"center\"><a href=\"struct.MRx.html\">MRx</a></td> </tr>\n//!\n//! <tr><td><b>Weak reference</b></td><td></td><td><a href=\"struct.WeakTx.html\">WeakTx</a></td></tr>\n//!\n//! <tr><td align=\"center\" rowspan=\"2\"><b>Async</b></td>\n//! <td colspan=\"2\" align=\"center\"><a href=\"trait.AsyncTxTrait.html\">AsyncTxTrait</a></td>\n//! 
<td colspan=\"2\" align=\"center\"><a href=\"trait.AsyncRxTrait.html\">AsyncRxTrait</a></td></tr>\n//! <tr><td><a href=\"struct.AsyncTx.html\">AsyncTx</a></td>\n//! <td><a href=\"struct.MAsyncTx.html\">MAsyncTx</a></td><td><a href=\"struct.AsyncRx.html\">AsyncRx</a></td>\n//! <td><a href=\"struct.MAsyncRx.html\">MAsyncRx</a></td></tr>\n//! </table>\n//!\n//! *Safety*: For the SP / SC version, [AsyncTx], [AsyncRx], [Tx], and [Rx] are not `Clone` and without `Sync`.\n//! Although can be moved to other threads, but not allowed to use send/recv while in an Arc. (Refer to the compile_fail\n//! examples in the type document).\n//!\n//! The benefit of using the SP / SC API is completely lockless waker registration, in exchange for a performance boost.\n//!\n//! The sender/receiver can use the **`From`** trait to convert between blocking and async context\n//! counterparts (refer to the [example](#example) below)\n//!\n//! ### Error types\n//!\n//! Error types are the same as crossbeam-channel:\n//!\n//! [TrySendError], [SendError], [SendTimeoutError], [TryRecvError], [RecvError], [RecvTimeoutError]\n//!\n//! ### Async compatibility\n//!\n//! Tested on tokio-1.x and async-std-1.x, crossfire is runtime-agnostic.\n//!\n//! The following scenarios are considered:\n//!\n//! * The [AsyncTx::send()] and [AsyncRx::recv()] operations are **cancellation-safe** in an async context.\n//!   You can safely use the select! macro and timeout() function in tokio/futures in combination with recv().\n//!   On cancellation, [SendFuture] and [RecvFuture] will trigger drop(), which will clean up the state of the waker,\n//!   making sure there is no memory-leak and deadlock.\n//!   But you cannot know the true result from SendFuture, since it's dropped\n//!   upon cancellation. Thus, we suggest using [AsyncTx::send_timeout()] instead.\n//!\n//! * When the \"tokio\" or \"async_std\" feature is enabled, we also provide two additional functions:\n//!\n//! 
- [send_timeout()](crate::AsyncTx::send_timeout()), which will return the message that failed to be sent in\n//!   [SendTimeoutError]. We guarantee the result is atomic. Alternatively, you can use\n//!   [send_with_timer()](crate::AsyncTx::send_with_timer()).\n//!\n//! - [recv_timeout()](crate::AsyncRx::recv_timeout()), we guarantee the result is atomic.\n//!   Alternatively, you can use [recv_with_timer()](crate::AsyncRx::recv_with_timer())\n//!\n//! * The waker footprint:\n//!\n//! When using a multi-producer and multi-consumer scenario, there's a small memory overhead to pass along a `Weak`\n//! reference of wakers.\n//! Because we aim to be lockless, when the sending/receiving futures are canceled (like tokio::time::timeout()),\n//! it might trigger an immediate cleanup if the try-lock is successful, otherwise will rely on lazy cleanup.\n//! (This won't be an issue because weak wakers will be consumed by actual message send and recv).\n//! On an idle-select scenario, like a notification for close, the waker will be reused as much as possible\n//! if poll() returns pending.\n//!\n//! * Handle written future:\n//!\n//! The future object created by [AsyncTx::send()], [AsyncTx::send_timeout()], [AsyncRx::recv()],\n//! [AsyncRx::recv_timeout()] is `Sized`. You don't need to put them in `Box`.\n//!\n//! If you like to use poll function directly for complex behavior, you can call\n//! [AsyncSink::poll_send()](crate::sink::AsyncSink::poll_send()) or [AsyncStream::poll_item()](crate::stream::AsyncStream::poll_item()) with Context.\n\n//!\n//! ## Usage\n//!\n//! Cargo.toml:\n//! ```toml\n//! [dependencies]\n//! crossfire = \"3.1\"\n//! ```\n//!\n//! ### Feature flags\n//!\n//! * `compat`: Enable the [compat] model, which has the same API namespace struct as V2.x\n//!\n//! * `tokio`: Enable [send_timeout](crate::AsyncTx::send_timeout()), [recv_timeout](crate::AsyncRx::recv_timeout()) with tokio sleep function. (conflict\n//!   with `async_std` feature)\n//!\n//! 
* `async_std`: Enable send_timeout, recv_timeout with async-std sleep function. (conflict\n//!   with `tokio` feature)\n//!\n//! * `trace_log`: Development mode, to enable internal log while testing or benchmark, to debug deadlock issues.\n//!\n//! ### Example\n//!\n//! blocking / async sender receiver mixed together\n//!\n//! ```rust\n//!\n//! extern crate crossfire;\n//! use crossfire::*;\n//! #[macro_use]\n//! extern crate tokio;\n//! use tokio::time::{sleep, interval, Duration};\n//!\n//! #[tokio::main]\n//! async fn main() {\n//!     let (tx, rx) = mpmc::bounded_async::<usize>(100);\n//!     let mut recv_counter = 0;\n//!     let mut co_tx = Vec::new();\n//!     let mut co_rx = Vec::new();\n//!     const ROUND: usize = 1000;\n//!\n//!     let _tx: MTx<mpmc::Array<usize>> = tx.clone().into_blocking();\n//!     co_tx.push(tokio::task::spawn_blocking(move || {\n//!         for i in 0..ROUND {\n//!             _tx.send(i).expect(\"send ok\");\n//!         }\n//!     }));\n//!     co_tx.push(tokio::spawn(async move {\n//!         for i in 0..ROUND {\n//!             tx.send(i).await.expect(\"send ok\");\n//!         }\n//!     }));\n//!     let _rx: MRx<mpmc::Array<usize>> = rx.clone().into_blocking();\n//!     co_rx.push(tokio::task::spawn_blocking(move || {\n//!         let mut count: usize = 0;\n//!         'A: loop {\n//!             match _rx.recv() {\n//!                 Ok(_i) => {\n//!                     count += 1;\n//!                 }\n//!                 Err(_) => break 'A,\n//!             }\n//!         }\n//!         count\n//!     }));\n//!     co_rx.push(tokio::spawn(async move {\n//!         let mut count: usize = 0;\n//!         'A: loop {\n//!             match rx.recv().await {\n//!                 Ok(_i) => {\n//!                     count += 1;\n//!                 }\n//!                 Err(_) => break 'A,\n//!             }\n//!         }\n//!         count\n//!     }));\n//!     for th in co_tx {\n//!         
let _ = th.await.unwrap();\n//!     }\n//!     for th in co_rx {\n//!         recv_counter += th.await.unwrap();\n//!     }\n//!     assert_eq!(recv_counter, ROUND * 2);\n//! }\n//! ```\n\n#[allow(private_bounds)]\n/// lockless queue implementation and channel flavor traits\npub mod flavor;\nmod shared;\npub use shared::ChannelShared;\n\nmod backoff;\npub use backoff::detect_backoff_cfg;\n\n#[allow(dead_code)]\nmod collections;\n#[allow(dead_code)]\nmod waker;\n#[allow(private_bounds)]\nmod waker_registry;\n\npub mod mpmc;\npub mod mpsc;\npub mod oneshot;\npub mod spsc;\npub mod waitgroup;\n\nmod blocking_tx;\npub use blocking_tx::*;\n#[allow(private_bounds)]\nmod blocking_rx;\npub use blocking_rx::*;\nmod async_tx;\npub use async_tx::*;\n#[allow(private_bounds)]\nmod async_rx;\npub use async_rx::*;\nmod weak;\npub use weak::WeakTx;\n\n#[cfg(feature = \"compat\")]\npub mod compat;\npub mod null;\npub mod sink;\npub mod stream;\n\nmod crossbeam;\npub use crossbeam::err::*;\n#[allow(private_bounds)]\npub mod select;\n\n/// logging macro for development\n#[macro_export(local_inner_macros)]\nmacro_rules! trace_log {\n    ($($arg:tt)+)=>{\n        #[cfg(feature=\"trace_log\")]\n        {\n            log::debug!($($arg)+);\n        }\n    };\n}\n\n/// logging macro for development under tokio\n#[macro_export(local_inner_macros)]\nmacro_rules! 
tokio_task_id {\n    () => {{\n        #[cfg(all(feature = \"trace_log\", feature = \"tokio\"))]\n        {\n            tokio::task::try_id()\n        }\n        #[cfg(not(all(feature = \"trace_log\", feature = \"tokio\")))]\n        {\n            \"\"\n        }\n    }};\n}\n\nuse flavor::Flavor;\nuse std::sync::Arc;\n\n/// type limiter for channel builder\npub trait SenderType {\n    type Flavor: Flavor;\n    fn new(shared: Arc<ChannelShared<Self::Flavor>>) -> Self;\n}\n\n/// type limiter for channel builder\npub trait ReceiverType: AsRef<ChannelShared<Self::Flavor>> {\n    type Flavor: Flavor;\n\n    fn new(shared: Arc<ChannelShared<Self::Flavor>>) -> Self;\n}\n\npub trait NotCloneable {}\n"
  },
  {
    "path": "src/mpmc.rs",
    "content": "//! Multiple producers, multiple consumers.\n//!\n//! The optimization assumes multiple consumers. The waker registration of the receiver is less efficient compared to `mpsc`.\n//!\n//! **NOTE**: For the MC (multiple consumer) version, [MAsyncTx], [MAsyncRx], [MTx] and [MRx] are `Clone` and implement `Sync`.\n//! They can be safely used with `send`/`recv` while in an `Arc`.\n//!\n//! # Examples\n//!\n//! ```\n//! use crossfire::*;\n//! use std::thread;\n//!\n//! struct Worker {\n//!     tx: MAsyncTx<mpmc::Array<usize>>,\n//! }\n//!\n//! impl Worker {\n//!     pub fn new() -> Self {\n//!         // use type hint\n//!         let (tx, rx): (MAsyncTx<_>, MRx<_>) = mpmc::build(mpmc::Array::<usize>::new(100));\n//!         // equals to\n//!         // let (tx, rx): (MAsyncTx<_>, MRx<_>) = mpmc::bounded_blocking::<usize>(100);\n//!         for _ in 0..4 {\n//!             let _rx = rx.clone();\n//!             thread::spawn(move || {\n//!                 match _rx.recv() {\n//!                     Ok(item)=>{\n//!                         println!(\"recv job {}\", item);\n//!                     }\n//!                     Err(_)=>return,\n//!                 }\n//!             });\n//!         }\n//!         Self{\n//!             tx,\n//!         }\n//!     }\n//!     pub async fn submit(&self, msg: usize) {\n//!         self.tx.send(msg).await.expect(\"send\");\n//!     }\n//! }\n//! 
```\n\nuse crate::async_rx::*;\nuse crate::async_tx::*;\nuse crate::blocking_rx::*;\nuse crate::blocking_tx::*;\nuse crate::flavor::{\n    flavor_dispatch, flavor_select_dispatch, queue_dispatch, Flavor, FlavorBounded, FlavorImpl,\n    FlavorMC, FlavorMP, FlavorNew, FlavorWrap, Queue,\n};\nuse crate::null::CloseHandle;\nuse crate::shared::*;\nuse crate::{ReceiverType, SenderType};\nuse std::mem::MaybeUninit;\n\n/// Flavor Type for unbounded MPMC channel\npub type List<T> = FlavorWrap<crate::flavor::List<T>, RegistryDummy, RegistryMultiRecv>;\n\n/// Flavor Type for one-sized MPMC channel\npub type One<T> = FlavorWrap<crate::flavor::One<T>, RegistryMultiSend<T>, RegistryMultiRecv>;\n\n/// Flavor Type for bounded MPMC channel\n#[allow(clippy::large_enum_variant)]\npub enum Array<T> {\n    Array(crate::flavor::Array<T>),\n    One(crate::flavor::One<T>),\n}\n\nimpl<T> Array<T> {\n    #[inline]\n    pub fn new(size: usize) -> Self {\n        if size <= 1 {\n            Self::One(crate::flavor::One::new())\n        } else {\n            Self::Array(crate::flavor::Array::<T>::new(size))\n        }\n    }\n}\n\nimpl<T> FlavorMP for Array<T> {}\nimpl<T> FlavorMC for Array<T> {}\n\nmacro_rules! 
wrap_array {\n    ($self: expr, $method:ident $($arg:expr)*)=>{\n        match $self {\n            Self::Array(inner) => inner.$method($($arg)*),\n            Self::One(inner) => inner.$method($($arg)*),\n        }\n    };\n}\n\nimpl<T> Queue for Array<T> {\n    type Item = T;\n    queue_dispatch!(wrap_array);\n}\n\nimpl<T> FlavorImpl for Array<T> {\n    flavor_dispatch!(wrap_array);\n}\n\nimpl<T> FlavorSelect for Array<T> {\n    flavor_select_dispatch!(wrap_array);\n}\n\nimpl<T> FlavorBounded for Array<T> {\n    #[inline(always)]\n    fn new_with_bound(size: usize) -> Self {\n        Self::new(size)\n    }\n}\n\nimpl<T: 'static> Flavor for Array<T> {\n    type Send = RegistryMultiSend<T>;\n    type Recv = RegistryMultiRecv;\n}\n\n/// The generic builder for all mpmc channel types with a new method (except Array).\n///\n/// Initialize sender and receiver types from a flavor type,\n/// you can let the compiler to infer the type according to return type signature.\n/// (the falvor might have different new() method, but the rest is the same.\n/// # Examples\n///\n/// ```rust\n/// use crossfire::*;\n/// let (tx, rx): (MTx<_>, MRx<_>) = mpmc::new::<mpmc::List<i32>, _, _>();\n/// let (tx, rx): (MAsyncTx<mpmc::One<usize>>, MRx<mpmc::One<usize>>) = mpmc::new();\n/// ```\n#[inline(always)]\npub fn new<F, S, R>() -> (S, R)\nwhere\n    F: Flavor + FlavorNew + FlavorMP + FlavorMC,\n    S: SenderType<Flavor = F> + Clone,\n    R: ReceiverType<Flavor = F> + Clone,\n{\n    build::<F, S, R>(F::new())\n}\n\n/// The generic builder for all mpmc channel types.\n///\n/// Initialize sender and receiver types from a flavor type,\n/// you can let the compiler to infer the type according to return type signature.\n/// (the falvor might have different new() method, but the rest is the same.\n///\n/// # Examples\n///\n/// ```rust\n/// use crossfire::{*, mpmc::*};\n/// let (tx, rx): (MTx<_>, MRx<_>) = build::<List<i32>, _, _>(List::new());\n/// let (tx, rx): (MAsyncTx<One<usize>>, 
MRx<One<usize>>)  = build(One::new());\n/// ```\n#[inline(always)]\npub fn build<F, S, R>(flavor: F) -> (S, R)\nwhere\n    F: Flavor + FlavorMP + FlavorMC,\n    S: SenderType<Flavor = F> + Clone,\n    R: ReceiverType<Flavor = F> + Clone,\n{\n    let shared = ChannelShared::new(flavor, F::Send::new(), F::Recv::new());\n    (S::new(shared.clone()), R::new(shared))\n}\n\n#[inline]\nfn unbounded_new<T, R>() -> (MTx<List<T>>, R)\nwhere\n    T: 'static,\n    R: ReceiverType<Flavor = List<T>> + Clone,\n{\n    build::<List<T>, MTx<List<T>>, R>(List::<T>::from_inner(crate::flavor::List::<T>::new()))\n}\n\n#[inline]\npub fn unbounded_blocking<T>() -> (MTx<List<T>>, MRx<List<T>>)\nwhere\n    T: 'static,\n{\n    unbounded_new()\n}\n\n#[inline]\npub fn unbounded_async<T>() -> (MTx<List<T>>, MAsyncRx<List<T>>)\nwhere\n    T: 'static,\n{\n    unbounded_new()\n}\n\nfn bounded_new<T, S, R>(size: usize) -> (S, R)\nwhere\n    T: 'static,\n    S: SenderType<Flavor = Array<T>> + Clone,\n    R: ReceiverType<Flavor = Array<T>> + Clone,\n{\n    build::<Array<T>, S, R>(Array::<T>::new(size))\n}\n\n/// MPMC Bounded channel builder\n///\n/// # Examples\n///\n/// ```rust\n/// use crossfire::{mpmc, *};\n/// let (tx, rx) = mpmc::bounded_blocking::<i32>(10);\n/// tx.send(42).unwrap();\n/// assert_eq!(rx.recv(), Ok(42));\n/// ```\n/// Creates a bounded channel with a pair of blocking sender and receiver.\n///\n/// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.\n#[inline]\npub fn bounded_blocking<T>(size: usize) -> (MTx<Array<T>>, MRx<Array<T>>)\nwhere\n    T: 'static,\n{\n    bounded_new(size)\n}\n\n/// Creates a bounded channel with a pair of async sender and receiver.\n///\n/// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.\n#[inline]\npub fn bounded_async<T>(size: usize) -> (MAsyncTx<Array<T>>, MAsyncRx<Array<T>>)\nwhere\n    T: 'static,\n{\n    bounded_new(size)\n}\n\n/// Creates a 
bounded channel with a pair of blocking sender and async receiver.\n///\n/// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.\n#[inline]\npub fn bounded_blocking_async<T>(size: usize) -> (MTx<Array<T>>, MAsyncRx<Array<T>>)\nwhere\n    T: 'static,\n{\n    bounded_new(size)\n}\n\n/// Creates a bounded channel with a pair of async sender and blocking receiver.\n///\n/// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.\n#[inline]\npub fn bounded_async_blocking<T>(size: usize) -> (MAsyncTx<Array<T>>, MRx<Array<T>>)\nwhere\n    T: 'static,\n{\n    bounded_new(size)\n}\n\n/// Flavor type for close notification, refer to [crate::null] for usage\npub type Null = FlavorWrap<crate::null::Null, RegistryDummy, RegistryMultiRecv>;\n\nimpl Null {\n    #[inline(always)]\n    pub fn new_blocking(&self) -> (CloseHandle<Null>, MRx<Null>) {\n        new()\n    }\n\n    #[inline(always)]\n    pub fn new_async(self) -> (CloseHandle<Null>, MAsyncRx<Null>) {\n        new()\n    }\n}\n"
  },
  {
    "path": "src/mpsc.rs",
    "content": "//! Multiple producers, single consumer.\n//!\n//! The optimization assumes a single consumer. The waker registration of the receiver is lossless compared to `mpmc`.\n//!\n//! **NOTE**:\n//! - [AsyncRx] and [Rx] are not `Clone` and do not implement `Sync`.\n//!   Although they can be moved to other threads, they are not allowed to be used with `send`/`recv` while in an `Arc`.\n//! - [MAsyncTx] and [MTx] are `Clone` and `Sync`\n//!\n//! The following code is OK:\n//!\n//! ``` rust\n//! use crossfire::*;\n//! async fn foo() {\n//!     let (tx, rx) = mpsc::bounded_async::<usize>(100);\n//!     tokio::spawn(async move {\n//!         let _ = rx.recv().await;\n//!     });\n//!     drop(tx);\n//! }\n//! ```\n//!\n//! Because the `AsyncRx` does not have the `Sync` marker, using `Arc<AsyncRx>` will lose the `Send` marker.\n//!\n//! For your safety, the following code **should not compile**:\n//!\n//! ``` compile_fail\n//! use crossfire::*;\n//! use std::sync::Arc;\n//! async fn foo() {\n//!     let (tx, rx) = mpsc::bounded_async::<usize>(100);\n//!     let rx = Arc::new(rx);\n//!     tokio::spawn(async move {\n//!         let _ = rx.recv().await;\n//!     });\n//!     drop(tx);\n//! }\n//! 
```\n\nuse crate::async_rx::*;\nuse crate::async_tx::*;\nuse crate::blocking_rx::*;\nuse crate::blocking_tx::*;\nuse crate::flavor::{\n    flavor_dispatch, flavor_select_dispatch, queue_dispatch, Flavor, FlavorBounded, FlavorImpl,\n    FlavorMP, FlavorNew, FlavorWrap, Queue,\n};\nuse crate::null::CloseHandle;\nuse crate::shared::*;\nuse crate::{NotCloneable, ReceiverType, SenderType};\nuse std::mem::MaybeUninit;\n\n/// Flavor Type alias for unbounded MPSC channel\npub type List<T> = FlavorWrap<crate::flavor::List<T>, RegistryDummy, RegistrySingle>;\n\n/// Flavor type for one-sized MPSC channel\npub type One<T> = FlavorWrap<crate::flavor::One<T>, RegistryMultiSend<T>, RegistrySingle>;\n\n/// Flavor Type alias for bounded MPSC channel wrapped with specified One impl\n#[allow(clippy::large_enum_variant)]\npub enum Array<T> {\n    Array(crate::flavor::ArrayMpsc<T>),\n    One(crate::flavor::OneMpsc<T>),\n}\n\nimpl<T> Array<T> {\n    #[inline]\n    pub fn new(size: usize) -> Self {\n        if size <= 1 {\n            Self::One(crate::flavor::OneMpsc::new())\n        } else {\n            Self::Array(crate::flavor::ArrayMpsc::<T>::new(size))\n        }\n    }\n}\n\nimpl<T> FlavorMP for Array<T> {}\n\nmacro_rules! 
wrap_array {\n    ($self: expr, $method:ident $($arg:expr)*)=>{\n        match $self {\n            Self::Array(inner) => inner.$method($($arg)*),\n            Self::One(inner) => inner.$method($($arg)*),\n        }\n    };\n}\n\nimpl<T> Queue for Array<T> {\n    type Item = T;\n    queue_dispatch!(wrap_array);\n}\n\nimpl<T> FlavorImpl for Array<T> {\n    flavor_dispatch!(wrap_array);\n}\n\nimpl<T> FlavorSelect for Array<T> {\n    flavor_select_dispatch!(wrap_array);\n}\n\nimpl<T> FlavorBounded for Array<T> {\n    #[inline(always)]\n    fn new_with_bound(size: usize) -> Self {\n        Self::new(size)\n    }\n}\n\nimpl<T: 'static> Flavor for Array<T> {\n    type Send = RegistryMultiSend<T>;\n    type Recv = RegistrySingle;\n}\n\n/// The generic builder for all mpsc channel types with a new method (except Array).\n///\n/// Initialize sender and receiver types from a flavor type,\n/// you can let the compiler to infer the type according to return type signature.\n/// (the falvor might have different new() method, but the rest is the same.\n/// # Examples\n///\n/// ```rust\n/// use crossfire::*;\n/// let (tx, rx): (MTx<_>, Rx<_>) = mpsc::new::<mpsc::List<i32>, _, _>();\n/// let (tx, rx): (MAsyncTx<mpsc::One<usize>>, Rx<mpsc::One<usize>>) = mpsc::new();\n/// ```\n#[inline(always)]\npub fn new<F, S, R>() -> (S, R)\nwhere\n    F: Flavor + FlavorNew + FlavorMP,\n    S: SenderType<Flavor = F> + Clone,\n    R: ReceiverType<Flavor = F> + NotCloneable,\n{\n    build::<F, S, R>(F::new())\n}\n\n/// The generic builder for all mpsc channel types\n///\n/// Initialize sender and receiver types from a flavor type,\n/// you can let the compiler to infer the type according to return type signature.\n/// (the flavor might have different new() method, but the rest is the same.\n///\n/// # Examples\n///\n/// ```rust\n/// use crossfire::{*, mpsc::*};\n/// let (tx, rx): (MTx<_>, Rx<_>) = build::<List<i32>, _, _>(List::new());\n/// let (tx, rx): (MAsyncTx<One<usize>>, Rx<One<usize>>)  = 
build(One::new());\n/// ```\n#[inline(always)]\npub fn build<F, S, R>(flavor: F) -> (S, R)\nwhere\n    F: Flavor + FlavorMP,\n    S: SenderType<Flavor = F> + Clone,\n    R: ReceiverType<Flavor = F> + NotCloneable,\n{\n    let shared = ChannelShared::new(flavor, F::Send::new(), F::Recv::new());\n    (S::new(shared.clone()), R::new(shared))\n}\n\n#[inline]\nfn unbounded_new<T, R>() -> (MTx<List<T>>, R)\nwhere\n    T: 'static,\n    R: ReceiverType<Flavor = List<T>> + NotCloneable,\n{\n    build::<List<T>, MTx<List<T>>, R>(List::<T>::from_inner(crate::flavor::List::<T>::new()))\n}\n\n#[inline]\npub fn unbounded_blocking<T>() -> (MTx<List<T>>, Rx<List<T>>)\nwhere\n    T: 'static,\n{\n    unbounded_new()\n}\n\n#[inline]\npub fn unbounded_async<T>() -> (MTx<List<T>>, AsyncRx<List<T>>)\nwhere\n    T: 'static,\n{\n    unbounded_new()\n}\n\nfn bounded_new<T, S, R>(size: usize) -> (S, R)\nwhere\n    T: 'static,\n    S: SenderType<Flavor = Array<T>> + Clone,\n    R: ReceiverType<Flavor = Array<T>> + NotCloneable,\n{\n    build::<Array<T>, S, R>(Array::<T>::new(size))\n}\n\n/// Creates a bounded channel with a pair of blocking sender and receiver.\n///\n/// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.\n#[inline]\npub fn bounded_blocking<T>(size: usize) -> (MTx<Array<T>>, Rx<Array<T>>)\nwhere\n    T: 'static,\n{\n    bounded_new(size)\n}\n\n/// Creates a bounded channel with a pair of async sender and receiver.\n///\n/// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.\n#[inline]\npub fn bounded_async<T>(size: usize) -> (MAsyncTx<Array<T>>, AsyncRx<Array<T>>)\nwhere\n    T: 'static,\n{\n    bounded_new(size)\n}\n\n/// Creates a bounded channel with a pair of blocking sender and async receiver.\n///\n/// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.\n#[inline]\npub fn bounded_blocking_async<T>(size: usize) -> 
(MTx<Array<T>>, AsyncRx<Array<T>>)\nwhere\n    T: 'static,\n{\n    bounded_new(size)\n}\n\n/// Creates a bounded channel with a pair of async sender and blocking receiver.\n///\n/// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.\n#[inline]\npub fn bounded_async_blocking<T>(size: usize) -> (MAsyncTx<Array<T>>, Rx<Array<T>>)\nwhere\n    T: 'static,\n{\n    bounded_new(size)\n}\n\n/// Flavor type for close notification, refer to [crate::null] for usage\npub type Null = FlavorWrap<crate::null::Null, RegistryDummy, RegistrySingle>;\n\nimpl Null {\n    #[inline(always)]\n    pub fn new_blocking(&self) -> (CloseHandle<Null>, Rx<Null>) {\n        new()\n    }\n\n    #[inline(always)]\n    pub fn new_async(self) -> (CloseHandle<Null>, AsyncRx<Null>) {\n        new()\n    }\n}\n"
  },
  {
    "path": "src/null.rs",
    "content": "//! A null flavor type that use to notify thread/future to close\n//!\n//! It's common practice we use `()` channel in async code, not intended for any message, just\n//! subscribe for close event. (For example, cancelling socket operations, stopping worker loops...)\n//! This is a module designed for that, with minimized polling cost.\n//!\n//! You can initialize a null channel with [crate::mpsc::Null::new_async()] or\n//! [crate::mpmc::Null::new_async()], which return a [CloseHandle], (which can only be `clone` or `drop`,\n//! but unable to send any message), and a normal receiver type (which recv method is always\n//! blocked until the all copy of `CloseHandle` is dropped).\n//!\n//! > NOTE: using mpsc version has less cost then mpmc version.\n//!\n//! # Examples\n//!\n//! Use null channel to stop a background loop.\n//!\n//! ```rust\n//! use crossfire::{null::CloseHandle, *};\n//! use std::time::Duration;\n//!\n//! # #[tokio::main]\n//! # async fn main() {\n//! // Create a null channel\n//! let (stop_tx, stop_rx): (CloseHandle<mpmc::Null>, MAsyncRx<mpmc::Null>)  = mpmc::Null::new().new_async();\n//! let (data_tx, data_rx): (MAsyncTx<mpmc::Array<i32>>, MAsyncRx<mpmc::Array<i32>>) = mpmc::bounded_async::<i32>(10);\n//!\n//! // Spawn a background task\n//! let task = tokio::spawn(async move {\n//!     loop {\n//!         tokio::select! {\n//!             // If the null channel is closed (stop_tx dropped), this branch will be selected\n//!             res = stop_rx.recv() => {\n//!                 if res.is_err() {\n//!                     println!(\"Stopping task\");\n//!                     break;\n//!                 }\n//!             }\n//!             res = data_rx.recv() => {\n//!                 match res {\n//!                     Ok(data) => println!(\"Received data: {}\", data),\n//!                     Err(_) => break,\n//!                 }\n//!             }\n//!         }\n//!     }\n//! });\n//!\n//! 
data_tx.send(1).await.unwrap();\n//! tokio::time::sleep(Duration::from_millis(10)).await;\n//!\n//! // Drop the stop handle to signal the task to stop\n//! drop(stop_tx);\n//!\n//! task.await.unwrap();\n//! # }\n//! ```\n\nuse crate::flavor::Flavor;\nuse crate::flavor::{FlavorImpl, FlavorNew, FlavorSelect, Queue, Token};\nuse crate::shared::ChannelShared;\nuse crate::SenderType;\nuse core::mem::MaybeUninit;\nuse std::sync::Arc;\n\n/// an flavor type can never receive any message\npub struct Null();\n\nimpl Queue for Null {\n    type Item = ();\n\n    #[inline(always)]\n    fn pop(&self) -> Option<()> {\n        None\n    }\n\n    #[inline(always)]\n    fn push(&self, _item: ()) -> Result<(), ()> {\n        unreachable!();\n    }\n\n    #[inline(always)]\n    fn len(&self) -> usize {\n        0\n    }\n\n    #[inline(always)]\n    fn capacity(&self) -> Option<usize> {\n        None\n    }\n\n    #[inline(always)]\n    fn is_full(&self) -> bool {\n        true\n    }\n\n    #[inline(always)]\n    fn is_empty(&self) -> bool {\n        true\n    }\n}\n\nimpl FlavorImpl for Null {\n    #[inline(always)]\n    fn try_send(&self, _item: &MaybeUninit<()>) -> bool {\n        // work as an /dev/null, although normally init with CloseHandle which don't have send() method\n        true\n    }\n\n    #[inline(always)]\n    fn try_send_oneshot(&self, _item: *const ()) -> Option<bool> {\n        Some(true)\n    }\n\n    #[inline(always)]\n    fn try_recv(&self) -> Option<Self::Item> {\n        // always empty\n        None\n    }\n\n    #[inline(always)]\n    fn try_recv_final(&self) -> Option<Self::Item> {\n        None\n    }\n\n    #[inline]\n    fn backoff_limit(&self) -> u16 {\n        0\n    }\n}\n\nimpl FlavorNew for Null {\n    #[inline]\n    fn new() -> Self {\n        Self()\n    }\n}\n\nimpl FlavorSelect for Null {\n    #[inline(always)]\n    fn try_select(&self, _final_check: bool) -> Option<Token> {\n        None\n    }\n\n    #[inline(always)]\n    fn 
read_with_token(&self, _token: Token) {\n        unreachable!();\n    }\n}\n\n/// The CloseHandle is a special type for flavor [Null], only impl `Clone` and `Drop`\npub struct CloseHandle<F: Flavor>(Arc<ChannelShared<F>>);\n\nimpl<F: Flavor> Clone for CloseHandle<F> {\n    #[inline(always)]\n    fn clone(&self) -> Self {\n        self.0.add_tx();\n        Self(self.0.clone())\n    }\n}\n\nimpl<F: Flavor> Drop for CloseHandle<F> {\n    #[inline(always)]\n    fn drop(&mut self) {\n        self.0.close_tx();\n    }\n}\n\nimpl<F: Flavor> SenderType for CloseHandle<F>\nwhere\n    F: Flavor<Item = ()>,\n{\n    type Flavor = F;\n\n    #[inline(always)]\n    fn new(shared: Arc<ChannelShared<Self::Flavor>>) -> Self {\n        CloseHandle(shared)\n    }\n}\n"
  },
  {
    "path": "src/oneshot.rs",
    "content": "//! OneShot channel support both thread and async\n//!\n//! NOTE: In order to reduce initialization and teardown cost, this module use specialized sender [TxOneshot] and\n//! receiver [RxOneshot] types.\n//!\n//! # Examples\n//!\n//! ## Thread Context\n//!\n//! ```\n//! use crossfire::oneshot::oneshot;\n//!\n//! let (tx, rx) = oneshot();\n//!\n//! std::thread::spawn(move || {\n//!     tx.send(\"Hello from sender!\");\n//! });\n//!\n//! let received = rx.recv().unwrap();\n//! assert_eq!(received, \"Hello from sender!\");\n//! ```\n//!\n//! ## Async Context\n//!\n//! ```\n//! use crossfire::oneshot::oneshot;\n//!\n//! async fn example() {\n//!     let (tx, rx) = oneshot();\n//!\n//!     tokio::spawn(async move {\n//!         tx.send(\"Hello from async sender!\");\n//!     });\n//!\n//!     let received = rx.await.unwrap();\n//!     assert_eq!(received, \"Hello from async sender!\");\n//! }\n//! ```\n\nuse crate::backoff::Backoff;\nuse crate::shared::*;\n#[allow(unused_imports)]\nuse crate::{tokio_task_id, trace_log};\nuse core::cell::UnsafeCell;\nuse std::future::Future;\nuse std::pin::Pin;\nuse std::ptr::NonNull;\nuse std::sync::atomic::{\n    fence, AtomicU8,\n    Ordering::{self, AcqRel, Acquire, SeqCst},\n};\nuse std::task::{Context, Poll};\nuse std::thread;\nuse std::time::{Duration, Instant};\n\n/// Send/TxOneshot::drop will set this flag once, never changed.\nconst LOCK_FLAG: u8 = 0x1;\n/// set by RxOneshot\nconst WAKER_SET_FLAG: u8 = 0x2;\n/// set by any of TxOneshot/RxOneshot if it exit\nconst CLOSE_FLAG: u8 = 0x4;\nconst EXIST_FLAG: u8 = 0x8;\n\nstruct OneShotInner<T> {\n    state: AtomicU8,\n    value: UnsafeCell<Option<T>>,\n    o_waker: UnsafeCell<Option<ThinWaker>>,\n}\n\nunsafe impl<T: Send> Send for OneShotInner<T> {}\nunsafe impl<T: Send> Sync for OneShotInner<T> {}\n\nimpl<T> OneShotInner<T> {\n    #[inline]\n    fn new() -> Box<Self> {\n        Box::new(Self {\n            value: UnsafeCell::new(None),\n            state: 
AtomicU8::new(0),\n            o_waker: UnsafeCell::new(None),\n        })\n    }\n\n    #[inline]\n    fn get_waker(&self) -> &mut Option<ThinWaker> {\n        unsafe { &mut *self.o_waker.get() }\n    }\n\n    #[inline(always)]\n    fn value_mut(&self) -> &mut Option<T> {\n        unsafe { &mut *self.value.get() }\n    }\n\n    #[inline(always)]\n    fn set_state(&self, flag: u8) -> u8 {\n        self.state.fetch_or(flag, Ordering::AcqRel)\n    }\n\n    #[inline(always)]\n    fn _try_recv(&self, order: Ordering) -> Result<u8, u8> {\n        let state = self.state.load(order);\n        if state & LOCK_FLAG > 0 {\n            Ok(state)\n        } else {\n            Err(state)\n        }\n    }\n\n    // NOTE: in order to avoid miri borrow checker, use raw ptr here\n    #[inline(always)]\n    fn _consume_value(p: NonNull<Self>, mut state: u8) -> Option<T> {\n        debug_assert!(\n            state & LOCK_FLAG > 0,\n            \"oneshot:({:?}) consume value unexpected {state}\",\n            tokio_task_id!()\n        );\n        let this = unsafe { p.as_ref() };\n        let item = if state & EXIST_FLAG > 0 { this.value_mut().take() } else { None };\n        loop {\n            if state & CLOSE_FLAG > 0 {\n                trace_log!(\n                    \"oneshot:({:?}) recv value={} & destroy\",\n                    tokio_task_id!(),\n                    item.is_some()\n                );\n                fence(Acquire);\n                let _ = unsafe { Box::from_raw(p.as_ptr()) };\n                // they close first\n                return item;\n            }\n            if let Err(s) = this.state.compare_exchange(state, CLOSE_FLAG | state, AcqRel, Acquire)\n            {\n                trace_log!(\n                    \"oneshot:({:?}) recv value={} {state} close retry\",\n                    tokio_task_id!(),\n                    item.is_some()\n                );\n                state = s;\n            } else {\n                trace_log!(\n            
        \"oneshot:({:?}) recv value={} {state}\",\n                    tokio_task_id!(),\n                    item.is_some()\n                );\n                // we close first\n                return item;\n            }\n        }\n    }\n\n    /// return true to destroy\n    #[inline(always)]\n    fn _notify_rx(p: NonNull<Self>, exist: bool) -> bool {\n        let this = unsafe { p.as_ref() };\n        let mut old_state = 0;\n        let exist_flag: u8 = if exist { EXIST_FLAG } else { 0 };\n        loop {\n            let new_state = if old_state == 0 {\n                LOCK_FLAG | CLOSE_FLAG | exist_flag\n            } else if old_state == WAKER_SET_FLAG {\n                LOCK_FLAG | WAKER_SET_FLAG | exist_flag\n            } else if old_state & CLOSE_FLAG > 0 {\n                // WAKER_SET_FLAG | CLOSE_FLAG, or just CLOSE_FLAG\n                trace_log!(\"oneshot:({:?}) rx closed\", tokio_task_id!());\n                return true;\n            } else {\n                panic!(\"unexpected state {}\", old_state);\n            };\n            match this.state.compare_exchange_weak(old_state, new_state, AcqRel, Acquire) {\n                Ok(_) => {\n                    if old_state == 0 {\n                        trace_log!(\"oneshot:({:?}) send value\", tokio_task_id!());\n                        return false;\n                    } else {\n                        if let Some(waker) = this.get_waker().as_ref() {\n                            // the sender should never move the waker, because rx::poll will\n                            // validate it.\n                            trace_log!(\"oneshot:({:?}) wake rx\", tokio_task_id!());\n                            waker.wake_by_ref();\n                        } else {\n                            unreachable!();\n                        }\n                        if let Err(state) = this.state.compare_exchange(\n                            new_state,\n                            CLOSE_FLAG | LOCK_FLAG | 
exist_flag,\n                            AcqRel,\n                            Acquire,\n                        ) {\n                            // Safety: although we have no use for fail value other than debug log,\n                            // but consider use failure ordering Acquire instead of Relaxed for miri,\n                            // as a fence (stop the following from_raw to re-ordering).\n                            debug_assert!(state & CLOSE_FLAG > 0, \"unexpected state {state}\");\n                            trace_log!(\"oneshot:({:?}) rx closed {state}\", tokio_task_id!());\n                            return true;\n                        } else {\n                            // we close first, let rx do the cleanup\n                            return false;\n                        }\n                    }\n                }\n                Err(s) => {\n                    old_state = s;\n                }\n            }\n        }\n    }\n\n    #[inline(always)]\n    fn set_waker(&self, waker: ThinWaker) -> Result<(), u8> {\n        // thread context only need set waker once.\n        // NOTE we should guarantee waker not set twice\n        // (the recv_timeout API should not allow recv twice),\n        // it will complicate things (like async poll).\n        self.get_waker().replace(waker);\n        self.state.compare_exchange(0, WAKER_SET_FLAG, AcqRel, Acquire)?;\n        Ok(())\n    }\n\n    #[inline(always)]\n    fn cancel_waker(&self, abandon: bool) -> Result<(), u8> {\n        let new_state = if abandon { CLOSE_FLAG } else { 0 };\n        if let Err(state) = self.state.compare_exchange(WAKER_SET_FLAG, new_state, AcqRel, Acquire)\n        {\n            // expect LOCK_FLAG | CLOSE_FLAG, or LOCK_FLAG | WAKER_SET_FLAG\n            return Err(state);\n        } else {\n            Ok(())\n        }\n    }\n\n    #[inline(always)]\n    fn is_empty(&self) -> bool {\n        let state = self.state.load(Ordering::SeqCst);\n        state & 
EXIST_FLAG == 0\n    }\n}\n\n/// Sender for oneshot channel\npub struct TxOneshot<T>(NonNull<OneShotInner<T>>);\n\nunsafe impl<T> Send for TxOneshot<T> {}\nunsafe impl<T> Sync for TxOneshot<T> {}\n\nimpl<T> TxOneshot<T> {\n    /// Sending the item is one-time non-blocking behavior\n    #[inline]\n    pub fn send(self, item: T) {\n        unsafe { self.0.as_ref() }.value_mut().replace(item);\n        if OneShotInner::_notify_rx(self.0, true) {\n            // drop inner\n            let _ = unsafe { Box::from_raw(self.0.as_ptr()) };\n        }\n        std::mem::forget(self);\n    }\n\n    /// return true when RxOneshot is dropped\n    ///\n    /// # Safety\n    ///\n    /// This is not SeqCst, only Acquire, for sender we don't require to know immediately.\n    #[inline]\n    pub fn is_disconnected(&self) -> bool {\n        unsafe { self.0.as_ref() }.state.load(Acquire) & CLOSE_FLAG > 0\n    }\n}\n\nimpl<T> Drop for TxOneshot<T> {\n    #[inline]\n    fn drop(&mut self) {\n        if OneShotInner::_notify_rx(self.0, false) {\n            // drop inner\n            let _ = unsafe { Box::from_raw(self.0.as_ptr()) };\n        }\n    }\n}\n\n/// Receiver for oneshot channel\n#[must_use]\npub struct RxOneshot<T>(Option<NonNull<OneShotInner<T>>>);\n\nunsafe impl<T> Send for RxOneshot<T> {}\n\nimpl<T> Drop for RxOneshot<T> {\n    #[inline]\n    fn drop(&mut self) {\n        if let Some(p) = self.0.as_ref() {\n            let inner = unsafe { p.as_ref() };\n            let old_state = inner.set_state(CLOSE_FLAG);\n            if old_state & CLOSE_FLAG > 0 {\n                trace_log!(\"oneshot:({:?}) rx drop destroy, state={}\", tokio_task_id!(), old_state);\n                debug_assert_eq!(\n                    old_state & (!EXIST_FLAG),\n                    CLOSE_FLAG | LOCK_FLAG,\n                    \"unexpected state {old_state}\"\n                ); // tx drop\n                   // drop inner\n                let _ = unsafe { Box::from_raw(p.as_ptr()) };\n           
 } else {\n                // let tx do the cleanup\n                trace_log!(\"oneshot:({:?}) rx drop, state={}\", tokio_task_id!(), old_state);\n                debug_assert!(\n                    old_state == 0 // we drop first, tx not trigger\n                        || old_state == WAKER_SET_FLAG // rx.await cancel, or rx.recv_timeout() timeout\n                        || old_state | EXIST_FLAG== (EXIST_FLAG | LOCK_FLAG | WAKER_SET_FLAG), // tx waking while rx.await cancel, or rx.recv_timeout() timeout\n                    \"oneshot:({:?}) rx drop, unexpected state={}\",\n                    tokio_task_id!(),\n                    old_state\n                );\n            }\n        }\n    }\n}\n\nimpl<T> RxOneshot<T> {\n    /// NOTE: this will blocking current thread\n    #[inline]\n    pub fn recv(self) -> Result<T, RecvError> {\n        if let Ok(item) = self._recv_blocking(None) {\n            return Ok(item);\n        }\n        Err(RecvError)\n    }\n\n    /// NOTE: this will blocking current thread with a timeout\n    #[inline]\n    pub fn recv_timeout(self, timeout: Duration) -> Result<T, RecvTimeoutError> {\n        let deadline = Instant::now() + timeout;\n        match self._recv_blocking(Some(deadline)) {\n            Ok(item) => Ok(item),\n            Err(true) => Err(RecvTimeoutError::Timeout),\n            Err(false) => Err(RecvTimeoutError::Disconnected),\n        }\n    }\n\n    #[inline(always)]\n    pub fn is_empty(&self) -> bool {\n        if let Some(p) = self.0.as_ref() {\n            let inner = unsafe { p.as_ref() };\n            inner.is_empty()\n        } else {\n            true\n        }\n    }\n\n    #[inline]\n    pub fn try_recv(&mut self) -> Result<T, TryRecvError> {\n        if let Some(p) = self.0.as_ref() {\n            let p = *p;\n            if let Ok(state) = unsafe { p.as_ref() }._try_recv(Acquire) {\n                self.0 = None;\n                if let Some(item) = OneShotInner::_consume_value(p, state) {\n         
           return Ok(item);\n                } else {\n                    return Err(TryRecvError::Disconnected);\n                }\n            } else {\n                Err(TryRecvError::Empty)\n            }\n        } else {\n            Err(TryRecvError::Disconnected)\n        }\n    }\n\n    #[inline]\n    pub async fn recv_async(self) -> Result<T, RecvError> {\n        self.await\n    }\n\n    #[inline]\n    fn poll(&mut self, ctx: &mut Context<'_>) -> Poll<Result<T, ()>> {\n        let p: NonNull<OneShotInner<T>> = if let Some(p) = self.0.as_ref() {\n            *p\n        } else {\n            // might poll after try_recv() finish\n            return Poll::Ready(Err(()));\n        };\n        let inner = unsafe { p.as_ref() };\n        macro_rules! process {\n            ($state: expr) => {\n                self.0 = None;\n                if let Some(item) = OneShotInner::_consume_value(p, $state) {\n                    return Poll::Ready(Ok(item));\n                } else {\n                    return Poll::Ready(Err(()));\n                }\n            };\n        }\n        macro_rules! 
check_exist {\n            ($order: expr) => {{\n                match inner._try_recv($order) {\n                    Ok(state) => {\n                        process!(state);\n                    }\n                    Err(s) => s,\n                }\n            }};\n        }\n        let state = check_exist!(SeqCst);\n        if state & WAKER_SET_FLAG > 0 {\n            let waker = inner.get_waker().as_ref().unwrap();\n            if waker.will_wake(ctx) {\n                trace_log!(\"oneshot:({:?}) spurious waked state {}\", tokio_task_id!(), state,);\n                return Poll::Pending;\n            }\n            if let Err(state) = inner.cancel_waker(false) {\n                process!(state);\n            }\n        }\n        if let Err(state) = inner.set_waker(ThinWaker::Async(ctx.waker().clone())) {\n            process!(state);\n        }\n        Poll::Pending\n    }\n\n    /// On Disconnected return Err(false),\n    /// Err(true) when timeout.\n    #[inline(always)]\n    pub(crate) fn _recv_blocking(self, deadline: Option<Instant>) -> Result<T, bool> {\n        let p: NonNull<OneShotInner<T>> = if let Some(p) = self.0.as_ref() {\n            *p\n        } else {\n            // might recv() after try_recv() ok/disconnect\n            return Err(false);\n        };\n        let inner = unsafe { p.as_ref() };\n        macro_rules! process {\n            ($state: expr) => {\n                let _ = inner;\n                std::mem::forget(self);\n                if let Some(item) = OneShotInner::_consume_value(p, $state) {\n                    return Ok(item);\n                } else {\n                    return Err(false);\n                }\n            };\n        }\n        macro_rules! 
try_recv {\n            ($order: expr) => {\n                if let Ok(state) = inner._try_recv($order) {\n                    trace_log!(\"try_recv got {state}\");\n                    process!(state);\n                }\n            };\n        }\n        try_recv!(Acquire);\n        let mut backoff = Backoff::new();\n        while !backoff.snooze() {\n            try_recv!(Acquire);\n        }\n        if let Err(state) = inner.set_waker(ThinWaker::Blocking(thread::current())) {\n            process!(state);\n        }\n        trace_log!(\"oneshot: waker set\");\n        loop {\n            try_recv!(SeqCst);\n            match check_timeout(deadline) {\n                Ok(None) => {\n                    std::thread::park();\n                }\n                Ok(Some(dur)) => {\n                    std::thread::park_timeout(dur);\n                }\n                Err(_) => {\n                    trace_log!(\"oneshot: to cancel_waker on timeout\");\n                    if let Err(state) = inner.cancel_waker(true) {\n                        process!(state);\n                    } else {\n                        let _ = inner;\n                        // we close first\n                        std::mem::forget(self);\n                        return Err(true);\n                    }\n                }\n            }\n        }\n    }\n\n    /// Wrap RxOneshot with timeout, consume self when it's done.\n    /// The Future returns `Result<T, RecvTimeoutError>`\n    #[cfg(any(feature = \"tokio\", feature = \"async_std\"))]\n    #[cfg_attr(docsrs, doc(cfg(any(feature = \"tokio\", feature = \"async_std\"))))]\n    #[inline]\n    pub async fn recv_async_timeout(\n        self, timeout: std::time::Duration,\n    ) -> Result<T, RecvTimeoutError> {\n        #[cfg(feature = \"tokio\")]\n        {\n            let sleep = tokio::time::sleep(timeout);\n            self.recv_async_with_timer(sleep).await\n        }\n        #[cfg(feature = \"async_std\")]\n        {\n        
    let sleep = async_std::task::sleep(timeout);\n            self.recv_async_with_timer(sleep).await\n        }\n    }\n\n    /// Wrap RxOneshot with custom sleep function, consume self when it's done.\n    ///\n    /// The behavior is atomic: the message is either received successfully or the operation is canceled due to a timeout.\n    ///\n    /// Returns `Ok(T)` when successful.\n    ///\n    /// Returns Err([RecvTimeoutError::Timeout]) when a message could not be received because the channel is empty and the operation timed out.\n    ///\n    /// Returns Err([RecvTimeoutError::Disconnected]) if the sender has been dropped and the channel is empty.\n    ///\n    /// # Argument:\n    ///\n    /// * `sleep`: The sleep function. the return value of `sleep` is ignore. We add generic `R` just in order to support smol::Timer\n    /// # Example\n    ///\n    /// Example with smol\n    ///\n    /// ```rust\n    /// extern crate smol;\n    /// use std::time::Duration;\n    /// use crossfire::*;\n    /// async fn foo() {\n    ///     let (tx, rx) = oneshot::oneshot::<usize>();\n    ///     match rx.recv_async_with_timer(smol::Timer::after(Duration::from_secs(1))).await {\n    ///         Ok(_item)=>{\n    ///             println!(\"message recv\");\n    ///         }\n    ///         Err(RecvTimeoutError::Timeout)=>{\n    ///             println!(\"timeout\");\n    ///         }\n    ///         Err(RecvTimeoutError::Disconnected)=>{\n    ///             println!(\"sender-side closed\");\n    ///         }\n    ///     }\n    /// }\n    /// ```\n    ///\n    /// Example with tokio:\n    ///\n    /// ```rust\n    /// use std::time::Duration;\n    /// use crossfire::*;\n    /// async fn foo() {\n    ///     let (tx, rx) = oneshot::oneshot::<usize>();\n    ///     let sleep = tokio::time::sleep(Duration::from_secs(1));\n    ///     let _r = rx.recv_async_with_timer(sleep).await;\n    /// }\n    /// ```\n    #[inline]\n    pub fn recv_async_with_timer<F, R>(self, sleep: F) 
-> OneshotTimeoutFuture<T, F, R>\n    where\n        F: Future<Output = R>,\n    {\n        OneshotTimeoutFuture { rx: self, sleep }\n    }\n}\n\nimpl<T> Future for RxOneshot<T> {\n    type Output = Result<T, RecvError>;\n\n    #[inline]\n    fn poll(self: Pin<&mut Self>, ctx: &mut Context) -> Poll<Self::Output> {\n        let this = self.get_mut();\n        match this.poll(ctx) {\n            Poll::Ready(Ok(item)) => Poll::Ready(Ok(item)),\n            Poll::Ready(Err(())) => Poll::Ready(Err(RecvError)),\n            Poll::Pending => Poll::Pending,\n        }\n    }\n}\n\npub struct OneshotTimeoutFuture<T, F, R>\nwhere\n    F: Future<Output = R>,\n{\n    rx: RxOneshot<T>,\n    sleep: F,\n}\n\nimpl<T, F, R> Future for OneshotTimeoutFuture<T, F, R>\nwhere\n    F: Future<Output = R>,\n{\n    type Output = Result<T, RecvTimeoutError>;\n\n    #[inline]\n    fn poll(self: Pin<&mut Self>, ctx: &mut Context) -> Poll<Self::Output> {\n        // NOTE: we can use unchecked to bypass pin because we are not movig \"sleep\",\n        // neither it's exposed outside\n        let this = unsafe { self.get_unchecked_mut() };\n        match this.rx.poll(ctx) {\n            Poll::Ready(Ok(item)) => return Poll::Ready(Ok(item)),\n            Poll::Ready(Err(())) => return Poll::Ready(Err(RecvTimeoutError::Disconnected)),\n            _ => {}\n        }\n        let sleep = unsafe { Pin::new_unchecked(&mut this.sleep) };\n        if sleep.poll(ctx).is_ready() {\n            Poll::Ready(Err(RecvTimeoutError::Timeout))\n        } else {\n            Poll::Pending\n        }\n    }\n}\n\n#[inline]\npub fn oneshot<T>() -> (TxOneshot<T>, RxOneshot<T>) {\n    let p = unsafe { NonNull::new_unchecked(Box::into_raw(OneShotInner::new())) };\n    let tx = TxOneshot(p);\n    let rx = RxOneshot(Some(p));\n    (tx, rx)\n}\n"
  },
  {
    "path": "src/select/mod.rs",
    "content": "//! # Selection between channels\n//!\n//! This module provides:\n//! - [Select]: Allows selecting from multiple borrowed receiver references,\n//!   which is a type-erased interface similar to the select in crossbeam-channel, supporting both `mpmc`, `mpsc`, and `spsc` channels.\n//! - [Multiplex]: Owns and reads from multiple channels as a non-concurrent consumer, mainly for `spsc`, `mpsc`.\n//!\n//! Performance:  dedicated channel > multiplex > select\n\n#[allow(clippy::module_inception)]\npub(crate) mod select;\npub use select::{Select, SelectResult};\n#[allow(private_interfaces)]\nmod multiplex;\npub use multiplex::{Multiplex, Mux};\n\n#[derive(PartialEq, Debug, Clone, Copy)]\n#[repr(u8)]\npub enum SelectMode {\n    RR,\n    Rand,\n    Bias,\n}\n"
  },
  {
    "path": "src/select/multiplex.rs",
    "content": "use crate::backoff::*;\nuse crate::flavor::{Flavor, FlavorBounded, FlavorImpl, FlavorNew, FlavorWrap};\nuse crate::shared::{check_timeout, ChannelShared};\nuse crate::waker::WakerState;\nuse crate::waker_registry::{RegistrySend, SelectWaker, SelectWakerWrapper};\nuse crate::BlockingRxTrait;\nuse crate::SenderType;\nuse crate::{RecvError, RecvTimeoutError, TryRecvError};\nuse std::cell::Cell;\nuse std::fmt;\nuse std::sync::atomic::Ordering;\nuse std::sync::Arc;\nuse std::thread;\nuse std::time::{Duration, Instant};\n\npub const DEFAULT_WEIGHT: u32 = 128;\n\n/// Type alias for multiplexed channel flavor\npub type Mux<F> = FlavorWrap<F, <F as Flavor>::Send, SelectWakerWrapper>;\n\n/// A multiplexer that owns multi channel receivers of the same Flavor type.\n///\n/// Unlike select, it focus on round-robin mode, allow to specified weight on each channel.\n/// It maintains a count of message received for each channel.\n/// That means if the last message recv on the `idx` channel, it will keep trying the same channel\n/// until the number equals to weight has been received. If the channel is empty, it will try the\n/// next one without touching the count. This strategy improves the hit rate of cpu cache and ensures no starvation.\n///\n/// NOTE: The default weight is 128. 
(When the weight of all channel set to 1, the performance is\n/// the worst because of cpu cache thrashing)\n///\n/// ## Capability and limitation:\n/// - New channel may be added on the fly\n/// - This abstraction is only designed for stable channels for most efficient select.\n/// - If channel close by sender, the receiver will be automatically close inside the Multiplex,\n///   user will not be notify until all its channels closed.\n/// - Due to it binds on Flavor interface, it cannot be use between different type.\n///   If you want to multiplex between list and array, can use the\n///   [CompatFlavor](crate::compat::CompatFlavor)\n/// - **NOTE** : It has internal mutability because it need to impl [BlockingRxTrait](crate::BlockingRxTrait),\n///   the adding channel process remains `&mut self`. Because `Multiplex` is a single consumer just\n///   like [Rx](crate::Rx), it does not have `Sync`. If you can guarantee no concurrent access you\n///   can manutally add the `Sync` back in parent struct.\n///\n///\n/// # Examples\n///\n/// Basic usage with multiple senders:\n///\n/// ```\n/// use crossfire::{mpsc::Array, MTx, select::{Multiplex, Mux}};\n/// use std::thread;\n///\n/// // Create a multiplexer with Array flavor\n/// let mut mp = Multiplex::<Array<i32>>::new();\n///\n/// // Create multiple senders through the multiplexer\n/// let tx1: MTx<Mux<Array<i32>>> = mp.bounded_tx(10);\n/// let tx2: MTx<Mux<Array<i32>>> = mp.bounded_tx(10);\n///\n/// // Send values from different threads\n/// let h1 = thread::spawn(move || {\n///     tx1.send(1).unwrap();\n/// });\n/// let h2 = thread::spawn(move || {\n///     tx2.send(2).unwrap();\n/// });\n///\n/// // Receive values through the multiplexer (order may vary)\n/// let val1 = mp.recv().unwrap();\n/// let val2 = mp.recv().unwrap();\n///\n/// h1.join().unwrap();\n/// h2.join().unwrap();\n/// ```\npub struct Multiplex<F: Flavor> {\n    waker: Arc<SelectWaker>,\n    handlers: Vec<MultiplexHandle<F>>,\n    last_idx: 
Cell<usize>,\n    count: Cell<u32>,\n}\n\nunsafe impl<F: Flavor> Send for Multiplex<F> {}\n\nstruct MultiplexHandle<F: Flavor> {\n    shared: Arc<ChannelShared<Mux<F>>>,\n    weight: u32,\n}\n\nimpl<F: Flavor> Multiplex<F> {\n    /// Initialize Select with fair, round-robin strategy\n    pub fn new() -> Self {\n        Self {\n            waker: Arc::new(SelectWaker::new()),\n            handlers: Vec::with_capacity(10),\n            count: Cell::new(0),\n            last_idx: Cell::new(0),\n        }\n    }\n\n    #[inline]\n    fn _add_item(&mut self, flavor: F, weight: u32) -> Arc<ChannelShared<Mux<F>>> {\n        self.waker.add_opened();\n        let recvs = self.waker.clone().to_wrapper(self.handlers.len());\n        let shared = ChannelShared::new(Mux::<F>::from_inner(flavor), F::Send::new(), recvs);\n        self.handlers.push(MultiplexHandle { shared: shared.clone(), weight: weight - 1 });\n        self.last_idx.set(self.handlers.len() - 1);\n        shared\n    }\n\n    /// Add a new channels with a new() method to multiplex, return its sender.\n    ///\n    /// # Type Parameters\n    ///\n    /// * `S`: The sender type that implements SenderType with the appropriate Flavor,\n    ///   may be async or blocking sender, MP or SP that match the `Flavor` type.\n    ///\n    /// # Note\n    ///\n    /// This method is only available for flavors that implement `FlavorNew` trait,\n    /// such as `List` / `One` flavor. 
For flavors like Array that don't implement `FlavorNew`,\n    /// use `bounded_tx` instead.\n    ///\n    /// # Example\n    ///\n    /// with mpsc::List (which sender type is [MTx](crate::MTx) and allow to clone)\n    ///\n    /// ```\n    /// use crossfire::{mpsc::List, MTx, select::{Multiplex, Mux}};\n    /// use tokio;\n    ///\n    /// let mut mp = Multiplex::<List<i32>>::new();\n    /// let tx1: MTx<Mux<List<i32>>> = mp.new_tx();\n    /// let tx2: MTx<Mux<List<i32>>> = mp.new_tx();\n    /// tx1.send(42).expect(\"send\");\n    /// tx2.send(42).expect(\"send\");\n    /// let value = mp.recv().unwrap();\n    /// assert_eq!(value, 42);\n    /// let value = mp.recv().unwrap();\n    /// assert_eq!(value, 42);\n    /// ```\n    ///\n    /// with spsc::One (which sender type is [Tx](crate::Tx) and not cloneable)\n    /// ```\n    /// use crossfire::{spsc::One, Tx, select::{Multiplex, Mux}};\n    /// use tokio;\n    ///\n    /// let mut mp = Multiplex::<One<i32>>::new();\n    /// // Creates an size-1 channel\n    /// let tx1: Tx<Mux<One<i32>>> = mp.new_tx();\n    /// // Creates another size-1 channel\n    /// let tx2: Tx<Mux<One<i32>>> = mp.new_tx();\n    /// std::thread::spawn(move ||{\n    ///     tx2.send(42).expect(\"send\");\n    /// });\n    /// let value = mp.recv().unwrap();\n    /// assert_eq!(value, 42);\n    /// ```\n    pub fn new_tx<S>(&mut self) -> S\n    where\n        F: FlavorNew,\n        S: SenderType<Flavor = Mux<F>>,\n    {\n        let shared = self._add_item(F::new(), DEFAULT_WEIGHT);\n        S::new(shared)\n    }\n\n    /// Add a channel of flavor (impl FlavorNew), with custom weight instead of default\n    /// (the default weight is 128)\n    pub fn new_tx_with_weight<S>(&mut self, weight: u32) -> S\n    where\n        F: FlavorNew,\n        S: SenderType<Flavor = Mux<F>>,\n    {\n        let shared = self._add_item(F::new(), weight);\n        S::new(shared)\n    }\n\n    /// Creates a new bounded sender for the multiplexer\n    ///\n    /// 
# Arguments\n    ///\n    /// * `size` - The maximum capacity of the channel\n    ///\n    /// # Type Parameters\n    ///\n    /// * `S` - The sender type that implements SenderType with the appropriate Flavor\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// use crossfire::{mpsc::Array, *, select::{Multiplex, Mux}};\n    ///\n    /// let mut mp = Multiplex::<Array<i32>>::new();\n    /// // Creates a bounded channel with capacity 10\n    /// let tx1: MTx<Mux<Array<i32>>> = mp.bounded_tx(10);\n    /// // Creates another bounded channel with capacity 20\n    /// let tx2: MTx<Mux<Array<i32>>> = mp.bounded_tx(20);\n    /// tx1.send(42).expect(\"send\");\n    /// std::thread::spawn(move || {\n    ///     tx2.send(42).expect(\"send\");\n    /// });\n    /// let value = mp.recv().unwrap();\n    /// assert_eq!(value, 42);\n    /// let value = mp.recv().unwrap();\n    /// assert_eq!(value, 42);\n    /// ```\n    pub fn bounded_tx<S>(&mut self, size: usize) -> S\n    where\n        F: FlavorBounded,\n        S: SenderType<Flavor = Mux<F>>,\n    {\n        let shared = self._add_item(F::new_with_bound(size), DEFAULT_WEIGHT);\n        S::new(shared)\n    }\n\n    /// Add a bounded channel to the multiplex, with custom weight (the default is 128)\n    pub fn bounded_tx_with_weight<S>(&mut self, size: usize, weight: u32) -> S\n    where\n        F: FlavorBounded,\n        S: SenderType<Flavor = Mux<F>>,\n    {\n        let shared = self._add_item(F::new_with_bound(size), weight);\n        S::new(shared)\n    }\n\n    /// Attempts to receive a message from any of the multiplexed channels without blocking.\n    ///\n    /// Returns `Ok(item)` if a message is available on any of the channels.\n    /// Returns `Err(TryRecvError::Empty)` if no messages are available.\n    /// Returns `Err(TryRecvError::Disconnected)` if all senders have been dropped.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// use crossfire::{mpsc::Array, select::{Multiplex, Mux}, MTx, 
TryRecvError};\n    ///\n    /// let mut mp = Multiplex::<Array<i32>>::new();\n    /// let tx1: MTx<Mux<_>> = mp.bounded_tx(10);\n    /// let _tx2: MTx<Mux<_>> = mp.bounded_tx(10);\n    /// // No message available yet\n    /// assert_eq!(mp.try_recv(), Err(TryRecvError::Empty));\n    /// tx1.send(42).unwrap();\n    /// // Now a message is available\n    /// assert_eq!(mp.try_recv(), Ok(42));\n    /// ```\n    #[inline]\n    pub fn try_recv(&self) -> Result<F::Item, TryRecvError> {\n        let last_idx = self.last_idx.get();\n        if let Some(item) = self._try_select_all::<true>(last_idx, self.handlers.len()) {\n            return Ok(item);\n        }\n        if self.waker.get_opened_count() == 0 {\n            return Err(TryRecvError::Disconnected);\n        }\n        Err(TryRecvError::Empty)\n    }\n\n    /// Receives a message from any of the multiplexed channels, blocking if necessary.\n    ///\n    /// This method will block the current thread until a message is available on any of the channels,\n    /// or until all senders are dropped.\n    #[inline]\n    pub fn recv(&self) -> Result<F::Item, RecvError> {\n        match self._recv_blocking(None) {\n            Ok(item) => Ok(item),\n            Err(_) => Err(RecvError),\n        }\n    }\n\n    /// Receives a message from any of the multiplexed channels with a timeout.\n    /// Will block when channel is empty.\n    ///\n    /// The behavior is atomic: the message is either received successfully or the operation is canceled due to a timeout.\n    ///\n    /// Returns `Ok(T)` when successful.\n    ///\n    /// Returns Err([RecvTimeoutError::Timeout]) when a message could not be received because the channel is empty and the operation timed out.\n    ///\n    /// Returns Err([RecvTimeoutError::Disconnected]) if the sender has been dropped and the channel is empty.\n    #[inline]\n    pub fn recv_timeout(&self, timeout: Duration) -> Result<F::Item, RecvTimeoutError> {\n        match 
Instant::now().checked_add(timeout) {\n            Some(deadline) => match self._recv_blocking(Some(deadline)) {\n                Ok(item) => Ok(item),\n                Err(true) => Err(RecvTimeoutError::Disconnected),\n                Err(false) => Err(RecvTimeoutError::Timeout),\n            },\n            None => self.try_recv().map_err(|e| match e {\n                TryRecvError::Disconnected => RecvTimeoutError::Disconnected,\n                TryRecvError::Empty => RecvTimeoutError::Timeout,\n            }),\n        }\n    }\n\n    /// NOTE: be aware that _try_recv_cached does not guarantee all message will be receive,\n    /// should retry again\n    #[inline(always)]\n    fn _try_select_cached<const FINAL: bool>(&self) -> Result<F::Item, usize> {\n        let last_idx = self.last_idx.get();\n        let handle = unsafe { self.handlers.get_unchecked(last_idx) };\n        let count = self.count.get();\n        let loop_count = if count > 0 {\n            if let Some(msg) = handle.shared.inner.try_recv_cached() {\n                handle.shared.on_recv();\n                self.count.set(count - 1);\n                return Ok(msg);\n            }\n            self.handlers.len() - 1\n        } else {\n            self.handlers.len()\n        };\n        if let Some(item) = self._try_select_all::<FINAL>(last_idx, loop_count) {\n            return Ok(item);\n        }\n        Err(last_idx)\n    }\n\n    #[inline(always)]\n    fn _try_select_all<const FINAL: bool>(\n        &self, mut idx: usize, loop_count: usize,\n    ) -> Option<F::Item> {\n        let len = self.handlers.len();\n        for _ in 0..loop_count {\n            idx = if idx + 1 >= len { 0 } else { idx + 1 };\n            let handle = unsafe { self.handlers.get_unchecked(idx) };\n            if let Some(msg) = if FINAL {\n                handle.shared.inner.try_recv_final()\n            } else {\n                handle.shared.inner.try_recv()\n            } {\n                
handle.shared.on_recv();\n                self.count.set(handle.weight);\n                self.last_idx.set(idx);\n                return Some(msg);\n            }\n        }\n        None\n    }\n\n    /// Internal method to perform blocking receive with optional timeout\n    ///\n    /// # Parameters\n    ///\n    /// * `deadline` - Optional deadline for the operation; if None, blocks indefinitely\n    ///\n    /// # Returns\n    ///\n    /// Returns `Ok(item)` on successful receive, `Err(true)` if disconnected, `Err(false)` if timed out\n    #[inline]\n    fn _recv_blocking(&self, deadline: Option<Instant>) -> Result<F::Item, bool> {\n        let mut start_idx;\n        match self._try_select_cached::<false>() {\n            Ok(item) => return Ok(item),\n            Err(idx) => {\n                start_idx = idx;\n            }\n        }\n        let mut backoff = Backoff::from(BackoffConfig::detect());\n        backoff.snooze();\n        let len = self.handlers.len();\n        loop {\n            loop {\n                if let Some(item) = self._try_select_all::<false>(start_idx, len) {\n                    return Ok(item);\n                }\n                if backoff.snooze() {\n                    break;\n                }\n            }\n            // TODO For thread, actually the waker can be reuse and not change\n            self.waker.init_blocking();\n            let closing = self.waker.get_opened_count() == 0;\n            if let Some(item) = self._try_select_all::<true>(start_idx, len) {\n                return Ok(item);\n            }\n            if closing {\n                // NOTE: double check the channels after checking close count, otherwise we will be\n                // missing some last messages\n                return Err(true);\n            }\n            let mut state = WakerState::Init as u8;\n            while state < WakerState::Woken as u8 {\n                match check_timeout(deadline) {\n                    Ok(None) => {\n     
                   thread::park();\n                    }\n                    Ok(Some(dur)) => {\n                        thread::park_timeout(dur);\n                    }\n                    Err(_) => {\n                        // As sc don't need to abandon\n                        return Err(false);\n                    }\n                }\n                state = self.waker.get_waker_state(Ordering::SeqCst);\n            }\n            backoff.reset();\n            start_idx = self.waker.get_hint();\n        }\n    }\n}\n\nimpl<F: Flavor> Drop for Multiplex<F> {\n    #[inline]\n    fn drop(&mut self) {\n        for handle in &self.handlers {\n            handle.shared.close_rx();\n        }\n    }\n}\n\nimpl<F: Flavor> fmt::Debug for Multiplex<F> {\n    #[inline]\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        write!(f, \"Multiplex<{}>\", std::any::type_name::<F>())\n    }\n}\n\nimpl<F: Flavor> fmt::Display for Multiplex<F> {\n    #[inline]\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        fmt::Debug::fmt(self, f)\n    }\n}\n\nimpl<F: Flavor> BlockingRxTrait<F::Item> for Multiplex<F>\nwhere\n    F::Item: Send + 'static,\n{\n    #[inline(always)]\n    fn recv(&self) -> Result<F::Item, RecvError> {\n        Self::recv(self)\n    }\n\n    #[inline(always)]\n    fn try_recv(&self) -> Result<F::Item, TryRecvError> {\n        Self::try_recv(self)\n    }\n\n    #[inline(always)]\n    fn recv_timeout(&self, timeout: Duration) -> Result<F::Item, RecvTimeoutError> {\n        Self::recv_timeout(self, timeout)\n    }\n\n    /// The number of messages in the channel at the moment\n    #[inline(always)]\n    fn len(&self) -> usize {\n        0\n    }\n\n    /// always return None\n    #[inline(always)]\n    fn capacity(&self) -> Option<usize> {\n        None\n    }\n\n    /// Returns true when all the channel's empty\n    #[inline(always)]\n    fn is_empty(&self) -> bool {\n        for handle in &self.handlers {\n            
if !handle.shared.is_empty() {\n                return false;\n            }\n        }\n        true\n    }\n\n    /// Not practical to impl\n    #[inline(always)]\n    fn is_full(&self) -> bool {\n        false\n    }\n\n    /// Return true if all sender has been close\n    #[inline(always)]\n    fn is_disconnected(&self) -> bool {\n        self.get_tx_count() == 0\n    }\n\n    /// NOTE: it does not count all the clones to the senders\n    #[inline(always)]\n    fn get_tx_count(&self) -> usize {\n        self.waker.get_opened_count()\n    }\n\n    /// This is single consumer\n    #[inline(always)]\n    fn get_rx_count(&self) -> usize {\n        1\n    }\n\n    fn get_wakers_count(&self) -> (usize, usize) {\n        (0, 0)\n    }\n\n    fn clone_to_vec(self, _count: usize) -> Vec<Self> {\n        unimplemented!();\n    }\n}\n"
  },
  {
    "path": "src/select/select.rs",
    "content": "// Internal Implementation Details:\n//\n// Since mixing send and receive operations is rare, and the waker types for senders and receivers\n// are different, we only implement `select` for receive operations.\n//\n// In `shared.rs`, `SelectHandle` is implemented for `ChannelShare<F>`\n//\n// ## SelectWaker\n//\n// `SelectWaker` is wrapped in an `Arc<SelectWaker>`, holding the actual waker\n//\n// ### RegistryMultiRecv\n// - Requires `reg_waker()` to be called only once, so the `registered` flag is saved as `true`.\n// - Provides `cancel_waker()`.\n// - `RegistryMultiInner` maintains a `Vec<(channel_id, Arc<SelectWaker>)>`.\n//   It does not remove the waker after waking it up.\n// - When waking up `SelectWaker`, it saves its own `channel_id` into the `SelectWaker`'s hint.\n// - The `is_empty` flag in `RegistryMulti` can be extended from `bool` to `u8` to represent three states:\n//   `empty`, `has select`, and `without select`.\n//\n// ### RegistrySingle\n// - Needs to re-register in every select loop, so `RecvHandle` saves `registered` as `false`.\n// - `cancel_waker` is an empty implementation.\n// - During registration, it clones the `ArcWaker` (generated at the start of the select flow inside `Arc<SelectWaker>`)\n//   into `RegistrySingle`. 
A new method can be added to abstract this process.\n//\n// ### Select::drop\n// - Unregister using `cancel_waker()` for all handles.\n//\n// ## Safety and Validation\n// - `SelectResult` is returned to the user and contains a pointer of receiver to the slot.\n// - If the user incorrectly uses a `SelectResult` from one channel on a different receiver,\n//   this pointer address is checked, causing a panic to ensure safety.\n\nuse super::SelectMode;\nuse crate::backoff::*;\nuse crate::flavor::Token;\nuse crate::shared::{check_timeout, ChannelShared};\nuse crate::trace_log;\nuse crate::waker::WakerState;\nuse crate::waker_registry::SelectWaker;\nuse crate::ReceiverType;\nuse crate::{RecvError, RecvTimeoutError, TryRecvError};\nuse smallvec::SmallVec;\nuse std::collections::hash_map::DefaultHasher;\nuse std::fmt;\nuse std::hash::{Hash, Hasher};\nuse std::ops::Add;\nuse std::sync::{atomic::Ordering, Arc};\nuse std::thread;\nuse std::time::{Duration, Instant};\n\n/// The select interface only support select from receivers.\n///\n/// - The user add receivers for subscription.\n/// - call [Select::select] or [Select::select_timeout] and get [SelectResult]\n/// - Use [read_select](crate::Rx::read_select) to handle [SelectResult]. 
(**Safety**: If `SelectResult`\n///   dropped without processed, will result in message leak/hang.)\n/// - Although the `Select` object has a lifecycle and should live inside a function scope, it can be reused in a loop.\n/// - On drop it will automatically cancel all registration.\n///\n/// ## Example\n///\n/// ```rust\n/// use crossfire::{mpmc, mpsc, RecvError};\n/// use crossfire::select::Select;\n///\n/// let (tx1, rx1) = mpmc::bounded_blocking::<i32>(10);\n/// let (tx2, rx2) = mpsc::bounded_blocking::<i32>(10);\n///\n/// // Send some messages\n/// tx1.send(100).unwrap();\n/// tx2.send(200).unwrap();\n///\n/// // Drop senders to simulate disconnection after messages are sent\n/// drop(tx1);\n/// drop(tx2);\n///\n/// let mut select = Select::new();\n/// select.add(&rx1);\n/// select.add(&rx2);\n///\n/// // Loop until all channels are disconnected and removed from select\n/// loop {\n///     // When `select()` returns `Err(RecvError)`, it means all channels\n///     // previously added to `select` have been disconnected or removed.\n///     // In such a case, there's nothing left to select from, so we break.\n///     let res = match select.select() {\n///         Ok(res) => res,\n///         Err(RecvError) => {\n///             println!(\"All channels disconnected or removed from select. 
Breaking loop.\");\n///             break;\n///         },\n///     };\n///\n///     // Handle the result from the ready receiver\n///     if res == rx1 {\n///         match rx1.read_select(res) {\n///             Ok(val) => println!(\"Received from rx1: {}\", val),\n///             Err(RecvError) => { // Now RecvError\n///                 println!(\"rx1 disconnected, removing from select.\");\n///                 select.remove(&rx1); // Remove disconnected receiver\n///             },\n///         }\n///     } else if res == rx2 {\n///         match rx2.read_select(res) {\n///             Ok(val) => println!(\"Received from rx2: {}\", val),\n///             Err(RecvError) => { // Now RecvError\n///                 println!(\"rx2 disconnected, removing from select.\");\n///                 select.remove(&rx2); // Remove disconnected receiver\n///             },\n///         }\n///     }\n/// }\n/// ```\npub struct Select<'a> {\n    handlers: SmallVec<[RecvHandle<'a>; 32]>,\n    waker: Arc<SelectWaker>,\n    mode: SelectMode,\n    next_index: usize,\n    rng: u64,\n}\n\nimpl<'a> Select<'a> {\n    /// Initialize Select with fair, round-robin strategy\n    pub fn new() -> Self {\n        Self::new_with(SelectMode::RR)\n    }\n\n    /// Initialize Select with fair strategy (check start from random channel)\n    #[inline]\n    pub fn new_random() -> Self {\n        Self::new_with(SelectMode::Rand)\n    }\n\n    /// Initialize Select with bias strategy (check according to the order of `add()`)\n    #[inline]\n    pub fn new_bias() -> Self {\n        Self::new_with(SelectMode::Bias)\n    }\n\n    #[inline]\n    pub fn new_with(mode: SelectMode) -> Self {\n        let rng = if let SelectMode::Rand = mode {\n            let mut hasher = DefaultHasher::new();\n            Instant::now().hash(&mut hasher);\n            thread::current().id().hash(&mut hasher);\n            hasher.finish()\n        } else {\n            0\n        };\n\n        Self {\n            mode,\n      
      handlers: SmallVec::new(),\n            waker: Arc::new(SelectWaker::new()),\n            next_index: 0,\n            rng,\n        }\n    }\n\n    /// Add a channel receiver for watch\n    #[inline]\n    pub fn add<R: ReceiverType>(&mut self, recv: &'a R)\n    where\n        ChannelShared<R::Flavor>: SelectHandle,\n    {\n        let shared: &ChannelShared<R::Flavor> = recv.as_ref();\n        self.handlers.push(RecvHandle {\n            registered: false,\n            shared: shared as &dyn SelectHandle,\n            channel: recv as *const R as *const u8,\n        });\n    }\n\n    /// Remove a channel receiver from watch\n    pub fn remove<R: ReceiverType>(&mut self, recv: &R) {\n        let channel = recv as *const R as *const u8;\n        if let Some(index) = self.handlers.iter().position(|h| h.channel == channel) {\n            self.handlers[index].shared.cancel_waker(&self.waker);\n            self.handlers.remove(index);\n            if !self.handlers.is_empty() {\n                if self.next_index >= self.handlers.len() {\n                    self.next_index = 0;\n                }\n                for handler in &mut self.handlers {\n                    handler.registered = false;\n                    handler.shared.cancel_waker(&self.waker);\n                }\n            }\n        }\n    }\n\n    /// Attempts to select a message from any of the registered receivers without blocking.\n    ///\n    /// Returns:\n    /// - `Ok(SelectResult)` if a message is immediately available from any channel.\n    /// - `Err(TryRecvError::Empty)` if no messages are ready, but at least one channel is still connected.\n    /// - `Err(TryRecvError::Disconnected)` if all registered channels are disconnected or removed from select.\n    pub fn try_select(&mut self) -> Result<SelectResult, TryRecvError> {\n        if self.handlers.is_empty() {\n            return Err(TryRecvError::Disconnected);\n        }\n        let idx = self._try_select_begin();\n        if let 
Some(res) = self._try_select(idx, true) {\n            return Ok(res);\n        }\n        Err(TryRecvError::Empty)\n    }\n\n    #[inline(always)]\n    fn _try_select(&mut self, mut idx: usize, final_check: bool) -> Option<SelectResult> {\n        let len = self.handlers.len();\n        debug_assert!(len > 0);\n        for _ in 0..len {\n            // Ensure idx is within bounds for the current iteration.\n            if idx >= len {\n                idx = 0;\n            }\n            // final_check=true also check if any channel is closed.\n            if let Ok(res) = self.handlers[idx].try_select(final_check) {\n                trace_log!(\"select ok idx={}\", idx);\n                if self.mode == SelectMode::RR {\n                    self.next_index = idx + 1;\n                }\n                return Some(res);\n            } else if final_check {\n                trace_log!(\"select: final_check {}\", idx);\n            }\n            idx += 1;\n        }\n        None\n    }\n\n    #[inline(always)]\n    fn _try_select_begin(&mut self) -> usize {\n        match self.mode {\n            SelectMode::Bias => 0,\n            SelectMode::RR => {\n                if self.next_index >= self.handlers.len() {\n                    0\n                } else {\n                    self.next_index\n                }\n            }\n            SelectMode::Rand => {\n                let mut x = self.rng;\n                x ^= x << 13;\n                x ^= x >> 7;\n                x ^= x << 17;\n                self.rng = x;\n                (x as usize) % self.handlers.len()\n            }\n        }\n    }\n\n    /// Blocking current thread and wait for message from multiple receivers or close event\n    ///\n    /// See [crate::select] document for usage\n    ///\n    /// # Return conditions:\n    ///\n    /// - Return Ok(SelectResult) when one of the channel has result or close.\n    /// - For closed channel, you have to remove the receiver from select, 
otherwise the select\n    ///   will already return immediately.\n    /// - If there's no handler left in it, will return RecvError\n    pub fn select(&mut self) -> Result<SelectResult, RecvError> {\n        match self._select_blocking(None) {\n            Ok(res) => Ok(res),\n            Err(true) => Err(RecvError),\n            _ => unreachable!(),\n        }\n    }\n\n    /// Blocking current thread and wait with a timeout, for message from multiple receivers or close event\n    ///\n    /// See [crate::select] document for usage\n    ///\n    /// # Return conditions:\n    ///\n    /// - Return Ok(SelectResult) when one of the channel has result or close.\n    /// - For closed channel, you have to remove the receiver from select, otherwise the select\n    ///   will already return immediately.\n    /// - For Timeout returns RecvTimeoutError::Timeout;\n    /// - If there's no handler left in it, will return RecvTimeoutError::Disconnected.\n    pub fn select_timeout(&mut self, timeout: Duration) -> Result<SelectResult, RecvTimeoutError> {\n        let deadline = Instant::now().add(timeout);\n        match self._select_blocking(Some(deadline)) {\n            Ok(res) => Ok(res),\n            Err(true) => Err(RecvTimeoutError::Disconnected),\n            Err(false) => Err(RecvTimeoutError::Timeout),\n        }\n    }\n\n    #[inline(always)]\n    fn _select_blocking(&mut self, deadline: Option<Instant>) -> Result<SelectResult, bool> {\n        // Initial non-blocking check, respecting SelectMode\n        if self.handlers.is_empty() {\n            return Err(true); // All handlers are disconnected or removed\n        }\n        let mut idx = self._try_select_begin();\n        if let Some(res) = self._try_select(idx, false) {\n            return Ok(res);\n        }\n        let mut backoff = Backoff::from(BackoffConfig::detect());\n        backoff.snooze();\n        // If try_select returned None, we check if all handlers are gone.\n        loop {\n            loop {\n 
               if let Some(res) = self._try_select(idx, false) {\n                    return Ok(res);\n                }\n                if backoff.snooze() {\n                    break;\n                }\n            }\n            // init SelectWaker\n            self.waker.init_blocking();\n            // Register all handlers (handlers with `registered=true` may be skipped).\n            for (i, handler) in self.handlers.iter_mut().enumerate() {\n                handler.reg_waker(i, &self.waker);\n            }\n            // After registration, do another check, this time with final_check=true\n            if let Some(res) = self._try_select(idx, true) {\n                return Ok(res);\n            }\n            trace_log!(\"select: park\");\n            let mut state = WakerState::Init as u8;\n            while state < WakerState::Woken as u8 {\n                match check_timeout(deadline) {\n                    Ok(None) => {\n                        std::thread::park();\n                    }\n                    Ok(Some(dur)) => {\n                        std::thread::park_timeout(dur);\n                    }\n                    Err(_) => {\n                        return Err(false);\n                    }\n                }\n                state = self.waker.get_waker_state(Ordering::SeqCst);\n                trace_log!(\"select: unpark state={}\", state);\n            }\n            // NOTE: there may be spurious wakeup, but since the SelectWaker is registered in\n            // wake up, first check the one with hint\n            idx = self.waker.get_hint();\n            trace_log!(\"select: hint idx {}\", idx);\n        }\n    }\n}\n\nimpl<'a> Drop for Select<'a> {\n    #[inline(always)]\n    fn drop(&mut self) {\n        for handler in &self.handlers {\n            handler.shared.cancel_waker(&self.waker);\n        }\n    }\n}\n\nimpl<'a> std::fmt::Debug for Select<'a> {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n 
       write!(f, \"Select\")\n    }\n}\n\nstruct RecvHandle<'a> {\n    shared: &'a dyn SelectHandle,\n    // If multi is true, the registration is persistent until cancel\n    registered: bool,\n    // for validate against unsafe usage\n    channel: *const u8,\n}\n\nimpl<'a> RecvHandle<'a> {\n    #[inline(always)]\n    fn try_select(&self, final_check: bool) -> Result<SelectResult, ()> {\n        if let Some(token) = self.shared.try_select(final_check) {\n            return Ok(SelectResult { channel: self.channel, token });\n        }\n        Err(())\n    }\n\n    #[inline(always)]\n    fn reg_waker(&mut self, index: usize, global_waker: &Arc<SelectWaker>) {\n        if self.registered {\n            return;\n        }\n        if self.shared.reg_waker(index, global_waker) {\n            trace_log!(\"select: reg waker\");\n            self.registered = true;\n        }\n    }\n}\n\n/// The result from [Select::select], use for calling `read_select()` on the receiver type, may contains event to receive or disconnected event\n///\n/// **Safety**: If `SelectResult` dropped without processed, will result in message leak/hang.\n///\n/// See the example of select interface.\npub struct SelectResult {\n    // for validation\n    pub(crate) channel: *const u8,\n    pub(crate) token: Token,\n}\n\nimpl fmt::Debug for SelectResult {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"SelectResult(from {:p})\", self.channel)\n    }\n}\n\nimpl SelectResult {\n    /// Check if the result is for specified receiver\n    #[inline]\n    pub fn is_from<R: ReceiverType>(&self, rx: &R) -> bool {\n        self.channel == rx as *const R as *const u8\n    }\n}\n\nimpl<R: ReceiverType> PartialEq<R> for SelectResult {\n    /// Short cut for [SelectResult::is_from()]\n    #[inline]\n    fn eq(&self, other: &R) -> bool {\n        self.is_from(other)\n    }\n}\n\n#[allow(private_bounds)]\npub(crate) trait SelectHandle: Send {\n    /// If final_check is true, 
should check channel closing, should use SeqCst ordering\n    fn try_select(&self, final_check: bool) -> Option<Token>;\n\n    /// For RegistryMulti return true means the waker will be persistent, otherwise return false\n    fn reg_waker(&self, channel_id: usize, waker: &Arc<SelectWaker>) -> bool;\n\n    fn cancel_waker(&self, waker: &Arc<SelectWaker>);\n}\n"
  },
  {
    "path": "src/shared.rs",
    "content": "use crate::backoff::*;\npub(crate) use crate::crossbeam::err::*;\npub(crate) use crate::flavor::{Flavor, FlavorSelect, Token};\nuse crate::select::select::SelectHandle;\nuse crate::trace_log;\npub(crate) use crate::waker::*;\npub(crate) use crate::waker_registry::*;\nuse std::mem::MaybeUninit;\nuse std::sync::atomic::{compiler_fence, fence, AtomicUsize, Ordering};\nuse std::sync::Arc;\nuse std::time::{Duration, Instant};\n\npub struct ChannelShared<F: Flavor> {\n    pub(crate) inner: F,\n    tx_count: AtomicUsize,\n    rx_count: AtomicUsize,\n    pub(crate) senders: F::Send,\n    pub(crate) recvs: F::Recv,\n    pub(crate) backoff_limit: u16,\n    pub(crate) large: bool,\n    pub(crate) may_direct_copy: bool,\n}\n\nimpl<F: Flavor> ChannelShared<F> {\n    pub(crate) fn new(inner: F, senders: F::Send, recvs: F::Recv) -> Arc<Self> {\n        let mut large = false;\n        if let Some(bound) = inner.capacity() {\n            if bound >= 10 {\n                large = true;\n            }\n        }\n        Arc::new(Self {\n            tx_count: AtomicUsize::new(1),\n            rx_count: AtomicUsize::new(1),\n            senders,\n            recvs,\n            backoff_limit: inner.backoff_limit(),\n            large,\n            may_direct_copy: inner.may_direct_copy(),\n            inner,\n        })\n    }\n\n    #[inline(always)]\n    pub(crate) fn try_recv(&self) -> Result<F::Item, TryRecvError> {\n        if let Some(item) = self.inner.try_recv_final() {\n            self.on_recv();\n            Ok(item)\n        } else {\n            if self.is_tx_closed() {\n                return Err(TryRecvError::Disconnected);\n            }\n            Err(TryRecvError::Empty)\n        }\n    }\n\n    #[inline(always)]\n    pub(crate) fn read_with_token(&self, token: Token) -> Result<F::Item, RecvError>\n    where\n        F: FlavorSelect,\n    {\n        if token.pos.is_null() {\n            Err(RecvError)\n        } else {\n            let item = 
self.inner.read_with_token(token);\n            self.on_recv();\n            Ok(item)\n        }\n    }\n\n    /// The number of messages in the channel.\n    #[inline(always)]\n    pub fn len(&self) -> usize {\n        self.inner.len()\n    }\n\n    /// The capacity of the channel. Returns `None` for unbounded channels.\n    #[inline(always)]\n    pub fn capacity(&self) -> Option<usize> {\n        self.inner.capacity()\n    }\n\n    /// Returns `true` if the channel is empty.\n    #[inline(always)]\n    pub fn is_empty(&self) -> bool {\n        self.inner.is_empty()\n    }\n\n    /// Returns `true` if the channel is full.\n    pub fn is_full(&self) -> bool {\n        self.inner.is_full()\n    }\n\n    /// Returns the number of senders for the channel.\n    #[inline(always)]\n    pub fn get_tx_count(&self) -> usize {\n        self.tx_count.load(Ordering::SeqCst)\n    }\n\n    /// Returns the number of receivers for the channel.\n    #[inline(always)]\n    pub fn get_rx_count(&self) -> usize {\n        self.rx_count.load(Ordering::SeqCst)\n    }\n\n    #[inline(always)]\n    pub(crate) fn sender_direct_copy(&self) -> bool {\n        self.may_direct_copy && self.senders.use_direct_copy()\n    }\n\n    /// Returns the number of wakers for senders and receivers. 
For debugging purposes.\n    pub fn get_wakers_count(&self) -> (usize, usize) {\n        (self.senders.len(), self.recvs.len())\n    }\n\n    #[inline(always)]\n    pub(crate) fn is_tx_closed(&self) -> bool {\n        self.tx_count.load(Ordering::SeqCst) == 0\n    }\n\n    #[inline(always)]\n    pub(crate) fn is_rx_closed(&self) -> bool {\n        self.rx_count.load(Ordering::SeqCst) == 0\n    }\n\n    #[inline(always)]\n    pub(crate) fn add_tx(&self) {\n        // The drop will close_tx, which has release fence\n        let _ = self.tx_count.fetch_add(1, Ordering::Relaxed);\n    }\n\n    /// for Upgrade of WeakTx\n    #[inline(always)]\n    pub(crate) fn try_add_tx(&self) -> bool {\n        let mut count = self.tx_count.load(Ordering::Relaxed);\n        loop {\n            if count == 0 {\n                return false;\n            }\n            match self.tx_count.compare_exchange(\n                count,\n                count + 1,\n                Ordering::SeqCst,\n                Ordering::Acquire,\n            ) {\n                Ok(_) => {\n                    return true;\n                }\n                Err(_count) => {\n                    count = _count;\n                    std::hint::spin_loop();\n                }\n            }\n        }\n    }\n\n    #[inline(always)]\n    pub(crate) fn add_rx(&self) {\n        // The drop will close_rx, which has release fence\n        let _ = self.rx_count.fetch_add(1, Ordering::Relaxed);\n    }\n\n    /// This method is called when a sender is dropped.\n    #[inline(always)]\n    pub(crate) fn close_tx(&self) {\n        let old = self.tx_count.fetch_sub(1, Ordering::Release);\n        if old <= 1 {\n            trace_log!(\"closing from tx\");\n            fence(Ordering::SeqCst);\n            self.recvs.close();\n        } else {\n            trace_log!(\"drop tx {}\", old - 1);\n        }\n    }\n\n    /// This method is called when a receiver is dropped.\n    #[inline(always)]\n    pub(crate) fn 
close_rx(&self) {\n        let old = self.rx_count.fetch_sub(1, Ordering::Release);\n        if old <= 1 {\n            trace_log!(\"closing from rx\");\n            fence(Ordering::SeqCst);\n            // There's SeqCst fence inside RegistrySender::close\n            self.senders.close();\n        } else {\n            trace_log!(\"drop rx {}\", old - 1);\n        }\n    }\n\n    /// if need_wake == true, called from on_recv(), when return None indicates try to wake up next.\n    /// when need_wake == false, will always return Some(state).\n    ///\n    /// NOTE: when return state=Done, the waker is not set to Done\n    #[inline]\n    pub(crate) fn sender_double_check<const SINK: bool>(\n        &self, item: &MaybeUninit<F::Item>, o_waker: &mut Option<<F::Send as Registry>::Waker>,\n    ) -> u8 {\n        // Not allow Spurious wake and enter this function again;\n        if let Some(res) = self.inner.try_send_oneshot(item.as_ptr()) {\n            if res {\n                self.on_send();\n                self.senders.cancel_reuse_waker(o_waker, WakerState::Done)\n            } else {\n                let state = if SINK {\n                    WakerState::Init as u8\n                } else {\n                    self.senders.commit_waiting(o_waker)\n                };\n                if self.is_rx_closed() {\n                    return WakerState::Closed as u8;\n                }\n                state\n            }\n        } else {\n            // Unlikely to be disconnected,\n            self.senders.cancel_reuse_waker(o_waker, WakerState::Woken)\n        }\n    }\n\n    /// Wait a little more for the waker state change,\n    /// NOTE: it's important to yield when you have more sender than receiver\n    #[inline(always)]\n    pub(crate) fn sender_snooze(\n        &self, o_waker: &Option<<F::Send as Registry>::Waker>, backoff: &mut Backoff,\n    ) -> u8 {\n        backoff.reset();\n        loop {\n            let state = self.senders.get_waker_state(o_waker, 
Ordering::Relaxed);\n            compiler_fence(Ordering::AcqRel);\n            if state >= WakerState::Woken as u8 {\n                return state;\n            }\n            if backoff.snooze() {\n                return state;\n            }\n        }\n    }\n\n    /// Wake up one rx\n    #[inline(always)]\n    pub(crate) fn on_send(&self) {\n        self.recvs.fire();\n    }\n\n    /// Wake up one tx\n    #[inline(always)]\n    pub(crate) fn on_recv(&self) {\n        if WakeResult::Sent == self.senders.fire(&self.inner) {\n            self.on_send();\n        }\n    }\n\n    /// Call on cancellation, return true to indicate drop temporary message\n    /// return false to indicate already Done.\n    #[inline(always)]\n    pub(crate) fn abandon_send_waker(&self, waker: &<F::Send as Registry>::Waker) -> bool {\n        match self.senders.abandon_waker(waker) {\n            Ok(_) => true,\n            Err(state) => {\n                trace_log!(\"tx: abandon err  {:?} {}\", waker, state);\n                if state == WakerState::Woken as u8 {\n                    // We are awake, but give up sending, should notify another sender for safety\n                    self.on_recv();\n                } else if state == WakerState::Closed as u8 {\n                } else {\n                    debug_assert_eq!(state, WakerState::Done as u8);\n                    // Unused code for direct_copy\n                    return false;\n                }\n                true\n            }\n        }\n    }\n\n    /// Call on cancellation, return true to indicate drop temporary message\n    #[inline(always)]\n    pub(crate) fn abandon_recv_waker(&self, waker: &<F::Recv as Registry>::Waker) {\n        if let Err(state) = self.recvs.abandon_waker(waker) {\n            trace_log!(\"rx: abandon err {:?} {}\", waker, state);\n            if state == WakerState::Woken as u8 {\n                // We are awake, but give up receiving, should notify another receiver for safety\n              
  self.on_send();\n            } else if state == WakerState::Closed as u8 {\n                // Closed\n            } else {\n                debug_assert_eq!(state, WakerState::Done as u8);\n                // Unused code for direct_copy\n            }\n        }\n    }\n\n    #[inline(always)]\n    pub(crate) fn get_async_backoff(&self) -> Option<Backoff> {\n        if self.large {\n            return None;\n        }\n        let cfg = BackoffConfig::detect();\n        if cfg.spin_limit == 0 {\n            // 1 core don't backoff\n            return None;\n        }\n        // It's effective to yield for size=1\n        Some(Backoff::from(cfg.limit(self.backoff_limit)))\n    }\n}\n\nimpl<F: Flavor + FlavorSelect> SelectHandle for ChannelShared<F> {\n    #[inline(always)]\n    fn try_select(&self, final_check: bool) -> Option<Token> {\n        if let Some(token) = self.inner.try_select(final_check) {\n            return Some(token);\n        }\n        if final_check && self.get_tx_count() == 0 {\n            return Some(Token::default());\n        }\n        None\n    }\n\n    #[inline(always)]\n    fn reg_waker(&self, channel_id: usize, waker: &Arc<SelectWaker>) -> bool {\n        self.recvs.reg_select_waker(channel_id, waker)\n    }\n\n    #[inline(always)]\n    fn cancel_waker(&self, waker: &Arc<SelectWaker>) {\n        self.recvs.cancel_select_waker(waker)\n    }\n}\n\n/// On timed out, returns Err(())\n#[inline(always)]\npub fn check_timeout(deadline: Option<Instant>) -> Result<Option<Duration>, ()> {\n    if let Some(end) = deadline {\n        let now = Instant::now();\n        if now < end {\n            return Ok(Some(end - now));\n        } else {\n            return Err(());\n        }\n    }\n    Ok(None)\n}\n"
  },
  {
    "path": "src/sink.rs",
    "content": "use crate::shared::*;\nuse crate::{flavor::FlavorMP, AsyncTx, MAsyncTx, TrySendError};\nuse std::fmt;\nuse std::mem::MaybeUninit;\nuse std::ops::Deref;\nuse std::task::*;\n\n/// An async sink that allows you to write custom futures with `poll_send(ctx)`.\npub struct AsyncSink<F: Flavor> {\n    tx: AsyncTx<F>,\n    waker: Option<<F::Send as Registry>::Waker>,\n}\n\nimpl<F: Flavor> fmt::Debug for AsyncSink<F> {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"AsyncSink\")\n    }\n}\n\nimpl<F: Flavor> fmt::Display for AsyncSink<F> {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"AsyncSink\")\n    }\n}\n\nimpl<F: Flavor> AsyncSink<F> {\n    #[inline]\n    pub fn new(tx: AsyncTx<F>) -> Self {\n        Self { tx, waker: None }\n    }\n}\n\nimpl<F: Flavor> Deref for AsyncSink<F> {\n    type Target = AsyncTx<F>;\n\n    #[inline]\n    fn deref(&self) -> &Self::Target {\n        &self.tx\n    }\n}\n\nimpl<F: Flavor> From<AsyncTx<F>> for AsyncSink<F> {\n    #[inline]\n    fn from(tx: AsyncTx<F>) -> Self {\n        tx.into_sink()\n    }\n}\n\nimpl<F: Flavor + FlavorMP> From<MAsyncTx<F>> for AsyncSink<F> {\n    #[inline]\n    fn from(tx: MAsyncTx<F>) -> Self {\n        tx.into_sink()\n    }\n}\n\nimpl<F: Flavor> AsyncSink<F>\nwhere\n    F::Item: Unpin,\n{\n    /// `poll_send()` will try to send a message.\n    /// If the channel is full, it will register a notification for the next poll.\n    ///\n    /// # Behavior\n    ///\n    /// The polling behavior is different from [SendFuture](crate::SendFuture).\n    /// Because the waker is not exposed to the user, you cannot perform delicate operations on\n    /// the waker (compared to the `Drop` handler in `SendFuture`).\n    /// To make sure no deadlock happens on cancellation, the `WakerState` will be `Init`\n    /// after being registered (and will not be converted to `Waiting`).\n    /// The receivers will wake up all `Init` state wakers until 
they find a normal\n    /// pending sender in the `Waiting` state.\n    ///\n    /// # Return value:\n    ///\n    /// Returns `Ok(())` on message sent.\n    ///\n    /// Returns `Err([crate::TrySendError::Full])` for a `Poll::Pending` case.\n    /// The next time the channel is not full, your future will be woken again.\n    /// You should then continue calling `poll_send()` to send the message.\n    /// If you want to cancel, just don't call `poll_send()` again. There are no side effects,\n    /// and other senders will have a chance to send their messages.\n    ///\n    /// Returns `Err([crate::TrySendError::Disconnected])` when all `Rx` are dropped.\n    #[inline]\n    pub fn poll_send(\n        &mut self, ctx: &mut Context, item: F::Item,\n    ) -> Result<(), TrySendError<F::Item>> {\n        let _item = MaybeUninit::new(item);\n        let shared = &self.tx.shared;\n        if shared.inner.try_send(&_item) {\n            shared.on_send();\n            return Ok(());\n        }\n        match self.tx.poll_send::<true>(ctx, &_item, &mut self.waker) {\n            Poll::Ready(Ok(())) => Ok(()),\n            Poll::Ready(Err(())) => Err(TrySendError::Disconnected(unsafe { _item.assume_init() })),\n            Poll::Pending => Err(TrySendError::Full(unsafe { _item.assume_init() })),\n        }\n    }\n}\n\nimpl<F: Flavor> Drop for AsyncSink<F> {\n    fn drop(&mut self) {\n        if let Some(waker) = self.waker.as_ref() {\n            self.tx.shared.abandon_send_waker(waker);\n        }\n    }\n}\n"
  },
  {
    "path": "src/spsc.rs",
    "content": "//! Single producer, single consumer.\n//!\n//! The optimization assumes a single producer and consumer, so waker registration is completely lockless.\n//!\n//! **NOTE**: For the SP/SC version, [AsyncTx], [AsyncRx], [Tx], and [Rx] are not `Clone` and do not implement `Sync`.\n//! Although they can be moved to other threads, they are not allowed to be used with `send`/`recv` while in an `Arc`.\n//!\n//! The following code is OK:\n//!\n//! ``` rust\n//! use crossfire::*;\n//! async fn foo() {\n//!     let (tx, rx) = spsc::bounded_async::<usize>(100);\n//!     tokio::spawn(async move {\n//!          let _ = tx.send(2).await;\n//!     });\n//!     drop(rx);\n//! }\n//! ```\n//!\n//! Because the `AsyncTx` does not have the `Sync` marker, using `Arc<AsyncTx>` will lose the `Send` marker.\n//!\n//! For your safety, the following code **should not compile**:\n//!\n//! ``` compile_fail\n//! use crossfire::*;\n//! use std::sync::Arc;\n//! async fn foo() {\n//!     let (tx, rx) = spsc::bounded_async::<usize>(100);\n//!     let tx = Arc::new(tx);\n//!     tokio::spawn(async move {\n//!          let _ = tx.send(2).await;\n//!     });\n//!     drop(rx);\n//! }\n//! 
```\n\nuse crate::async_rx::*;\nuse crate::async_tx::*;\nuse crate::blocking_rx::*;\nuse crate::blocking_tx::*;\nuse crate::flavor::{\n    flavor_dispatch, flavor_select_dispatch, queue_dispatch, Flavor, FlavorBounded, FlavorImpl,\n    FlavorNew, FlavorWrap, Queue,\n};\nuse crate::shared::*;\nuse crate::{NotCloneable, ReceiverType, SenderType};\nuse std::mem::MaybeUninit;\n\n/// Flavor Type for unbounded SPSC channel\npub type List<T> = FlavorWrap<crate::flavor::List<T>, RegistryDummy, RegistrySingle>;\n\n/// Flavor type for one-sized SPSC channel\npub type One<T> = FlavorWrap<crate::flavor::OneSpsc<T>, RegistrySingle, RegistrySingle>;\n\n/// Flavor Type for bounded SPSC channel\n#[allow(clippy::large_enum_variant)]\npub enum Array<T> {\n    Array(crate::flavor::ArraySpsc<T>),\n    One(crate::flavor::OneSpsc<T>),\n}\n\nimpl<T> Array<T> {\n    #[inline]\n    pub fn new(size: usize) -> Self {\n        if size <= 1 {\n            Self::One(crate::flavor::OneSpsc::new())\n        } else {\n            Self::Array(crate::flavor::ArraySpsc::<T>::new(size))\n        }\n    }\n}\n\nmacro_rules! 
wrap_array {\n    ($self: expr, $method:ident $($arg:expr)*)=>{\n        match $self {\n            Self::Array(inner) => inner.$method($($arg)*),\n            Self::One(inner) => inner.$method($($arg)*),\n        }\n    };\n}\n\nimpl<T> Queue for Array<T> {\n    type Item = T;\n    queue_dispatch!(wrap_array);\n}\n\nimpl<T> FlavorImpl for Array<T> {\n    flavor_dispatch!(wrap_array);\n}\n\nimpl<T> FlavorSelect for Array<T> {\n    flavor_select_dispatch!(wrap_array);\n}\n\nimpl<T> FlavorBounded for Array<T> {\n    #[inline(always)]\n    fn new_with_bound(size: usize) -> Self {\n        Self::new(size)\n    }\n}\n\nimpl<T: 'static> Flavor for Array<T> {\n    type Send = RegistrySingle;\n    type Recv = RegistrySingle;\n}\n\n/// The generic builder for all spsc channel types with a new method (except Array).\n///\n/// Initialize sender and receiver types from a flavor type,\n/// you can let the compiler infer the type according to the return type signature.\n/// (the flavor might have a different new() method, but the rest is the same.)\n/// # Examples\n///\n/// ```rust\n/// use crossfire::*;\n/// let (tx, rx): (Tx<_>, Rx<_>) = spsc::new::<spsc::List<i32>, _, _>();\n/// let (tx, rx): (AsyncTx<spsc::One<usize>>, Rx<spsc::One<usize>>) = spsc::new();\n/// ```\n#[inline(always)]\npub fn new<F, S, R>() -> (S, R)\nwhere\n    F: Flavor + FlavorNew,\n    S: SenderType<Flavor = F> + NotCloneable,\n    R: ReceiverType<Flavor = F> + NotCloneable,\n{\n    build::<F, S, R>(F::new())\n}\n\n/// The generic builder for all spsc channel types.\n///\n/// Initialize sender and receiver types from a flavor type,\n/// you can let the compiler infer the type according to the return type signature.\n/// (the flavor might have a different new() method, but the rest is the same.)\n/// # Examples\n///\n/// ```rust\n/// use crossfire::{*, spsc::*};\n/// let (tx, rx): (Tx<_>, Rx<_>) = build::<List<i32>, _, _>(List::new());\n/// let (tx, rx): (AsyncTx<One<usize>>, Rx<One<usize>>)  = 
build(One::new());\n/// ```\n#[inline(always)]\npub fn build<F, S, R>(flavor: F) -> (S, R)\nwhere\n    F: Flavor,\n    S: SenderType<Flavor = F> + NotCloneable,\n    R: ReceiverType<Flavor = F> + NotCloneable,\n{\n    let shared = ChannelShared::new(flavor, F::Send::new(), F::Recv::new());\n    (S::new(shared.clone()), R::new(shared))\n}\n\n#[inline]\nfn unbounded_new<T, R>() -> (Tx<List<T>>, R)\nwhere\n    T: 'static,\n    R: ReceiverType<Flavor = List<T>> + NotCloneable,\n{\n    build::<List<T>, Tx<List<T>>, R>(List::<T>::from_inner(crate::flavor::List::<T>::new()))\n}\n\n#[inline]\npub fn unbounded_blocking<T>() -> (Tx<List<T>>, Rx<List<T>>)\nwhere\n    T: 'static,\n{\n    unbounded_new()\n}\n\n#[inline]\npub fn unbounded_async<T>() -> (Tx<List<T>>, AsyncRx<List<T>>)\nwhere\n    T: 'static,\n{\n    unbounded_new()\n}\n\nfn bounded_new<T, S, R>(size: usize) -> (S, R)\nwhere\n    T: 'static,\n    S: SenderType<Flavor = Array<T>> + NotCloneable,\n    R: ReceiverType<Flavor = Array<T>> + NotCloneable,\n{\n    build::<Array<T>, S, R>(Array::<T>::new(size))\n}\n\n/// Creates a bounded channel with a pair of blocking sender and receiver.\n///\n/// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.\n#[inline]\npub fn bounded_blocking<T>(size: usize) -> (Tx<Array<T>>, Rx<Array<T>>)\nwhere\n    T: 'static,\n{\n    bounded_new(size)\n}\n\n/// Creates a bounded channel with a pair of async sender and receiver.\n///\n/// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.\n#[inline]\npub fn bounded_async<T>(size: usize) -> (AsyncTx<Array<T>>, AsyncRx<Array<T>>)\nwhere\n    T: 'static,\n{\n    bounded_new(size)\n}\n\n/// Creates a bounded channel with a pair of blocking sender and async receiver.\n///\n/// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.\n#[inline]\npub fn bounded_blocking_async<T>(size: usize) -> (Tx<Array<T>>, 
AsyncRx<Array<T>>)\nwhere\n    T: 'static,\n{\n    bounded_new(size)\n}\n\n/// Creates a bounded channel with a pair of async sender and blocking receiver.\n///\n/// As a special case, a channel size of 0 is not supported and will be treated as a channel of size 1.\n#[inline]\npub fn bounded_async_blocking<T>(size: usize) -> (AsyncTx<Array<T>>, Rx<Array<T>>)\nwhere\n    T: 'static,\n{\n    bounded_new(size)\n}\n"
  },
  {
    "path": "src/stream.rs",
    "content": "use crate::shared::*;\nuse crate::{AsyncRx, MAsyncRx};\nuse futures_core::stream;\nuse std::fmt;\nuse std::ops::Deref;\nuse std::pin::Pin;\nuse std::task::*;\n\n/// Constructed by [AsyncRx::into_stream()](crate::AsyncRx::into_stream())\n///\n/// Implements `futures_core::stream::Stream`.\npub struct AsyncStream<F: Flavor> {\n    rx: AsyncRx<F>,\n    waker: Option<<F::Recv as Registry>::Waker>,\n    ended: bool,\n}\n\nimpl<F: Flavor> fmt::Debug for AsyncStream<F> {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"AsyncStream\")\n    }\n}\n\nimpl<F: Flavor> fmt::Display for AsyncStream<F> {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"AsyncStream\")\n    }\n}\n\nimpl<F: Flavor> AsyncStream<F> {\n    #[inline(always)]\n    pub fn new(rx: AsyncRx<F>) -> Self {\n        Self { rx, waker: None, ended: false }\n    }\n\n    /// `poll_item()` will try to receive a message.\n    /// If the channel is empty, it will register a notification for the next poll.\n    ///\n    /// # Behavior\n    ///\n    /// The polling behavior is different from [RecvFuture](crate::RecvFuture).\n    /// Because the waker is not exposed to the user, you cannot perform delicate operations on\n    /// the waker (compared to the `Drop` handler in `RecvFuture`).\n    /// To make sure no deadlock happens on cancellation, the `WakerState` will be `Init`\n    /// after being registered (and will not be converted to `Waiting`).\n    /// The senders will wake up all `Init` state wakers until they find a normal\n    /// pending receiver in the `Waiting` state.\n    ///\n    /// # Return Value:\n    ///\n    /// Returns `Ok(T)` on success.\n    ///\n    /// Returns Err([TryRecvError::Empty]) for a `Poll::Pending` case.\n    /// The next time the channel is not empty, your future will be woken again.\n    /// You should then continue calling `poll_item()` to receive the message.\n    /// If you want to cancel, just don't 
call `poll_item()` again. Others will still have a chance\n    /// to receive messages.\n    ///\n    /// Returns Err([TryRecvError::Disconnected]) if all `Tx` have been dropped and the channel is empty.\n    #[inline]\n    pub fn poll_item(&mut self, ctx: &mut Context) -> Poll<Option<F::Item>> {\n        match self.rx.poll_item::<true>(ctx, &mut self.waker) {\n            Ok(item) => Poll::Ready(Some(item)),\n            Err(e) => {\n                if e.is_empty() {\n                    return Poll::Pending;\n                }\n                self.ended = true;\n                Poll::Ready(None)\n            }\n        }\n    }\n}\n\nimpl<F: Flavor> Deref for AsyncStream<F> {\n    type Target = AsyncRx<F>;\n\n    #[inline]\n    fn deref(&self) -> &Self::Target {\n        &self.rx\n    }\n}\n\nimpl<F: Flavor> stream::Stream for AsyncStream<F> {\n    type Item = F::Item;\n\n    #[inline(always)]\n    fn poll_next(self: Pin<&mut Self>, ctx: &mut Context) -> Poll<Option<Self::Item>> {\n        let mut _self = self.get_mut();\n        if _self.ended {\n            return Poll::Ready(None);\n        }\n        match _self.rx.poll_item::<false>(ctx, &mut _self.waker) {\n            Ok(item) => Poll::Ready(Some(item)),\n            Err(e) => {\n                if e.is_empty() {\n                    return Poll::Pending;\n                }\n                _self.ended = true;\n                Poll::Ready(None)\n            }\n        }\n    }\n}\n\nimpl<F: Flavor> stream::FusedStream for AsyncStream<F> {\n    fn is_terminated(&self) -> bool {\n        self.ended\n    }\n}\n\nimpl<F: Flavor> Drop for AsyncStream<F> {\n    fn drop(&mut self) {\n        if let Some(waker) = self.waker.as_ref() {\n            self.rx.shared.abandon_recv_waker(waker);\n        }\n    }\n}\n\nimpl<F: Flavor> From<AsyncRx<F>> for AsyncStream<F> {\n    #[inline]\n    fn from(rx: AsyncRx<F>) -> Self {\n        rx.into_stream()\n    }\n}\n\nimpl<F: Flavor> From<MAsyncRx<F>> for AsyncStream<F> {\n  
  #[inline]\n    fn from(rx: MAsyncRx<F>) -> Self {\n        rx.into_stream()\n    }\n}\n"
  },
  {
    "path": "src/waitgroup.rs",
    "content": "//! This module provides two waitgroup implementation, works in blocking & async context.\n//! The implementation is low-cost ref-counting (counter and waker state is packed inside one atomic), the max value\n//! is (1 << (usize::BITS - 2) - 2)\n//!\n//! - [WaitGroupInline]: Which embedded inline with its parent structure (with no dereference cost)\n//!   - (It requires its parent can be accessed by multi thread, for deep embedded scenario)\n//!   - Threshold is const\n//!   - Requires manual ref count manage, ([done()](WaitGroupInline::done) [done_many()](WaitGroupInline::done_many) is unsafe).\n//!   - only one waiter thread is allowed. ([wait()](WaitGroupInline::wait),\n//!     [wait_async()](WaitGroupInline::wait_async) is unsafe)\n//!\n//! - [WaitGroup]: which is a safe RAII guard API.\n//!   - Its a referenced counted container, optional state inside may be shared between the threads of WaitGroup and its guards.\n//!   - Only one waiter is allowed. (`WaitGroup` is `!Sync`)\n//!   - Use [WaitGroup::add_guard()] to get [WaitGroupGuard].\n//!   - [WaitGroupGuard] has `Clone` (Although `WaitGroup` can not `Clone`)\n//!   - [WaitGroupGuard] drop will decrease ref and protentially wake the main thread.\n//!   - Can change threshold at any time.\n//!     - **NOTE**: threshold is carried inside generated [WaitGroupGuard] to minimize the cost of atomic ops.\n//!       When changing threshold to larger value, wait() might not wake up as soon as new threshold reached.\n//!\n//! # Safety\n//!\n//! [WaitGroup] does not have `Sync` marker, because it's not safe to concurrently wait, due to only one slot reserved for waker.\n//! If you know what you are doing when put it inside other struct, use unsafe impl on its parent\n//! struct.\n//!\n//! ```\n//! use crossfire::waitgroup::WaitGroup;\n//! use std::sync::Arc;\n//! pub struct Parent {\n//!     wg: WaitGroup<()>,\n//! }\n//! // allow parent to have Sync marker for Arc\n//! 
unsafe impl Sync for Parent {}\n//!\n//! let _parent = Arc::new(Parent{\n//!     wg: WaitGroup::new((), 0),\n//! });\n//! ```\n//!\n//! # Examples\n//!\n//! **Blocking Example: Concurrency Limiter**\n//!\n//! This example simulates a task scheduler that uses a `WaitGroup` to limit\n//! the number of concurrently running tasks to a specific watermark.\n//! It also uses the generic `T` to carry a shared state (e.g. `AtomicBool`)\n//!\n//! ```\n//! use crossfire::waitgroup::WaitGroup;\n//! use std::thread;\n//! use std::time::Duration;\n//! use std::sync::atomic::{AtomicBool, Ordering};\n//!\n//! const MAX_CONCURRENT_TASKS: usize = 4;\n//! const TOTAL_TASKS: usize = 10;\n//!\n//! // Initialize WaitGroup with a threshold of N-1.\n//! // `wait()` will block when the number of running tasks is >= N.\n//! // The `AtomicBool` is used to track if any task failed.\n//! let mut wg = WaitGroup::<AtomicBool>::new(AtomicBool::new(true), MAX_CONCURRENT_TASKS - 1);\n//!\n//! // Use a simple for loop to spawn a total of 10 tasks.\n//! for i in 0..TOTAL_TASKS {\n//!     // `wait()` blocks until `wg.get_left() < MAX_CONCURRENT_TASKS`.\n//!     // This effectively waits for a slot to become available.\n//!     wg.wait();\n//!     // A slot is available, spawn a new task.\n//!     let guard = wg.add_guard();\n//!     thread::spawn(move || {\n//!         thread::sleep(Duration::from_millis(100));\n//!         // do some work\n//!         if i == 5 {\n//!             // Notify failure\n//!             guard.store(false, Ordering::SeqCst);\n//!         }\n//!         drop(guard);\n//!     });\n//! }\n//! // After spawning all tasks, wait for the remaining running tasks to finish.\n//! // Set threshold to 0 to wait until all guards are dropped.\n//! wg.set_threshold(0);\n//! wg.wait();\n//!\n//! assert_eq!(wg.get_left_seqcst(), 0);\n//! assert_eq!(wg.load(Ordering::SeqCst), false);\n//! ```\n//!\n//! **Async Example**\n//!\n//! 
This example demonstrates tasks and sub-tasks, dynamically increasing the ref count by cloning WaitGroupGuard.\n//!\n//! ```\n//! use crossfire::waitgroup::WaitGroup;\n//! use std::time::Duration;\n//!\n//! #[tokio::test]\n//! async fn wait_group_async_example() {\n//!     let wg = WaitGroup::new((), 0);\n//!     for _j in 0..4 {\n//!         // Create a guard for the manager task.\n//!         let parent_guard = wg.add_guard();\n//!         tokio::spawn(async move {\n//!             // This manager task will spawn 2 workers.\n//!             for i in 0..2 {\n//!                 let child_guard = parent_guard.clone();\n//!                 tokio::spawn(async move {\n//!                     // Do some work...\n//!                     tokio::time::sleep(Duration::from_millis(50 * (i + 1))).await;\n//!                     // child_guard is dropped here.\n//!                     drop(child_guard);\n//!                 });\n//!             }\n//!             // The manager's work is to spawn workers,\n//!             // so it drops its own guard after the loop.\n//!             drop(parent_guard);\n//!         });\n//!     }\n//!     // Wait until the manager guard and all its clones are dropped.\n//!     wg.wait_async().await;\n//!     assert_eq!(wg.get_left_seqcst(), 0);\n//! }\n//! 
```\n\nuse crate::backoff::Backoff;\nuse crate::shared::{check_timeout, ThinWaker};\n#[allow(unused_imports)]\nuse crate::{tokio_task_id, trace_log};\nuse std::cell::UnsafeCell;\nuse std::future::Future;\nuse std::mem::transmute;\nuse std::ops::Deref;\nuse std::pin::Pin;\nuse std::ptr::NonNull;\nuse std::sync::atomic::{\n    AtomicUsize,\n    Ordering::{self, Acquire, Relaxed, Release, SeqCst},\n};\nuse std::task::{Context, Poll, Waker};\nuse std::thread;\nuse std::time::{Duration, Instant};\n\n/// An unsafe version WaitGroup which does not allocate, and not dereference cost, must embedded in a shared parent structure.\n///\n/// # Limitation\n///\n/// - THRESHOLD is const, default to zero\n/// - Only one thread / coroutine to wait, all wait_XXX() function is unsafe.\n/// - done() is unsafe.\n/// - Also provide add_many() done_many().\npub struct WaitGroupInline<const THRESHOLD: usize = 0> {\n    inner: WaitGroupInner<()>,\n}\n\nimpl<const THRESHOLD: usize> WaitGroupInline<THRESHOLD> {\n    pub fn new() -> Self {\n        // the inline version don't need its ref to represent ownership\n        Self { inner: WaitGroupInner::new((), 0) }\n    }\n\n    /// load total reference count of `WaitGroupGuard` with SeqCst\n    #[inline(always)]\n    pub fn get_left_seqcst(&self) -> usize {\n        self.inner.count(SeqCst)\n    }\n\n    /// Return total reference count of `WaitGroupGuard` with Acquire\n    #[inline(always)]\n    pub fn get_left(&self) -> usize {\n        self.inner.count(Acquire)\n    }\n\n    /// Add one count to the WaitGroup\n    #[inline(always)]\n    pub fn add(&self) {\n        self.inner.add(1);\n    }\n\n    /// Add multiple count to the WaitGroup\n    #[inline(always)]\n    pub fn add_many(&self, count: usize) {\n        debug_assert!(count < COUNT_MASK - 2);\n        self.inner.add(count);\n    }\n\n    /// Decrease one count, if it reduced to zero, will waking the waiter thread.\n    ///\n    /// Return true when zero has been reached\n    ///\n    
/// # Safety\n    ///\n    /// You have to be careful about underflow, which will panic\n    pub unsafe fn done(&self) -> bool {\n        let p = &self.inner as *const WaitGroupInner<()>;\n        WaitGroupInner::<()>::done::<false>(p, 1, THRESHOLD)\n    }\n\n    /// Decrease multiple counts; if reduced to zero, will wake the waiter thread.\n    ///\n    /// Return true when zero has been reached\n    ///\n    /// # Safety\n    ///\n    /// You have to be careful about underflow, which will panic\n    pub unsafe fn done_many(&self, count: usize) -> bool {\n        debug_assert!(count < COUNT_MASK - 2);\n        let p = &self.inner as *const WaitGroupInner<()>;\n        WaitGroupInner::<()>::done::<false>(p, count, THRESHOLD)\n    }\n\n    /// If the ref count reaches zero, return `Ok(())`, otherwise `Err(())`\n    #[inline]\n    pub fn try_wait(&self) -> Result<(), ()> {\n        // one ref owned by myself\n        if self.inner.count(SeqCst) <= THRESHOLD {\n            Ok(())\n        } else {\n            Err(())\n        }\n    }\n\n    /// Block current coroutine until count drops below threshold.\n    ///\n    /// # Safety\n    ///\n    /// Only one thread is allowed to wait\n    #[inline]\n    pub unsafe fn wait_async<'a>(&'a self) -> WaitGroupFuture<'a, ()> {\n        WaitGroupFuture { inner: &self.inner, threshold: THRESHOLD, waker: None }\n    }\n\n    /// Block current coroutine until count drops below threshold, or until timeout happens\n    ///\n    /// # Safety\n    ///\n    /// Only one thread is allowed to wait\n    #[cfg(feature = \"tokio\")]\n    #[cfg_attr(docsrs, doc(cfg(feature = \"tokio\")))]\n    #[inline]\n    pub unsafe fn wait_async_timeout<'a>(\n        &'a self, timeout: Duration,\n    ) -> WaitGroupTimeoutFuture<'a, (), tokio::time::Sleep, ()> {\n        let sleep = tokio::time::sleep(timeout);\n        self.wait_async_with_timer(sleep)\n    }\n\n    /// Block current coroutine until count drops below threshold, or until timeout happens\n    
///\n    /// # Safety\n    ///\n    /// Only one thread is allow to wait\n    #[cfg(feature = \"async_std\")]\n    #[cfg_attr(docsrs, doc(cfg(feature = \"async_std\")))]\n    #[inline]\n    pub unsafe fn wait_async_timeout<'a>(\n        &'a self, timeout: Duration,\n    ) -> WaitGroupTimeoutFuture<'a, (), impl Future<Output = ()>, ()> {\n        let sleep = async_std::task::sleep(timeout);\n        self.wait_async_with_timer(sleep)\n    }\n\n    /// Block current coroutine until count drop below threshold, with a custom sleep / or cancel function\n    ///\n    /// # Safety\n    ///\n    /// Only one thread is allow to wait\n    #[inline]\n    pub unsafe fn wait_async_with_timer<'a, FR, R>(\n        &'a self, fut: FR,\n    ) -> WaitGroupTimeoutFuture<'a, (), FR, R>\n    where\n        FR: Future<Output = R>,\n    {\n        WaitGroupTimeoutFuture { inner: &self.inner, threshold: THRESHOLD, sleep: fut, waker: None }\n    }\n\n    /// Blocking current thread and Wait until count drop below threshold.\n    ///\n    /// # Safety\n    ///\n    /// Only one thread is allow to wait\n    #[inline]\n    pub unsafe fn wait(&self) {\n        let _ = self.inner.wait_blocking(None, THRESHOLD);\n    }\n\n    /// Blocking current thread and Wait until count drop below threshold, or until timeout\n    ///\n    /// # Safety\n    ///\n    /// Only one thread is allow to wait\n    #[inline]\n    pub unsafe fn wait_timeout(&self, timeout: Duration) -> Result<(), ()> {\n        self.inner.wait_blocking(Some(Instant::now() + timeout), THRESHOLD)\n    }\n}\n\n/// A WaitGroup implementation allows custom threshold (>=0), works in blocking & async context.\n///\n/// Features:\n/// - Only one waiter, concurrent ref count.\n/// - Carry optional state inside, shared between the main thread and WaitGroupGuard, just like Arc.\n/// - Change threshold at any time.\n///   - **NOTE**:\n///     threshold is carried inside generated [WaitGroupGuard] to minimize the cost of atomic ops.\n///     When 
changing threshold to larger value, wait() might not wake up as soon as new threshold reached.\n/// - Low-cost create and drop, because reference count and waker state is packed inside one atomic.\n/// - WaitGroupGuard dropping is wait-free, which decrease ref count with SeqCst CAS.\n/// - Max reference count to (1 << (usize::BITS - 2) - 2)\n///\n/// You don't need to put WaitGroup into Arc, use [WaitGroup::add_guard()] to get `WaitGroupGuard`.\n/// It's ok to clone [WaitGroupGuard], which will increase internal ref count.\n///\n/// # Safety\n///\n/// It's not safe to concurrently wait, so it does not have `Sync` marker.\n/// If you know what you are doing when put it inside other struct, use unsafe impl.\n///\n/// See module level [doc](crate::waitgroup) for example.\npub struct WaitGroup<T> {\n    threshold: usize,\n    inner: NonNull<WaitGroupInner<T>>,\n    // Remove the Sync marker to prevent concurrent waiting\n}\n\nunsafe impl<T: Send> Send for WaitGroup<T> {}\n\nimpl<T> WaitGroup<T> {\n    #[inline(always)]\n    pub fn new(inner: T, threshold: usize) -> Self {\n        // need one ref to represent ownership\n        let inner = Box::new(WaitGroupInner::new(inner, 1));\n        Self {\n            // one ref owned by myself\n            threshold: threshold + 1,\n            inner: unsafe { NonNull::new_unchecked(Box::into_raw(inner)) },\n        }\n    }\n\n    /// Threshold can be changed on the fly, which only affect the next `wait()`.\n    ///\n    /// # Safety\n    ///\n    /// Previous threshold is carried inside generated `WaitGroupGuard`.\n    /// When changing threshold to larger value, wait() might not wake up as soon as new threshold reached.\n    #[inline]\n    pub fn set_threshold(&mut self, threshold: usize) {\n        // one ref owned by myself\n        self.threshold = threshold + 1;\n    }\n\n    #[inline(always)]\n    fn get_inner(&self) -> &WaitGroupInner<T> {\n        unsafe { self.inner.as_ref() }\n    }\n\n    /// load total reference 
count of `WaitGroupGuard` with SeqCst\n    #[inline(always)]\n    pub fn get_left_seqcst(&self) -> usize {\n        // minus my own ref\n        self.get_inner().count(SeqCst) - 1\n    }\n\n    /// Return total reference count of `WaitGroupGuard` with Acquire\n    #[inline(always)]\n    pub fn get_left(&self) -> usize {\n        // minus my own ref\n        self.get_inner().count(Acquire) - 1\n    }\n\n    /// Add one ref count to the WaitGroup, return a guard to decrease the count on drop.\n    #[inline(always)]\n    pub fn add_guard(&self) -> WaitGroupGuard<T> {\n        self.get_inner().add(1);\n        WaitGroupGuard { inner: self.inner, threshold: self.threshold }\n    }\n\n    /// If the ref count is below threshold, return `Ok(())`, otherwise `Err(())`\n    #[inline]\n    pub fn try_wait(&self) -> Result<(), ()> {\n        // one ref owned by mysql\n        if self.get_inner().count(SeqCst) <= self.threshold {\n            Ok(())\n        } else {\n            Err(())\n        }\n    }\n\n    /// Block current coroutine until count drop below threshold.\n    ///\n    /// # Safety\n    ///\n    /// Only one thread is allow to wait\n    #[inline]\n    pub fn wait_async<'a>(&'a self) -> WaitGroupFuture<'a, T>\n    where\n        T: Send + Unpin,\n    {\n        let inner = self.get_inner();\n        WaitGroupFuture { inner, threshold: self.threshold, waker: None }\n    }\n\n    /// Block current coroutine until count drop below threshold, or until timeout happens\n    ///\n    /// # Safety\n    ///\n    /// Only one thread is allow to wait\n    #[cfg(feature = \"tokio\")]\n    #[cfg_attr(docsrs, doc(cfg(feature = \"tokio\")))]\n    #[inline]\n    pub fn wait_async_timeout<'a>(\n        &'a self, timeout: Duration,\n    ) -> WaitGroupTimeoutFuture<'a, T, tokio::time::Sleep, ()>\n    where\n        T: Send + Unpin,\n    {\n        let sleep = tokio::time::sleep(timeout);\n        self.wait_async_with_timer(sleep)\n    }\n\n    /// Block current coroutine until 
count drop below threshold, or until timeout happens\n    ///\n    /// # Safety\n    ///\n    /// Only one thread is allow to wait\n    #[cfg(feature = \"async_std\")]\n    #[cfg_attr(docsrs, doc(cfg(feature = \"async_std\")))]\n    #[inline]\n    pub fn wait_async_timeout<'a>(\n        &'a self, timeout: Duration,\n    ) -> WaitGroupTimeoutFuture<'a, T, impl Future<Output = ()>, ()>\n    where\n        T: Send + Unpin,\n    {\n        let sleep = async_std::task::sleep(timeout);\n        self.wait_async_with_timer(sleep)\n    }\n\n    /// Block current coroutine until count drop below threshold, with a custom sleep / or cancel function\n    ///\n    /// # Safety\n    ///\n    /// Only one thread is allow to wait\n    #[inline]\n    pub fn wait_async_with_timer<'a, FR, R>(\n        &'a self, fut: FR,\n    ) -> WaitGroupTimeoutFuture<'a, T, FR, R>\n    where\n        FR: Future<Output = R>,\n        T: Send + Unpin,\n    {\n        let inner = self.get_inner();\n        WaitGroupTimeoutFuture { inner, threshold: self.threshold, sleep: fut, waker: None }\n    }\n\n    /// Blocking current thread and Wait until count drop below threshold.\n    ///\n    /// # Safety\n    ///\n    /// Only one thread is allow to wait\n    #[inline]\n    pub fn wait(&self) {\n        let _ = self.get_inner().wait_blocking(None, self.threshold);\n    }\n\n    /// Blocking current thread and Wait until count drop below threshold, or until timeout\n    ///\n    /// # Safety\n    ///\n    /// Only one thread is allow to wait\n    #[inline]\n    pub fn wait_timeout(&self, timeout: Duration) -> Result<(), ()> {\n        self.get_inner().wait_blocking(Some(Instant::now() + timeout), self.threshold)\n    }\n}\n\nimpl<T> Drop for WaitGroup<T> {\n    #[inline]\n    fn drop(&mut self) {\n        unsafe {\n            WaitGroupInner::destroy(self.inner);\n        }\n    }\n}\n\nimpl<T> Deref for WaitGroup<T> {\n    type Target = T;\n    #[inline]\n    fn deref(&self) -> &T {\n        &unsafe { 
self.inner.as_ref() }.inner\n    }\n}\n\n/// An RAII implementation got represent ref count in WaitGroup.\n///\n/// When cloning WaitGroupGuard, which will increase the ref count in WaitGroup.\n///\n/// WaitGroupGuard dropping is wait-free, which decrease ref count with SeqCst CAS.\n/// will wake up the waiter once ref count decrease below threshold.\n///\n/// **NOTE**: Threshold is carried inside as non-atomic, not syned with the main thread for\n/// efficiency. But it's sufficient for most scenario.\n///\npub struct WaitGroupGuard<T> {\n    inner: NonNull<WaitGroupInner<T>>,\n    threshold: usize,\n}\n\nunsafe impl<T: Send> Send for WaitGroupGuard<T> {}\nunsafe impl<T: Sync> Sync for WaitGroupGuard<T> {}\n\nimpl<T> Drop for WaitGroupGuard<T> {\n    #[inline(always)]\n    fn drop(&mut self) {\n        unsafe {\n            WaitGroupInner::done_ptr(self.inner, 1, self.threshold);\n        }\n    }\n}\n\nimpl<T> Clone for WaitGroupGuard<T> {\n    #[inline]\n    fn clone(&self) -> Self {\n        let inner = unsafe { self.inner.as_ref() };\n        inner.add(1);\n        Self { inner: self.inner, threshold: self.threshold }\n    }\n}\n\nimpl<T> Deref for WaitGroupGuard<T> {\n    type Target = T;\n    #[inline]\n    fn deref(&self) -> &T {\n        &unsafe { self.inner.as_ref() }.inner\n    }\n}\n\nstruct WaitGroupInner<T> {\n    /// Refer to the doc of State\n    state: AtomicUsize,\n    o_waker: UnsafeCell<Option<ThinWaker>>,\n    inner: T,\n}\n\nunsafe impl<T: Sync> Sync for WaitGroupInner<T> {}\n\nimpl<T> WaitGroupInner<T> {\n    #[inline(always)]\n    fn new(inner: T, init_count: usize) -> Self {\n        Self { state: AtomicUsize::new(init_count), o_waker: UnsafeCell::new(None), inner }\n    }\n\n    #[inline]\n    fn count(&self, order: Ordering) -> usize {\n        self.state.load(order) & COUNT_MASK\n    }\n\n    #[inline(always)]\n    fn get_waker(&self) -> &mut Option<ThinWaker> {\n        unsafe { transmute(self.o_waker.get()) }\n    }\n\n    #[inline]\n   
 fn add(&self, count: usize) {\n        let old_state = self.state.fetch_add(count, Relaxed);\n        if State::new(old_state).count() >= COUNT_MASK - 2 {\n            panic!(\"WaitGroup count overflowed\");\n        }\n    }\n\n    #[inline]\n    unsafe fn destroy(p: NonNull<Self>) -> bool {\n        let this = unsafe { p.as_ref() };\n        let mut state = this.state.load(SeqCst);\n        loop {\n            let s = State::new(state);\n            if s.is_locked() || s.count() > 1 {\n                if let Err(_state) =\n                    this.state.compare_exchange_weak(state, state - 1, SeqCst, Acquire)\n                {\n                    state = _state;\n                    continue;\n                }\n                trace_log!(\"wg:({:?}) drop delay state={}\", tokio_task_id!(), state - 1);\n                return false;\n            }\n            {\n                trace_log!(\"wg:({:?}) drop\", tokio_task_id!());\n                let _ = unsafe { Box::from_raw(p.as_ptr()) };\n                return true;\n            }\n        }\n    }\n\n    #[inline(always)]\n    unsafe fn done_ptr(p: NonNull<Self>, count: usize, threshold: usize) -> bool {\n        let _p = p.as_ptr();\n        if Self::done::<true>(_p, count, threshold) {\n            let _ = unsafe { Box::from_raw(_p) };\n            return true;\n        } else {\n            false\n        }\n    }\n\n    /// return true to allow drop\n    #[inline]\n    fn done<const OWNER_SHIP: bool>(this: *const Self, count: usize, threshold: usize) -> bool {\n        trace_log!(\"wg:({:?}) enter done {count} {threshold}\", tokio_task_id!());\n        unsafe {\n            let mut state = (*this).state.load(Relaxed);\n            loop {\n                let mut s = State::new(state);\n                if OWNER_SHIP && s.is_last(count) {\n                    // in case non SeqCst read old value, double check with SeqCst\n                    let _state = (*this).state.load(SeqCst);\n                    
if _state == state {\n                        trace_log!(\"wg:({:?}) done drop {count} {threshold}\", tokio_task_id!());\n                        return true;\n                    }\n                    state = _state;\n                    continue;\n                }\n                // NOTE: When flag == WAKER_FLAG_LOCK, means one other thread is reading the waker,\n                // we just try to decrease the count, but we should not drop it even ref reach 0\n                let try_lock = s.try_done(count, threshold);\n                if try_lock {\n                    debug_assert!(s.is_locked());\n                }\n                match (*this).state.compare_exchange_weak(state, s.to_usize(), SeqCst, Acquire) {\n                    Ok(_) => {\n                        if try_lock {\n                            let o_waker = (*this).get_waker().take();\n                            // Probably the last chance to check state, should use SeqCst to unlock.\n                            // ref count may reach 0, means I'm the last one.\n                            if OWNER_SHIP {\n                                let old = (*this).state.fetch_and(!WAKER_FLAG_MASK, SeqCst);\n                                if old & COUNT_MASK == 0 {\n                                    trace_log!(\n                                        \"wg:({:?}) done locked drop cur {count} = 0\",\n                                        tokio_task_id!(),\n                                    );\n                                    // Safety: we had the lock, won't be others change the waker,\n                                    // we are the last one, don't need to actually wake, just destroy.\n                                    return true;\n                                }\n                            } else {\n                                (*this).state.fetch_and(!WAKER_FLAG_MASK, Release);\n                            }\n                            if let Some(waker) = o_waker {\n        
                        trace_log!(\n                                    \"wg:({:?}) done waked {count} -> {} <= {threshold}\",\n                                    tokio_task_id!(),\n                                    s.count()\n                                );\n                                waker.wake();\n                            }\n                        } else {\n                            trace_log!(\"wg:({:?}) done {count} -> {}\", tokio_task_id!(), s.count());\n                        }\n                        return false;\n                    }\n                    Err(cur) => {\n                        state = cur;\n                    }\n                }\n            }\n        }\n    }\n\n    /// may_skip = true, for blocking context does not need to overwrite waker\n    #[inline]\n    fn try_set_waker(&self, waker: ThinWaker, threshold: usize, may_skip: bool) -> Result<(), ()> {\n        let mut state = self.state.load(SeqCst);\n        loop {\n            let s = State::new(state);\n            if s.count() <= threshold {\n                // Safety: because of this, use SeqCst to prevent reading old value\n                return Err(());\n            } else if s.is_locked() {\n                // done() is waking\n                std::hint::spin_loop();\n                state = self.state.load(Acquire);\n                trace_log!(\"wg:({:?}) set_waker try again\", tokio_task_id!());\n                continue;\n            }\n            let old_state = if s.has_waker() {\n                if may_skip {\n                    trace_log!(\"wg:({:?}) set_waker skip\", tokio_task_id!());\n                    return Ok(());\n                }\n                // waker exist, first try lock, then replace\n                if let Err(s) =\n                    self.state.compare_exchange_weak(state, s.try_lock(), SeqCst, Acquire)\n                {\n                    state = s;\n                    continue;\n                }\n                
self.get_waker().replace(waker);\n                trace_log!(\"wg:({:?}) set_waker replaced\", tokio_task_id!());\n                // clear WAKER_FLAG_LOCK and set WAKER_FLAG_SET\n                self.state.fetch_xor(WAKER_FLAG_MASK, SeqCst)\n            } else {\n                self.get_waker().replace(waker);\n                trace_log!(\"wg:({:?}) set_waker ok\", tokio_task_id!());\n                self.state.fetch_or(WAKER_FLAG_SET, SeqCst)\n            };\n            if State::new(old_state).count() <= threshold {\n                return Err(());\n            }\n            return Ok(());\n        }\n    }\n\n    #[inline]\n    fn wait_blocking(&self, deadline: Option<Instant>, threshold: usize) -> Result<(), ()> {\n        macro_rules! check {\n            ($order: expr) => {\n                let cur = self.count($order);\n                if cur <= threshold {\n                    trace_log!(\"wg:({:?}) check {cur} <= {threshold}\", tokio_task_id!());\n                    return Ok(());\n                }\n                trace_log!(\"wg:({:?}) check {cur} > {threshold}\", tokio_task_id!());\n            };\n        }\n        check!(Acquire);\n        let mut backoff = Backoff::new();\n        let mut set_waker = false;\n        loop {\n            let r = backoff.snooze();\n            check!(Acquire);\n            if r {\n                let waker = ThinWaker::Blocking(thread::current());\n                if self.try_set_waker(waker, threshold, set_waker).is_err() {\n                    return Ok(());\n                } else {\n                    set_waker = true;\n                }\n                match check_timeout(deadline) {\n                    Ok(None) => thread::park(),\n                    Ok(Some(dur)) => thread::park_timeout(dur),\n                    Err(_) => {\n                        return Err(());\n                    }\n                }\n                backoff.reset();\n            }\n        }\n    }\n\n    #[inline]\n    fn 
poll_async(\n        &self, ctx: &mut Context, o_waker: &mut Option<Waker>, threshold: usize,\n    ) -> Poll<()> {\n        macro_rules! check {\n            ($order: expr) => {{\n                let s = State::new(self.state.load($order));\n                let cur = s.count();\n                if cur <= threshold {\n                    trace_log!(\"wg:({:?}) READY check {cur} <= {threshold}\", tokio_task_id!());\n                    return Poll::Ready(());\n                }\n                trace_log!(\"wg:({:?}) check {cur} > {threshold}\", tokio_task_id!());\n                s.has_waker()\n            }};\n        }\n        let has_waker = check!(Acquire);\n        let new_waker = ctx.waker();\n        if has_waker {\n            #[allow(clippy::needless_else)]\n            if let Some(old_waker) = o_waker {\n                if old_waker.will_wake(new_waker) {\n                    trace_log!(\"wg:({:?}) will_wake=true\", tokio_task_id!());\n                    check!(SeqCst);\n                    trace_log!(\"wg:({:?}) PENDING\", tokio_task_id!());\n                    return Poll::Pending;\n                } else {\n                    trace_log!(\"wg:({:?}) waker will_wake=false\", tokio_task_id!())\n                }\n            }\n        }\n        if self.try_set_waker(ThinWaker::Async(new_waker.clone()), threshold, false).is_err() {\n            trace_log!(\"wg:({:?}) READY during set_waker\", tokio_task_id!());\n            Poll::Ready(())\n        } else {\n            o_waker.replace(new_waker.clone());\n            trace_log!(\"wg:({:?}) PENDING\", tokio_task_id!());\n            Poll::Pending\n        }\n    }\n}\n\n#[must_use]\npub struct WaitGroupFuture<'a, T> {\n    inner: &'a WaitGroupInner<T>,\n    threshold: usize,\n    waker: Option<Waker>,\n}\n\nimpl<'a, T> Future for WaitGroupFuture<'a, T>\nwhere\n    T: Send + Unpin,\n{\n    type Output = ();\n\n    fn poll(self: Pin<&mut Self>, ctx: &mut Context) -> Poll<Self::Output> {\n        let 
this = unsafe { self.get_unchecked_mut() };\n        this.inner.poll_async(ctx, &mut this.waker, this.threshold)\n    }\n}\n\n/// Wait until the ref count is below threshold, return `Ok(())`.\n/// If timeout happens returns `Err(())`\n#[must_use]\npub struct WaitGroupTimeoutFuture<'a, T, FR, R>\nwhere\n    FR: Future<Output = R>,\n    T: Send + Unpin,\n{\n    inner: &'a WaitGroupInner<T>,\n    sleep: FR,\n    threshold: usize,\n    waker: Option<Waker>,\n}\n\nimpl<'a, T, FR, R> Future for WaitGroupTimeoutFuture<'a, T, FR, R>\nwhere\n    FR: Future<Output = R>,\n    T: Send + Unpin,\n{\n    type Output = Result<(), ()>;\n\n    fn poll(self: Pin<&mut Self>, ctx: &mut Context) -> Poll<Self::Output> {\n        let this = unsafe { self.get_unchecked_mut() };\n        if this.inner.poll_async(ctx, &mut this.waker, this.threshold).is_ready() {\n            return Poll::Ready(Ok(()));\n        }\n        let sleep = unsafe { Pin::new_unchecked(&mut this.sleep) };\n        if sleep.poll(ctx).is_ready() {\n            Poll::Ready(Err(()))\n        } else {\n            Poll::Pending\n        }\n    }\n}\n\nconst WAKER_FLAG_SET: usize = 1 << (usize::BITS - 1);\nconst WAKER_FLAG_LOCK: usize = 1 << (usize::BITS - 2);\nconst WAKER_FLAG_MASK: usize = WAKER_FLAG_SET | WAKER_FLAG_LOCK;\nconst COUNT_MASK: usize = !WAKER_FLAG_MASK;\n\n/// The 2 highest bit is WAKER_FLAG_SET | WAKER_FLAG_LOCK, they are exclusive, so there're 3\n/// states:\n/// - 0: waker is not set\n/// - WAKER_FLAG_SET: there's a waker, some one might be waiting, it's possible to give up waiting\n///   when threshold is reached\n/// - WAKER_FLAG_LOCK: there's one thread is reading the waker, when he is done, should reset the\n///   state to 0.\n///\n/// ref count:\n/// - the lower bits is for ref count. 
When initial to be 1.\n/// - The WaitGroup can be drop early, leaving the WaitGroupGuard holders to drop the count.\n/// - when the last holder drop the count to 0, is responsible to free the memory, with the following exception:\n/// - NOTE that When WAKER_FLAG_LOCK is set, not allow to free the memory even count reach\n///   0, the last one release the lock is responsible to free the memory\nstruct State(usize);\n\nimpl State {\n    #[inline(always)]\n    fn new(state: usize) -> Self {\n        Self(state)\n    }\n\n    #[inline(always)]\n    fn count(&self) -> usize {\n        self.0 & COUNT_MASK\n    }\n\n    #[inline(always)]\n    fn waker_flag(&self) -> usize {\n        self.0 & WAKER_FLAG_MASK\n    }\n\n    #[inline(always)]\n    fn is_locked(&self) -> bool {\n        self.0 & WAKER_FLAG_LOCK > 0\n    }\n\n    #[inline(always)]\n    fn has_waker(&self) -> bool {\n        self.0 & WAKER_FLAG_SET > 0\n    }\n\n    #[inline(always)]\n    fn try_lock(&self) -> usize {\n        self.count() | WAKER_FLAG_LOCK\n    }\n\n    /// When no one lock and I'm the last one, can drop directly, return true\n    #[inline]\n    fn is_last(&self, delta: usize) -> bool {\n        let waker_flag = self.waker_flag();\n        waker_flag != WAKER_FLAG_LOCK && self.count() == delta\n    }\n\n    /// # Return value:\n    /// - should_lock==true: when reach threshold, should dec count and try_lock.\n    /// - should_lock==false: just decrease count.\n    #[inline(always)]\n    fn try_done(&mut self, delta: usize, threshold: usize) -> bool {\n        let waker_flag = self.waker_flag();\n        let old_count = self.count();\n        let new_count = if old_count >= delta {\n            old_count - delta\n        } else {\n            panic!(\"underflow detected {} < {}\", old_count, delta);\n        };\n        let try_lock = new_count <= threshold && waker_flag == WAKER_FLAG_SET;\n        if try_lock {\n            self.0 = WAKER_FLAG_LOCK | new_count;\n            true\n        } else 
{\n            self.0 = waker_flag | new_count;\n            false\n        }\n    }\n\n    #[inline(always)]\n    #[allow(clippy::wrong_self_convention)]\n    fn to_usize(&self) -> usize {\n        self.0\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use captains_log::{recipe, ConsoleTarget, Level};\n    use std::thread;\n\n    #[test]\n    fn test_waitgroup_inner_count() {\n        let wg = WaitGroup::new((), 0);\n        assert_eq!(wg.get_left_seqcst(), 0);\n        let guard1 = wg.add_guard();\n        assert_eq!(wg.get_left_seqcst(), 1);\n        let guard2 = wg.add_guard();\n        assert_eq!(wg.get_left_seqcst(), 2);\n        drop(guard1);\n        assert_eq!(wg.get_left_seqcst(), 1);\n        drop(guard2);\n        assert_eq!(wg.get_left_seqcst(), 0);\n    }\n\n    #[test]\n    fn test_waitgroup_state() {\n        assert_eq!(State::new(2).count(), 2);\n        assert!(State::new(2 | WAKER_FLAG_SET).has_waker());\n        assert!(!State::new(2 | WAKER_FLAG_SET).is_locked());\n        assert!(!State::new(2 | WAKER_FLAG_LOCK).has_waker());\n        assert!(State::new(2 | WAKER_FLAG_LOCK).is_locked());\n        let mut s = State::new(2);\n        // no waker\n        assert_eq!(s.try_done(1, 1), false);\n        assert!(!s.is_locked());\n        assert_eq!(s.count(), 1);\n        // threshold is ignore, just drop\n        assert!(s.is_last(1));\n        // state don't need to change\n        assert_eq!(s.count(), 1);\n\n        // WAKER_FLAG_SET ( 3-1 <=2 )-> WAKER_FLAG_LOCK\n        let mut s = State::new(3 | WAKER_FLAG_SET);\n        assert!(!s.is_last(1));\n        assert_eq!(s.try_done(1, 2), true);\n        assert!(s.is_locked());\n        assert!(!s.has_waker());\n        assert_eq!(s.count(), 2);\n\n        // WAKER_FLAG_LOCK -> dec\n        assert_eq!(s.try_done(1, 0), false);\n        assert!(s.is_locked());\n        assert_eq!(s.count(), 1);\n\n        // WAKER_FLAG_LOCK -> no waker\n        let _s = s.0 & (!WAKER_FLAG_MASK);\n       
 assert_eq!(_s, 1);\n\n        // WAKER_FLAG_LOCK exist, don't drop, just dec\n        assert_eq!(s.try_done(1, 0), false);\n        assert_eq!(s.count(), 0);\n    }\n\n    #[test]\n    fn test_waitgroup_ptr() {\n        recipe::console_logger(ConsoleTarget::Stdout, Level::Trace).test().build().expect(\"log\");\n        let inner = Box::new(WaitGroupInner::new((), 1));\n        assert_eq!(inner.count(SeqCst), 1);\n        assert_eq!(State::new(inner.state.load(Ordering::SeqCst)).waker_flag(), 0);\n\n        println!(\"test try_set_waker met threshold reach\");\n        assert_eq!(inner.try_set_waker(ThinWaker::Blocking(thread::current()), 1, false), Err(()));\n\n        inner.add(1);\n        assert_eq!(inner.count(SeqCst), 2);\n        println!(\"test try_set_waker ok\");\n        assert!(inner.try_set_waker(ThinWaker::Blocking(thread::current()), 1, false).is_ok());\n        let s = State::new(inner.state.load(Ordering::SeqCst));\n        assert_eq!(s.waker_flag(), WAKER_FLAG_SET, \"s {}, {}\", s.is_locked(), s.has_waker());\n\n        println!(\"test try_set_waker again skip\");\n        assert!(inner.try_set_waker(ThinWaker::Blocking(thread::current()), 1, true).is_ok());\n        let s = State::new(inner.state.load(Ordering::SeqCst));\n        assert_eq!(s.waker_flag(), WAKER_FLAG_SET);\n\n        println!(\"test try_set_waker again force\");\n        assert!(inner.try_set_waker(ThinWaker::Blocking(thread::current()), 1, false).is_ok());\n        let s = State::new(inner.state.load(Ordering::SeqCst));\n        assert_eq!(s.waker_flag(), WAKER_FLAG_SET);\n        assert_eq!(inner.count(SeqCst), 2);\n\n        let p = unsafe { NonNull::new_unchecked(Box::into_raw(inner)) };\n        println!(\"test done triggering wakeup\");\n        unsafe {\n            assert!(!WaitGroupInner::done_ptr(p, 1, 1));\n            {\n                let inner = p.as_ref();\n                assert_eq!(inner.count(SeqCst), 1);\n                let s = 
State::new(inner.state.load(Ordering::SeqCst));\n                assert_eq!(s.waker_flag(), 0);\n            }\n            println!(\"test done triggering drop\");\n            assert!(WaitGroupInner::done_ptr(p, 1, 0));\n        }\n    }\n\n    #[test]\n    fn test_waitgroup_inner() {\n        recipe::console_logger(ConsoleTarget::Stdout, Level::Trace).test().build().expect(\"log\");\n        let inner = WaitGroupInner::new((), 1);\n        assert_eq!(inner.count(SeqCst), 1);\n        assert_eq!(State::new(inner.state.load(Ordering::SeqCst)).waker_flag(), 0);\n\n        println!(\"test try_set_waker met threshold reach\");\n        assert_eq!(inner.try_set_waker(ThinWaker::Blocking(thread::current()), 1, false), Err(()));\n\n        inner.add(1);\n        assert_eq!(inner.count(SeqCst), 2);\n        println!(\"test try_set_waker ok\");\n        assert!(inner.try_set_waker(ThinWaker::Blocking(thread::current()), 1, false).is_ok());\n        let s = State::new(inner.state.load(Ordering::SeqCst));\n        assert_eq!(s.waker_flag(), WAKER_FLAG_SET, \"s {}, {}\", s.is_locked(), s.has_waker());\n\n        println!(\"test try_set_waker again skip\");\n        assert!(inner.try_set_waker(ThinWaker::Blocking(thread::current()), 1, true).is_ok());\n        let s = State::new(inner.state.load(Ordering::SeqCst));\n        assert_eq!(s.waker_flag(), WAKER_FLAG_SET);\n\n        println!(\"test try_set_waker again force\");\n        assert!(inner.try_set_waker(ThinWaker::Blocking(thread::current()), 1, false).is_ok());\n        let s = State::new(inner.state.load(Ordering::SeqCst));\n        assert_eq!(s.waker_flag(), WAKER_FLAG_SET);\n        assert_eq!(inner.count(SeqCst), 2);\n\n        let p = &inner as *const WaitGroupInner<()>;\n\n        println!(\"test done triggering wakeup\");\n        assert!(!WaitGroupInner::<()>::done::<false>(p, 1, 1));\n        {\n            assert_eq!(inner.count(SeqCst), 1);\n            let s = 
State::new(inner.state.load(Ordering::SeqCst));\n            assert_eq!(s.waker_flag(), 0);\n        }\n        println!(\"test done last\");\n        WaitGroupInner::<()>::done::<false>(p, 1, 0);\n        assert_eq!(inner.count(Ordering::SeqCst), 0)\n    }\n}\n"
  },
  {
    "path": "src/waker.rs",
    "content": "use crate::collections::ArcCell;\nuse crate::flavor::FlavorImpl;\nuse std::cell::UnsafeCell;\nuse std::fmt;\nuse std::ops::Deref;\nuse std::sync::{\n    atomic::{AtomicU32, AtomicU8, Ordering},\n    Arc, Weak,\n};\nuse std::task::*;\nuse std::thread;\n\n#[derive(Debug, Clone, Copy, PartialEq)]\n#[repr(u8)]\npub enum WakerState {\n    Init = 0, // A temporary state, https://github.com/frostyplanet/crossfire-rs/issues/22\n    Waiting = 1,\n    //Copy = 2, // Omit due to skipping direct copy on async or with deadline\n    Woken = 3,\n    Closed = 4, // Channel closed, or timeout cancellation\n    Done = 5,\n}\n\n#[derive(PartialEq, Debug, Clone, Copy)]\n#[repr(u8)]\npub enum WakeResult {\n    Woken = 0x1, // Woken, stop iteration\n    Sent = 0x3,  // Woken with message direct copied\n    Next = 0x2,  // Woken, but have to continued for more iteration\n    Skip = 0x4,  // Waker Cancelled or Done\n}\n\nimpl WakeResult {\n    #[inline(always)]\n    pub fn is_done(&self) -> bool {\n        (*self as u8) & (WakeResult::Woken as u8) > 0\n    }\n}\n\n/// Although removing direct copy feature of the payload pointer is not used,\n/// leave it to unbuffer channel in the future\npub struct ArcWaker<P>(Arc<WakerInner<P>>);\n\nimpl<P> fmt::Debug for ArcWaker<P> {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        self.0.fmt(f)\n    }\n}\n\nimpl<P> fmt::Debug for WakerInner<P> {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"waker({})\", self.get_seq())\n    }\n}\n\nimpl<P> Deref for ArcWaker<P> {\n    type Target = WakerInner<P>;\n    #[inline]\n    fn deref(&self) -> &Self::Target {\n        self.0.as_ref()\n    }\n}\n\nimpl<P> ArcWaker<P> {\n    #[inline(always)]\n    pub fn new_async(ctx: &Context, payload: P) -> Self {\n        Self(Arc::new(WakerInner {\n            seq: AtomicU32::new(0),\n            state: AtomicU8::new(WakerState::Init as u8),\n            waker: 
UnsafeCell::new(ThinWaker::Async(ctx.waker().clone())),\n            payload: UnsafeCell::new(payload),\n        }))\n    }\n\n    #[inline(always)]\n    pub fn new_blocking(payload: P) -> Self {\n        Self(Arc::new(WakerInner {\n            seq: AtomicU32::new(0),\n            state: AtomicU8::new(WakerState::Init as u8),\n            waker: UnsafeCell::new(ThinWaker::Blocking(thread::current())),\n            payload: UnsafeCell::new(payload),\n        }))\n    }\n}\n\nimpl<P> ArcWaker<P> {\n    #[inline(always)]\n    pub fn from_arc(inner: Arc<WakerInner<P>>) -> Self {\n        Self(inner)\n    }\n\n    #[allow(clippy::wrong_self_convention)]\n    #[inline(always)]\n    pub fn to_arc(self) -> Arc<WakerInner<P>> {\n        self.0\n    }\n\n    #[inline(always)]\n    pub fn weak(&self) -> Weak<WakerInner<P>> {\n        Arc::downgrade(&self.0)\n    }\n}\n\n#[derive(Debug)]\npub(crate) enum ThinWaker {\n    Async(Waker),\n    Blocking(thread::Thread),\n}\n\nimpl ThinWaker {\n    #[inline(always)]\n    pub fn wake_by_ref(&self) {\n        match self {\n            Self::Async(w) => w.wake_by_ref(),\n            Self::Blocking(th) => th.unpark(),\n        }\n    }\n\n    #[allow(dead_code)]\n    #[inline(always)]\n    pub fn wake(self) {\n        match self {\n            Self::Async(w) => w.wake(),\n            Self::Blocking(th) => th.unpark(),\n        }\n    }\n\n    #[inline(always)]\n    pub fn will_wake(&self, ctx: &mut Context) -> bool {\n        // ref: https://github.com/frostyplanet/crossfire-rs/issues/14\n        // https://docs.rs/tokio/latest/tokio/runtime/index.html#:~:text=Normally%2C%20tasks%20are%20scheduled%20only,is%20called%20a%20spurious%20wakeup\n        // There might be situation like spurious wakeup, poll() again under no waking up ever\n        // happened, waker still exists in registry but cannot be used to wake the current future.\n        if let Self::Async(_waker) = self {\n            _waker.will_wake(ctx.waker())\n        } else 
{\n            unreachable!();\n        }\n    }\n}\n\npub struct WakerInner<P> {\n    state: AtomicU8,\n    seq: AtomicU32,\n    waker: UnsafeCell<ThinWaker>,\n    #[allow(dead_code)]\n    payload: UnsafeCell<P>,\n}\n\nunsafe impl<P> Send for WakerInner<P> {}\nunsafe impl<P> Sync for WakerInner<P> {}\n\nimpl<P> WakerInner<P> {\n    #[inline(always)]\n    fn get_waker(&self) -> &ThinWaker {\n        unsafe { &*self.waker.get() }\n    }\n\n    #[inline(always)]\n    fn get_waker_mut(&self) -> &mut ThinWaker {\n        unsafe { &mut *self.waker.get() }\n    }\n\n    #[inline(always)]\n    fn get_payload_mut(&self) -> &mut P {\n        unsafe { &mut *self.payload.get() }\n    }\n\n    #[inline(always)]\n    pub fn reset(&self, payload: P) {\n        // From the object pool to reset value,\n        // we should use SeqCst fence to clear the cache of other cores\n        *self.get_payload_mut() = payload;\n        self.reset_init();\n    }\n\n    #[inline(always)]\n    pub fn get_seq(&self) -> u32 {\n        self.seq.load(Ordering::Relaxed)\n    }\n\n    #[inline(always)]\n    pub fn set_seq(&self, seq: u32) {\n        self.seq.store(seq, Ordering::Relaxed);\n    }\n\n    #[inline(always)]\n    fn update_thread_handle(&self) {\n        let _waker = self.get_waker_mut();\n        *_waker = ThinWaker::Blocking(thread::current());\n    }\n\n    #[inline(always)]\n    pub fn commit_waiting(&self) -> u8 {\n        if let Err(s) = self.try_change_state(WakerState::Init, WakerState::Waiting) {\n            s\n        } else {\n            WakerState::Waiting as u8\n        }\n    }\n\n    #[inline(always)]\n    pub fn try_change_state(&self, cur: WakerState, new_state: WakerState) -> Result<(), u8> {\n        self.state.compare_exchange(\n            cur as u8,\n            new_state as u8,\n            Ordering::SeqCst,\n            Ordering::Acquire,\n        )?;\n        Ok(())\n    }\n\n    #[inline(always)]\n    pub fn reset_init(&self) {\n        // this is before we put 
into registry (which will establish happen-before relationship),\n        // it's safe to use Relaxed\n        self.state.store(WakerState::Init as u8, Ordering::Relaxed);\n    }\n\n    /// Return current status,\n    /// Closed: might be channel closed, or future successfully cancelled, the future should drop message; try to clear its waker.\n    /// Done: the message actually sent, nothing to do\n    /// Woken: the future should drop message, and wake another counterpart.\n    #[inline(always)]\n    pub fn abandon(&self) -> Result<(), u8> {\n        // it will contend with close(), on_recv(), on_send()\n        match self.change_state_smaller_eq(WakerState::Waiting, WakerState::Closed) {\n            Ok(_) => Ok(()),\n            Err(state) => Err(state),\n        }\n        // NOTE: there's no Copy state, so we do not loop\n    }\n\n    #[inline(always)]\n    pub fn close_wake(&self) -> bool {\n        // should hold lock because it will contend with abandon()\n        if self.change_state_smaller_eq(WakerState::Waiting, WakerState::Closed).is_ok() {\n            self.get_waker().wake_by_ref();\n            return true;\n        }\n        false\n    }\n\n    // Return Ok(pre_state), otherwise return Err(current_state)\n    #[inline(always)]\n    pub fn change_state_smaller_eq(\n        &self, condition: WakerState, target: WakerState,\n    ) -> Result<u8, u8> {\n        debug_assert!((condition as u8) < (target as u8));\n        // Save one load()\n        let mut state = condition as u8;\n        loop {\n            match self.state.compare_exchange_weak(\n                state,\n                target as u8,\n                Ordering::SeqCst,\n                Ordering::Acquire,\n            ) {\n                Ok(_) => {\n                    return Ok(state);\n                }\n                Err(s) => {\n                    if s > condition as u8 {\n                        return Err(s);\n                    }\n                    state = s;\n               
 }\n            }\n        }\n    }\n\n    #[inline(always)]\n    pub fn _get_state(&self, order: Ordering) -> u8 {\n        self.state.load(order)\n    }\n\n    #[inline(always)]\n    pub fn get_state(&self) -> u8 {\n        self.state.load(Ordering::SeqCst)\n    }\n\n    #[inline(always)]\n    pub fn get_state_relaxed(&self) -> u8 {\n        self.state.load(Ordering::Relaxed)\n    }\n\n    /// Assume no lock\n    #[inline(always)]\n    pub fn wake(&self) -> WakeResult {\n        // This is after we get waker from waker_registry, which already happen before relationship.\n        // both >= WakerState::Waiting is certain\n        let mut state = self.get_state_relaxed();\n        loop {\n            if state >= WakerState::Woken as u8 {\n                return WakeResult::Skip;\n            } else if state == WakerState::Waiting as u8 {\n                self.state.store(WakerState::Woken as u8, Ordering::SeqCst);\n                self.get_waker().wake_by_ref();\n                return WakeResult::Woken;\n            } else {\n                match self.state.compare_exchange_weak(\n                    WakerState::Init as u8,\n                    WakerState::Woken as u8,\n                    Ordering::SeqCst,\n                    Ordering::Acquire,\n                ) {\n                    Ok(_) => {\n                        self.get_waker().wake_by_ref();\n                        return WakeResult::Next;\n                    }\n                    Err(s) => {\n                        state = s;\n                    }\n                }\n            }\n        }\n    }\n\n    #[inline(always)]\n    pub fn will_wake(&self, ctx: &mut Context) -> bool {\n        self.get_waker().will_wake(ctx)\n    }\n}\n\nimpl<T> WakerInner<*const T> {\n    #[inline(always)]\n    fn get_payload(&self) -> *const T {\n        *self.get_payload_mut()\n    }\n\n    #[inline(always)]\n    pub fn wake_or_copy<F: FlavorImpl<Item = T>>(&self, flavor: &F) -> WakeResult {\n        // This is 
after we get waker from waker_registry, which already happen before relationship.\n        // both >= WakerState::Waiting is certain\n        let mut state = self.get_state_relaxed();\n        loop {\n            if state >= WakerState::Woken as u8 {\n                return WakeResult::Skip;\n            } else if state == WakerState::Waiting as u8 {\n                let p = self.get_payload();\n                if p.is_null() {\n                    self.state.store(WakerState::Woken as u8, Ordering::SeqCst);\n                    self.get_waker().wake_by_ref();\n                    return WakeResult::Woken;\n                }\n                state = if let Some(true) = flavor.try_send_oneshot(p) {\n                    WakerState::Done as u8\n                } else {\n                    WakerState::Woken as u8\n                };\n                self.state.store(state, Ordering::SeqCst);\n                self.get_waker().wake_by_ref();\n                if state == WakerState::Done as u8 {\n                    return WakeResult::Sent;\n                } else {\n                    return WakeResult::Woken;\n                }\n            } else {\n                match self.state.compare_exchange_weak(\n                    WakerState::Init as u8,\n                    WakerState::Woken as u8,\n                    Ordering::SeqCst,\n                    Ordering::Acquire,\n                ) {\n                    Ok(_) => {\n                        self.get_waker().wake_by_ref();\n                        return WakeResult::Next;\n                    }\n                    Err(s) => {\n                        state = s;\n                    }\n                }\n            }\n        }\n    }\n}\n\npub struct WakerCache<P: Copy>(ArcCell<WakerInner<P>>);\n\nimpl<P: Copy> WakerCache<P> {\n    #[inline(always)]\n    pub(crate) fn new() -> Self {\n        Self(ArcCell::new())\n    }\n\n    #[inline(always)]\n    pub fn new_blocking(&self, payload: P) -> ArcWaker<P> {\n    
    if let Some(inner) = self.0.pop() {\n            inner.update_thread_handle();\n            inner.reset(payload);\n            return ArcWaker::<P>::from_arc(inner);\n        }\n        ArcWaker::new_blocking(payload)\n    }\n\n    #[inline(always)]\n    pub(crate) fn push(&self, waker: ArcWaker<P>) {\n        debug_assert!(waker.get_state() >= WakerState::Woken as u8);\n        let a = waker.to_arc();\n        if Arc::weak_count(&a) == 0 && Arc::strong_count(&a) == 1 {\n            self.0.try_put(a);\n        }\n    }\n\n    #[allow(dead_code)]\n    #[inline(always)]\n    pub(crate) fn is_empty(&self) -> bool {\n        !self.0.exists()\n    }\n}\n\n#[cfg(test)]\nmod tests {\n\n    use super::*;\n\n    #[test]\n    fn test_waker_size() {\n        use std::mem::size_of;\n        println!(\"wakertype {}\", size_of::<ThinWaker>());\n        println!(\"waker inner {}\", size_of::<WakerInner<()>>());\n    }\n}\n"
  },
  {
    "path": "src/waker_registry.rs",
    "content": "#[allow(unused_imports)]\nuse crate::collections::WeakCell;\n#[allow(unused_imports)]\nuse crate::flavor::{Flavor, FlavorImpl};\n#[cfg(feature = \"trace_log\")]\nuse crate::tokio_task_id;\nuse crate::trace_log;\nuse crate::waker::*;\nuse parking_lot::Mutex;\nuse std::cell::UnsafeCell;\nuse std::collections::VecDeque;\nuse std::fmt::Debug;\nuse std::sync::{\n    atomic::{compiler_fence, AtomicU8, AtomicUsize, Ordering},\n    Arc, Weak,\n};\nuse std::task::{Context, Poll};\n\n// pub(crate) on type alias does not matter, mpmc::List alias works because RegistryMulti is pub\npub(crate) type RegistryMultiSend<T> = RegistryMulti<*const T>;\npub(crate) type RegistryMultiRecv = RegistryMulti<()>;\n\npub(crate) trait Registry: Send + Sync + 'static {\n    type Waker: Send + Unpin + 'static + Debug;\n\n    fn get_waker_state(&self, o_waker: &Option<Self::Waker>, order: Ordering) -> u8;\n\n    #[inline(always)]\n    fn clear_wakers(&self, _waker: &Self::Waker) {}\n\n    fn close(&self);\n\n    #[inline(always)]\n    fn len(&self) -> usize {\n        0\n    }\n\n    #[inline(always)]\n    fn commit_waiting(&self, _o_waker: &Option<Self::Waker>) -> u8 {\n        WakerState::Init as u8\n    }\n\n    #[inline(always)]\n    fn cancel_waker(&self, o_waker: &mut Option<Self::Waker>) {\n        let _ = o_waker.take();\n    }\n\n    #[inline(always)]\n    fn abandon_waker(&self, _waker: &Self::Waker) -> Result<(), u8> {\n        Ok(())\n    }\n}\n\npub(crate) trait RegistrySend<T>: Registry {\n    fn new() -> Self;\n\n    #[inline(always)]\n    fn use_direct_copy(&self) -> bool {\n        false\n    }\n\n    #[inline(always)]\n    fn reg_waker_blocking(\n        &self, _o_waker: &mut Option<<Self as Registry>::Waker>, _cache: &WakerCache<*const T>,\n        _payload: *const T,\n    ) {\n        unreachable!();\n    }\n\n    #[inline(always)]\n    fn reg_waker_async(\n        &self, _ctx: &mut Context, _o_waker: &mut Option<<Self as Registry>::Waker>,\n    ) -> 
Option<Poll<()>> {\n        unreachable!();\n    }\n\n    /// remove outdated waker, make sure it does not accumulate.\n    ///\n    /// It's ok to set state with Relaxed here, two scenario:\n    /// * set Done while the state is Init, does not matter other thread see it or not.\n    /// * other thread might have wake it in the process, but we are dropping it anyway, and then\n    ///   reg_waker with a new one.\n    #[inline(always)]\n    fn cancel_reuse_waker(\n        &self, o_waker: &mut Option<<Self as Registry>::Waker>, state: WakerState,\n    ) -> u8 {\n        let _ = o_waker.take();\n        state as u8\n    }\n\n    #[inline(always)]\n    fn fire<F>(&self, _flavor: &F) -> WakeResult\n    where\n        F: FlavorImpl<Item = T>,\n    {\n        WakeResult::Next\n    }\n\n    #[inline(always)]\n    fn cache_waker(\n        &self, _o_waker: Option<<Self as Registry>::Waker>, _cache: &WakerCache<*const T>,\n    ) {\n    }\n}\n\npub(crate) trait RegistryRecv: Registry {\n    fn new() -> Self;\n\n    #[inline(always)]\n    fn fire(&self) {}\n\n    #[inline(always)]\n    fn reg_waker_blocking(\n        &self, _o_waker: &mut Option<<Self as Registry>::Waker>, _cache: &WakerCache<()>,\n    ) {\n        unreachable!();\n    }\n\n    #[inline(always)]\n    fn reg_waker_async(\n        &self, _ctx: &mut Context, _o_waker: &mut Option<<Self as Registry>::Waker>,\n    ) -> Option<Poll<()>> {\n        unreachable!();\n    }\n\n    #[inline(always)]\n    fn cache_waker(&self, _o_waker: Option<<Self as Registry>::Waker>, _cache: &WakerCache<()>) {}\n\n    fn reg_select_waker(&self, channel_id: usize, waker: &Arc<SelectWaker>) -> bool;\n\n    #[inline(always)]\n    fn cancel_select_waker(&self, _waker: &Arc<SelectWaker>) {}\n}\n\n#[derive(Debug)]\npub struct RegistryDummy();\n\nimpl Registry for RegistryDummy {\n    type Waker = ();\n\n    #[inline(always)]\n    fn get_waker_state(&self, _o_waker: &Option<Self::Waker>, _order: Ordering) -> u8 {\n        unreachable!();\n    
}\n\n    #[inline(always)]\n    fn close(&self) {}\n}\n\nimpl<T> RegistrySend<T> for RegistryDummy {\n    #[inline(always)]\n    fn new() -> Self {\n        Self()\n    }\n}\n\ntype SingleWaker = ArcWaker<()>;\n//type SingleWaker = ThinWaker;\n\npub struct RegistrySingle {\n    cell: WeakCell<WakerInner<()>>,\n    // OneSpmc has comparable speed as WeakCell and does not allocate on waker registration,\n    // but since miri will report datarace issue, commented out for now.\n    //cell: OneSpmc<ThinWaker>,\n    _tag: &'static str,\n}\n\nimpl RegistrySingle {\n    #[inline(always)]\n    fn _fire(&self) {\n        if let Some(waker) = self.cell.pop() {\n            waker.wake();\n            trace_log!(\"{} wake\", self._tag);\n        }\n    }\n\n    #[inline(always)]\n    fn _reg_waker_async(&self, ctx: &mut Context, o_waker: &mut Option<SingleWaker>) {\n        // XXX don't know what the waker was, always generate new\n        let waker = ArcWaker::<()>::new_async(ctx, ());\n        //let waker = ThinWaker::Async(ctx.waker().clone());\n        trace_log!(\"{}{:?}: reg {:?}\", self._tag, tokio_task_id!(), waker);\n        self.cell.replace(waker.weak());\n        o_waker.replace(waker);\n        //self.cell.replace(waker);\n        // should store into o_waker, AsyncTx need to drop item when SendFuture drop\n    }\n\n    #[inline(always)]\n    fn _reg_waker_blocking(&self, o_waker: &mut Option<SingleWaker>) {\n        let waker = ArcWaker::<()>::new_blocking(());\n        //        let waker = ThinWaker::Blocking(thread::current());\n        trace_log!(\"{}{:?}: reg {:?}\", self._tag, tokio_task_id!(), waker);\n        self.cell.replace(waker.weak());\n        o_waker.replace(waker);\n        //self.cell.replace(waker);\n    }\n}\n\nimpl Registry for RegistrySingle {\n    type Waker = SingleWaker;\n\n    #[inline(always)]\n    fn get_waker_state(&self, _o_waker: &Option<SingleWaker>, _order: Ordering) -> u8 {\n        if self.cell.is_empty() {\n            
WakerState::Woken as u8\n        } else {\n            WakerState::Init as u8\n        }\n    }\n\n    #[inline(always)]\n    fn close(&self) {\n        self._fire();\n    }\n}\n\nimpl<T> RegistrySend<T> for RegistrySingle {\n    #[inline(always)]\n    fn new() -> Self {\n        //Self { cell: _OneSpmc::new(), _tag: \"tx\" }\n        Self { cell: WeakCell::new(), _tag: \"tx\" }\n    }\n\n    #[inline(always)]\n    fn fire<F>(&self, _flavor: &F) -> WakeResult\n    where\n        F: FlavorImpl<Item = T>,\n    {\n        self._fire();\n        WakeResult::Next\n    }\n\n    #[inline(always)]\n    fn reg_waker_blocking(\n        &self, o_waker: &mut Option<SingleWaker>, _cache: &WakerCache<*const T>, _payload: *const T,\n    ) {\n        self._reg_waker_blocking(o_waker);\n    }\n\n    #[inline(always)]\n    fn reg_waker_async(\n        &self, ctx: &mut Context, o_waker: &mut Option<SingleWaker>,\n    ) -> Option<Poll<()>> {\n        self._reg_waker_async(ctx, o_waker);\n        None\n    }\n}\n\nimpl RegistryRecv for RegistrySingle {\n    #[inline(always)]\n    fn new() -> Self {\n        //Self { cell: OneSpmc::new(), _tag: \"rx\" }\n        Self { cell: WeakCell::new(), _tag: \"rx\" }\n    }\n\n    #[inline(always)]\n    fn fire(&self) {\n        self._fire();\n    }\n\n    #[inline(always)]\n    fn reg_waker_blocking(&self, o_waker: &mut Option<SingleWaker>, _cache: &WakerCache<()>) {\n        self._reg_waker_blocking(o_waker)\n    }\n\n    #[inline(always)]\n    fn reg_waker_async(\n        &self, ctx: &mut Context, o_waker: &mut Option<SingleWaker>,\n    ) -> Option<Poll<()>> {\n        self._reg_waker_async(ctx, o_waker);\n        None\n    }\n\n    #[inline(always)]\n    fn reg_select_waker(&self, _channel_id: usize, waker: &Arc<SelectWaker>) -> bool {\n        trace_log!(\"{}: reg for select\", self._tag);\n        self.cell.replace(waker.clone_weak());\n        false\n    }\n}\n\nstruct RegistryMultiInner<P> {\n    queue: VecDeque<Weak<WakerInner<P>>>,\n    
selectors: Vec<SelectWakerWrapper>,\n    seq: u32,\n}\n\nimpl<P> RegistryMultiInner<P> {\n    #[inline(always)]\n    fn new() -> Self {\n        Self { queue: VecDeque::with_capacity(32), selectors: Vec::with_capacity(32), seq: 0 }\n    }\n\n    // it's better to use non-atomic than fetch_XXX\n    #[inline(always)]\n    fn check_select(&self) -> u8 {\n        if self.selectors.is_empty() {\n            0\n        } else {\n            MULTI_HAS_SELECT\n        }\n    }\n\n    // it's better to use non-atomic than fetch_XXX\n    #[inline(always)]\n    fn check_waker(&self) -> u8 {\n        if self.queue.is_empty() {\n            0\n        } else {\n            MULTI_HAS_WAKER\n        }\n    }\n}\n\nconst MULTI_EMPTY: u8 = 0;\nconst MULTI_HAS_SELECT: u8 = 1;\nconst MULTI_HAS_WAKER: u8 = 2;\n\npub struct RegistryMulti<P> {\n    state: AtomicU8,\n    inner: Mutex<RegistryMultiInner<P>>,\n    _tag: &'static str,\n}\n\nimpl<P: Copy> RegistryMulti<P> {\n    #[inline(always)]\n    fn reg_waker(&self, waker: &ArcWaker<P>) {\n        let weak = waker.weak();\n        {\n            let mut guard = self.inner.lock();\n            let seq = guard.seq.wrapping_add(1);\n            guard.seq = seq;\n            waker.set_seq(seq);\n            if guard.queue.is_empty() {\n                self.state.store(guard.check_select() | MULTI_HAS_WAKER, Ordering::SeqCst);\n            }\n            guard.queue.push_back(weak);\n        }\n    }\n\n    #[inline(always)]\n    fn _reg_waker_async(\n        &self, ctx: &mut Context, o_waker: &mut Option<ArcWaker<P>>, payload: P,\n    ) -> Option<Poll<()>> {\n        if let Some(waker) = o_waker.as_ref() {\n            match waker.try_change_state(WakerState::Woken, WakerState::Init) {\n                Ok(_) => {\n                    if waker.will_wake(ctx) {\n                        self.reg_waker(waker);\n                        return None;\n                    }\n                }\n                Err(state) => {\n                    if 
state < WakerState::Woken as u8 {\n                        if waker.will_wake(ctx) {\n                            trace_log!(\n                                \"{} {:?}: will_wake {:?}\",\n                                self._tag,\n                                tokio_task_id!(),\n                                waker\n                            );\n                            // Normally only selection or multiplex future will get here.\n                            // No need to reg again, since waker is not consumed.\n                            return Some(Poll::Pending);\n                        } else {\n                            // Spuriously woken by the runtime, waker can not be re-used (issue 38)\n                            // If we see Woken here, it is only possible that the other side has woken it\n                            if waker.get_state_relaxed() < WakerState::Woken as u8 {\n                                self._clear_wakers(waker, true);\n                            }\n                            trace_log!(\n                                \"{} {:?}: drop waker {:?}\",\n                                self._tag,\n                                tokio_task_id!(),\n                                waker\n                            );\n                        }\n                    } else if state == WakerState::Closed as u8 {\n                        return Some(Poll::Ready(()));\n                    } else {\n                        panic!(\"state: impossible for async {:?}\", state);\n                    }\n                }\n            }\n        }\n        let waker = ArcWaker::<P>::new_async(ctx, payload);\n        self.reg_waker(&waker);\n        o_waker.replace(waker);\n        None\n    }\n\n    #[inline(always)]\n    fn _reg_waker_blocking(\n        &self, o_waker: &mut Option<ArcWaker<P>>, _cache: &WakerCache<P>, payload: P,\n    ) {\n        if let Some(waker) = o_waker.as_ref() {\n            waker.reset_init();\n            self.reg_waker(waker);\n 
           trace_log!(\"{}{:?}: re-reg {:?}\", self._tag, tokio_task_id!(), waker);\n        } else {\n            debug_assert!(o_waker.is_none());\n            //let waker = cache.new_blocking(payload);\n            let waker = ArcWaker::<P>::new_blocking(payload);\n            self.reg_waker(&waker);\n            trace_log!(\"{}{:?}: reg {:?}\", self._tag, tokio_task_id!(), waker);\n            o_waker.replace(waker);\n        }\n    }\n\n    /// If trigger all selector while not empty.\n    /// return Some((waker, again))\n    /// if there's more waker after pop_first, again=true\n    #[inline(always)]\n    fn pop_first(&self) -> Option<(ArcWaker<P>, Option<u32>)> {\n        // This is a snapshot, it's safe to ignore the new situation after acquire lock\n        let flag = self.state.load(Ordering::SeqCst);\n        if flag == MULTI_EMPTY {\n            return None;\n        }\n        {\n            let mut guard = self.inner.lock();\n            if flag & MULTI_HAS_SELECT > 0 {\n                for select in &guard.selectors {\n                    select.wake();\n                }\n            }\n            if flag & MULTI_HAS_WAKER > 0 {\n                let mut has_pop = false;\n                loop {\n                    if let Some(weak) = guard.queue.pop_front() {\n                        has_pop = true;\n                        if let Some(inner) = weak.upgrade() {\n                            if guard.queue.is_empty() {\n                                self.state.store(guard.check_select(), Ordering::SeqCst);\n                                return Some((ArcWaker::from_arc(inner), None));\n                            } else {\n                                return Some((ArcWaker::from_arc(inner), Some(guard.seq)));\n                            }\n                        }\n                    } else {\n                        if has_pop {\n                            // might upgrade encounter weak previous loop\n                            
self.state.store(guard.check_select(), Ordering::SeqCst);\n                        }\n                        return None;\n                    }\n                }\n            }\n            // nothing changed, don't need to touch the state\n            None\n        }\n    }\n\n    /// ignore the selectors (since triggered in pop_first())\n    /// return the flags\n    #[inline(always)]\n    fn pop_again(&self) -> Option<ArcWaker<P>> {\n        // This is a snapshot, it's safe to ignore the new situation after acquire lock\n        let flag = self.state.load(Ordering::Acquire);\n        if flag == MULTI_EMPTY {\n            return None;\n        }\n        {\n            let mut guard = self.inner.lock();\n            let mut has_pop = false;\n            loop {\n                if let Some(weak) = guard.queue.pop_front() {\n                    has_pop = true;\n                    if let Some(inner) = weak.upgrade() {\n                        if guard.queue.is_empty() {\n                            self.state.store(guard.check_select(), Ordering::SeqCst);\n                        }\n                        return Some(ArcWaker::from_arc(inner));\n                    }\n                } else {\n                    if has_pop {\n                        // might upgrade encounter weak previous loop\n                        self.state.store(guard.check_select(), Ordering::SeqCst);\n                    }\n                    return None;\n                }\n            }\n        }\n    }\n\n    /// Call when waker is cancelled\n    #[inline(always)]\n    fn _clear_wakers(&self, old_waker: &ArcWaker<P>, oneshot: bool) {\n        // Don't need accurate, it's optional\n        if self.state.load(Ordering::Acquire) & MULTI_HAS_WAKER == 0 {\n            return;\n        }\n        let old_seq = old_waker.get_seq();\n        // the macro yield true to stop, false to continue\n        macro_rules! 
process {\n            ($guard: expr, $weak: expr) => {{\n                if let Some(waker) = $weak.upgrade() {\n                    let _seq = waker.get_seq();\n                    if _seq == old_seq {\n                        trace_log!(\"{}: clear {:?} hit\", self._tag, waker);\n                        // XXX, it's possible to reuse the waker, leave it for future review\n                        true\n                    } else if _seq > old_seq {\n                        $guard.queue.push_front($weak);\n                        true\n                    } else {\n                        // There might be later waker cancel due to success sending before commit_waiting.\n                        // While earlier waker is still waiting.\n                        let state = waker.get_state();\n                        if state < WakerState::Woken as u8 {\n                            $guard.queue.push_front($weak);\n                            true\n                        } else {\n                            if oneshot {\n                                trace_log!(\"{}: cancel {:?} one {}\", self._tag, waker, old_seq);\n                                true\n                            } else {\n                                trace_log!(\"{}: cancel {:?}<{}\", self._tag, waker, old_seq);\n                                false\n                            }\n                        }\n                    }\n                } else {\n                    false\n                }\n            }};\n        }\n        let mut guard = self.inner.lock();\n        if let Some(weak) = guard.queue.pop_front() {\n            if process!(guard, weak) {\n                if guard.queue.is_empty() {\n                    self.state.store(guard.check_select(), Ordering::SeqCst);\n                }\n                return;\n            }\n            loop {\n                if let Some(_weak) = guard.queue.pop_front() {\n                    if process!(guard, _weak) {\n                 
       if guard.queue.is_empty() {\n                            self.state.store(guard.check_select(), Ordering::SeqCst);\n                        }\n                        return;\n                    }\n                } else {\n                    // might upgrade encounter weak previous loop\n                    self.state.store(guard.check_select(), Ordering::SeqCst);\n                    return;\n                }\n            }\n        }\n    }\n\n    #[inline(always)]\n    fn _cache_waker(_o_waker: Option<ArcWaker<P>>, _cache: &WakerCache<P>) {\n        // XXX: skip cache for now, until we find out miri report of race\n        //if let Some(waker) = o_waker {\n        //    if waker.get_state() >= WakerState::Woken as u8 {\n        //        cache.push(waker);\n        //    }\n        //}\n    }\n}\n\nimpl<P: 'static + Copy> Registry for RegistryMulti<P> {\n    type Waker = ArcWaker<P>;\n\n    #[inline(always)]\n    fn get_waker_state(&self, o_waker: &Option<ArcWaker<P>>, order: Ordering) -> u8 {\n        if let Some(waker) = o_waker {\n            waker._get_state(order)\n        } else {\n            unreachable!();\n        }\n    }\n\n    /// Cancel outdated wakers until me, make sure it does not accumulate\n    #[inline(always)]\n    fn clear_wakers(&self, waker: &ArcWaker<P>) {\n        self._clear_wakers(waker, false);\n    }\n\n    #[inline(always)]\n    fn close(&self) {\n        let mut guard = self.inner.lock();\n        for selector in &guard.selectors {\n            selector.wake();\n        }\n        while let Some(weak) = guard.queue.pop_front() {\n            if let Some(waker) = weak.upgrade() {\n                let _r = waker.close_wake();\n                trace_log!(\"close {} wake {:?} {}\", self._tag, waker, _r);\n            }\n        }\n        self.state.store(0, Ordering::SeqCst);\n    }\n\n    /// return waker queue size\n    #[inline]\n    fn len(&self) -> usize {\n        let guard = self.inner.lock();\n        
guard.queue.len()\n    }\n\n    #[inline(always)]\n    fn commit_waiting(&self, o_waker: &Option<ArcWaker<P>>) -> u8 {\n        if let Some(waker) = &o_waker {\n            waker.commit_waiting()\n        } else {\n            unreachable!();\n        }\n    }\n\n    /// Abandon the waker (Waiting/Init -> Closed); returns Err with the current state on failure\n    #[inline(always)]\n    fn abandon_waker(&self, waker: &ArcWaker<P>) -> Result<(), u8> {\n        // which changes Waiting/Init to Closed\n        match waker.abandon() {\n            Ok(()) => {\n                trace_log!(\"{}: abandon cancel {:?}\", self._tag, waker);\n                self.clear_wakers(waker);\n                Ok(())\n            }\n            Err(state) => Err(state),\n        }\n    }\n\n    /// cancel one outdated waker, make sure it does not accumulate\n    #[inline(always)]\n    fn cancel_waker(&self, o_waker: &mut Option<ArcWaker<P>>) {\n        if let Some(waker) = o_waker.take() {\n            // If we see Woken here, it is only possible that the other side has woken it\n            if waker.get_state_relaxed() >= WakerState::Woken as u8 {\n                return;\n            }\n            self._clear_wakers(&waker, true);\n        }\n    }\n}\n\nimpl<T: 'static> RegistrySend<T> for RegistryMultiSend<T> {\n    #[inline(always)]\n    fn new() -> Self {\n        Self { inner: Mutex::new(RegistryMultiInner::new()), state: AtomicU8::new(0), _tag: \"tx\" }\n    }\n\n    #[inline(always)]\n    fn use_direct_copy(&self) -> bool {\n        self.state.load(Ordering::Relaxed) != MULTI_EMPTY\n    }\n\n    #[inline(always)]\n    fn reg_waker_blocking(\n        &self, o_waker: &mut Option<ArcWaker<*const T>>, cache: &WakerCache<*const T>,\n        payload: *const T,\n    ) {\n        self._reg_waker_blocking(o_waker, cache, payload)\n    }\n\n    #[inline(always)]\n    fn reg_waker_async(\n        &self, ctx: &mut Context, o_waker: &mut Option<ArcWaker<*const T>>,\n    ) -> Option<Poll<()>> {\n        self._reg_waker_async(ctx, o_waker, std::ptr::null_mut())\n    
}\n\n    /// remove outdated waker, make sure it does not accumulate.\n    ///\n    /// It's ok to set state with Relaxed here, two scenario:\n    /// * set Done while the state is Init, does not matter other thread see it or not.\n    /// * other thread might have wake it in the process, but we are dropping it anyway, and then\n    ///   reg_waker with a new one.\n    #[inline(always)]\n    fn cancel_reuse_waker(\n        &self, o_waker: &mut Option<ArcWaker<*const T>>, state: WakerState,\n    ) -> u8 {\n        if let Some(waker) = o_waker.as_ref() {\n            let cur_state = waker.get_state();\n            // If we se Woken here, only possible otherside has woken it\n            if cur_state >= WakerState::Woken as u8 {\n                trace_log!(\"{}: cancel_reuse {:?} {}\", self._tag, waker, cur_state);\n                if cur_state < state as u8 {\n                    state as u8\n                } else {\n                    cur_state\n                }\n            } else {\n                self._clear_wakers(waker, true);\n                let _ = o_waker.take();\n                state as u8\n            }\n        } else {\n            unreachable!();\n        }\n    }\n\n    #[inline(always)]\n    fn fire<F>(&self, _flavor: &F) -> WakeResult\n    where\n        F: FlavorImpl<Item = T>,\n    {\n        if let Some((waker, _last_seq)) = self.pop_first() {\n            let r = waker.wake();\n            trace_log!(\"wake {} {:?} {:?}\", self._tag, waker, r);\n            if r.is_done() {\n                return r;\n            }\n            drop(waker);\n            if let Some(mut last_seq) = _last_seq {\n                last_seq = last_seq.wrapping_sub(1);\n                while let Some(_waker) = self.pop_again() {\n                    let r = _waker.wake();\n                    trace_log!(\"wake {} {:?} {:?}\", self._tag, _waker, r);\n                    if r.is_done() {\n                        return r;\n                    }\n                    
// The latest seq in RegistryMulti is always last_waker.get_seq() +1\n                    // Because some waker (issued by sink / stream) might be INIT all the time,\n                    // prevent to dead loop situation when they are wake up and re-register again.\n                    if _waker.get_seq() >= last_seq {\n                        trace_log!(\"wake {} stop at {}\", self._tag, last_seq);\n                        return WakeResult::Next;\n                    }\n                }\n            }\n        }\n        WakeResult::Next\n    }\n\n    #[inline(always)]\n    fn cache_waker(&self, o_waker: Option<ArcWaker<*const T>>, cache: &WakerCache<*const T>) {\n        Self::_cache_waker(o_waker, cache);\n    }\n}\n\nimpl RegistryRecv for RegistryMultiRecv {\n    #[inline(always)]\n    fn new() -> Self {\n        Self { inner: Mutex::new(RegistryMultiInner::new()), state: AtomicU8::new(0), _tag: \"rx\" }\n    }\n\n    #[inline(always)]\n    fn reg_waker_blocking(&self, o_waker: &mut Option<ArcWaker<()>>, cache: &WakerCache<()>) {\n        self._reg_waker_blocking(o_waker, cache, ())\n    }\n\n    #[inline(always)]\n    fn reg_waker_async(\n        &self, ctx: &mut Context, o_waker: &mut Option<ArcWaker<()>>,\n    ) -> Option<Poll<()>> {\n        self._reg_waker_async(ctx, o_waker, ())\n    }\n\n    #[inline(always)]\n    fn fire(&self) {\n        if let Some((waker, _last_seq)) = self.pop_first() {\n            let r = waker.wake();\n            trace_log!(\"wake {} {:?} {:?}\", self._tag, waker, r);\n            if r.is_done() {\n                return;\n            }\n            drop(waker);\n            if let Some(mut last_seq) = _last_seq {\n                last_seq = last_seq.wrapping_sub(1);\n                while let Some(_waker) = self.pop_again() {\n                    let r = _waker.wake();\n                    trace_log!(\"wake {} {:?} {:?}\", self._tag, _waker, r);\n                    if r.is_done() {\n                        return;\n          
          }\n                    // The latest seq in RegistryMulti is always last_waker.get_seq() +1\n                    // Because some waker (issued by sink / stream) might be INIT all the time,\n                    // prevent to dead loop situation when they are wake up and re-register again.\n                    if _waker.get_seq() >= last_seq {\n                        trace_log!(\"wake {} stop at {}\", self._tag, last_seq);\n                        return;\n                    }\n                }\n            }\n        }\n    }\n\n    #[inline(always)]\n    fn cache_waker(&self, o_waker: Option<ArcWaker<()>>, cache: &WakerCache<()>) {\n        Self::_cache_waker(o_waker, cache);\n    }\n\n    #[inline(always)]\n    fn reg_select_waker(&self, channel_id: usize, waker: &Arc<SelectWaker>) -> bool {\n        trace_log!(\"{}: reg for select\", self._tag);\n        let mut guard = self.inner.lock();\n        if guard.selectors.is_empty() {\n            self.state.store(guard.check_waker() | MULTI_HAS_SELECT, Ordering::SeqCst);\n        }\n        guard.selectors.push(SelectWaker::to_wrapper(waker.clone(), channel_id));\n        true\n    }\n\n    #[inline(always)]\n    fn cancel_select_waker(&self, waker: &Arc<SelectWaker>) {\n        let mut guard = self.inner.lock();\n        if let Some((i, _)) = guard.selectors.iter().enumerate().find(|&(_, entry)| entry.eq(waker))\n        {\n            guard.selectors.remove(i);\n        }\n        if guard.selectors.is_empty() {\n            self.state.store(guard.check_waker(), Ordering::SeqCst);\n        }\n    }\n}\n\n// Due to it's type alias in crate::select::Mux, should be pub\npub struct SelectWakerWrapper(Arc<SelectWaker>, usize);\n\nimpl SelectWakerWrapper {\n    #[inline(always)]\n    pub(crate) fn wake(&self) {\n        if let Some(waker) = self.0.cell.pop() {\n            trace_log!(\"rx: wake select\");\n            self.0.hint.store(self.1, Ordering::Release);\n            waker.wake();\n        }\n    
}\n\n    #[inline(always)]\n    pub(crate) fn eq(&self, waker: &Arc<SelectWaker>) -> bool {\n        Arc::ptr_eq(&self.0, waker)\n    }\n}\n\n// For multiplex\nimpl Registry for SelectWakerWrapper {\n    type Waker = ArcWaker<()>;\n\n    #[inline(always)]\n    fn get_waker_state(&self, _o_waker: &Option<ArcWaker<()>>, _order: Ordering) -> u8 {\n        unreachable!();\n    }\n\n    #[inline(always)]\n    fn close(&self) {\n        // decrease the opened_channels count to hint Multiplex\n        self.0.close();\n        self.wake();\n    }\n}\n\n// For multiplex\nimpl RegistryRecv for SelectWakerWrapper {\n    fn new() -> Self {\n        unreachable!();\n    }\n\n    #[inline(always)]\n    fn fire(&self) {\n        self.wake();\n    }\n\n    fn reg_select_waker(&self, _channel_id: usize, _waker: &Arc<SelectWaker>) -> bool {\n        unreachable!();\n    }\n}\n\npub(crate) struct SelectWaker {\n    cell: WeakCell<WakerInner<()>>,\n    // does not need to be correct, just a hint for the try_select\n    hint: AtomicUsize,\n    o_waker: UnsafeCell<Option<ArcWaker<()>>>,\n    // For multiplex, not for select\n    opened_channels: AtomicUsize,\n}\n\nunsafe impl Send for SelectWaker {}\nunsafe impl Sync for SelectWaker {}\n\nimpl SelectWaker {\n    #[inline(always)]\n    pub fn new() -> Self {\n        Self {\n            cell: WeakCell::new(),\n            hint: AtomicUsize::new(0),\n            o_waker: UnsafeCell::new(None),\n            opened_channels: AtomicUsize::new(0),\n        }\n    }\n\n    #[inline(always)]\n    pub fn init_blocking(&self) {\n        let weak = if let Some(waker) = self.get_waker().as_ref() {\n            waker.reset_init();\n            waker.weak()\n        } else {\n            let waker = ArcWaker::new_blocking(());\n            let weak = waker.weak();\n            self.get_waker().replace(waker);\n            weak\n        };\n        self.cell.replace(weak);\n        self.hint.store(0, Ordering::Release)\n    }\n\n    
#[allow(dead_code)]\n    #[inline(always)]\n    pub fn init_async(&self, ctx: &mut Context) {\n        let waker = ArcWaker::new_async(ctx, ());\n        let weak = waker.weak();\n        self.get_waker().replace(waker);\n        self.cell.replace(weak);\n        self.hint.store(0, Ordering::Release)\n    }\n\n    #[inline(always)]\n    fn get_waker(&self) -> &mut Option<ArcWaker<()>> {\n        unsafe { &mut *self.o_waker.get() }\n    }\n\n    #[inline(always)]\n    fn clone_weak(&self) -> Weak<WakerInner<()>> {\n        self.get_waker().as_ref().unwrap().weak()\n    }\n\n    #[inline(always)]\n    pub fn add_opened(&self) {\n        self.opened_channels.fetch_add(1, Ordering::SeqCst);\n    }\n\n    #[inline(always)]\n    pub fn get_opened_count(&self) -> usize {\n        self.opened_channels.load(Ordering::SeqCst)\n    }\n\n    #[inline(always)]\n    pub fn to_wrapper(self: Arc<SelectWaker>, idx: usize) -> SelectWakerWrapper {\n        SelectWakerWrapper(self, idx)\n    }\n\n    #[inline(always)]\n    pub fn get_hint(&self) -> usize {\n        compiler_fence(Ordering::AcqRel);\n        self.hint.load(Ordering::Relaxed)\n    }\n\n    #[inline(always)]\n    pub fn close(&self) {\n        self.opened_channels.fetch_sub(1, Ordering::SeqCst);\n    }\n\n    #[inline(always)]\n    pub fn get_waker_state(&self, order: Ordering) -> u8 {\n        self.get_waker().as_ref().unwrap()._get_state(order)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n\n    use super::*;\n\n    use crate::waker::ArcWaker;\n\n    #[test]\n    fn print_waker_registry_size() {\n        use std::mem::size_of;\n        println!(\"RegistryMultiSend<usize> size {}\", size_of::<RegistryMultiSend<usize>>());\n        println!(\"RegistryMultiRecv size {}\", size_of::<RegistryMultiRecv>());\n        println!(\"RegistrySingle size {}\", size_of::<RegistrySingle>());\n        println!(\"RegistryMulti<()> size {}\", size_of::<RegistryMultiRecv>());\n    }\n\n    #[test]\n    fn test_registry_multi_pop() {\n        
let reg = RegistryMultiRecv::new();\n\n        // test push\n        let waker1 = ArcWaker::new_blocking(());\n        assert_eq!(reg.len(), 0);\n        reg.reg_waker(&waker1);\n        assert_eq!(waker1.get_state(), WakerState::Init as u8);\n        assert_eq!(waker1.get_seq(), 1);\n        assert_eq!(reg.len(), 1);\n\n        let waker2 = ArcWaker::new_blocking(());\n        reg.reg_waker(&waker2);\n        waker2.commit_waiting();\n        assert_eq!(waker2.get_seq(), 2);\n        assert_eq!(reg.len(), 2);\n        assert_eq!(waker2.get_seq(), waker1.get_seq() + 1);\n        assert_eq!(waker2.get_state(), WakerState::Waiting as u8);\n\n        if let Some((w, seq)) = reg.pop_first() {\n            assert!(w.wake() == WakeResult::Next);\n            assert!(seq.is_some());\n        }\n        assert_eq!(waker1.get_state(), WakerState::Woken as u8);\n        assert_eq!(reg.len(), 1);\n        if let Some(w) = reg.pop_again() {\n            assert!(w.wake() == WakeResult::Woken);\n        }\n        assert_eq!(waker2.get_state(), WakerState::Woken as u8);\n        assert_eq!(reg.len(), 0);\n    }\n\n    #[test]\n    fn test_registry_multi_clear_waiting() {\n        let reg = RegistryMultiRecv::new();\n        // test seq\n        let waker3 = ArcWaker::new_blocking(());\n        reg.reg_waker(&waker3);\n        waker3.commit_waiting();\n        assert_eq!(waker3.get_state(), WakerState::Waiting as u8);\n        let waker4 = ArcWaker::new_blocking(());\n        reg.reg_waker(&waker4); // Init\n        assert_eq!(waker4.get_state(), WakerState::Init as u8);\n        let num_workers = reg.len();\n        // Because waker3 not woken up, waker4 is not clear\n        reg.clear_wakers(&waker4);\n        assert_eq!(reg.len(), num_workers);\n        for _ in 0..10 {\n            let _waker = ArcWaker::new_blocking(());\n            reg.reg_waker(&_waker);\n        }\n        let num_workers = reg.len();\n        assert_eq!(reg.len(), num_workers);\n    }\n\n    #[test]\n   
 fn test_registry_multi_clear_oneshot() {\n        let reg = RegistryMultiRecv::new();\n        // test seq\n        let waker1 = ArcWaker::new_blocking(());\n        reg.reg_waker(&waker1);\n        assert_eq!(waker1.get_state(), WakerState::Init as u8);\n        let waker2 = ArcWaker::new_blocking(());\n        reg.reg_waker(&waker2); // Init\n        waker2.commit_waiting();\n        assert_eq!(waker2.get_state(), WakerState::Waiting as u8);\n        for _ in 0..10 {\n            let _waker = ArcWaker::new_blocking(());\n            reg.reg_waker(&_waker);\n        }\n        let num_workers = reg.len();\n        println!(\"clear waker2 oneshot seq {}\", waker2.get_seq());\n        reg.cancel_waker(&mut Some(waker2));\n        assert_eq!(reg.len(), num_workers); // Only nothing happen.\n        reg.cancel_waker(&mut Some(waker1));\n        assert_eq!(reg.len(), num_workers - 1); // Only waker1 is removed.\n    }\n\n    #[test]\n    fn test_registry_multi_clear() {\n        let reg = RegistryMultiRecv::new();\n        // test seq\n        let waker1 = ArcWaker::new_blocking(());\n        reg.reg_waker(&waker1);\n        assert_eq!(waker1.get_state(), WakerState::Init as u8);\n        let waker2 = ArcWaker::new_blocking(());\n        reg.reg_waker(&waker2); // Init\n        drop(waker2); // waker4 is dropped, weak is left\n        for _ in 0..10 {\n            let _waker = ArcWaker::new_blocking(());\n            reg.reg_waker(&_waker);\n        }\n        let waker3 = ArcWaker::new_blocking(());\n        reg.reg_waker(&waker3);\n        let _num_workers = reg.len(); // Keep for debugging context, though not used in assertion\n        println!(\"clear waker3 seq={}\", waker3.get_seq());\n        reg.clear_wakers(&waker3); // nothing happen, because waker3 is there\n        assert_eq!(reg.len(), 13);\n        reg.clear_wakers(&waker1);\n        assert_eq!(reg.len(), 12);\n        reg.clear_wakers(&waker3);\n        assert_eq!(reg.len(), 0);\n    }\n\n    #[test]\n  
  fn test_registry_multi_close() {\n        let reg = RegistryMultiRecv::new();\n        println!(\"test close\");\n        for _ in 0..10 {\n            let _waker = ArcWaker::new_blocking(());\n            reg.reg_waker(&_waker);\n        }\n        assert!(reg.len() > 0);\n        reg.close();\n        assert_eq!(reg.len(), 0);\n    }\n}\n"
  },
  {
    "path": "src/weak.rs",
    "content": "use crate::flavor::FlavorMP;\nuse crate::{shared::*, SenderType};\nuse std::sync::Arc;\n\n/// A weak reference of SenderType\n///\n/// Can be obtain from [MTx::downgrade](crate::MTx::downgrade) or [MAsyncTx::downgrade](crate::MAsyncTx::downgrade).\n/// When the number of valid sender is non-zero, can try [upgrade](WeakTx::upgrade) to a [MTx](crate::MTx) or [MAsyncTx](crate::MAsyncTx).\npub struct WeakTx<F: Flavor + FlavorMP>(pub(crate) Arc<ChannelShared<F>>);\n\nimpl<F: Flavor + FlavorMP> WeakTx<F> {\n    /// Upgrade to MTx or MAsyncTx (Only allow for mpsc or mpmc)\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// use crossfire::*;\n    /// let (tx, rx) = mpsc::bounded_blocking::<usize>(100);\n    /// let weak_tx = tx.downgrade();\n    /// let tx_clone = weak_tx.upgrade::<MTx<_>>().unwrap();\n    /// drop(tx);\n    /// drop(tx_clone);\n    /// assert!(weak_tx.upgrade::<MTx<_>>().is_none());\n    /// assert_eq!(weak_tx.get_tx_count(), 0);\n    /// drop(rx);\n    /// ```\n    #[inline]\n    pub fn upgrade<S: SenderType<Flavor = F>>(&self) -> Option<S> {\n        if self.0.try_add_tx() {\n            Some(S::new(self.0.clone()))\n        } else {\n            None\n        }\n    }\n\n    #[inline(always)]\n    pub fn get_tx_count(&self) -> usize {\n        self.0.get_tx_count()\n    }\n\n    #[inline(always)]\n    pub fn get_rx_count(&self) -> usize {\n        self.0.get_rx_count()\n    }\n}\n"
  },
  {
    "path": "test-suite/Cargo.toml",
    "content": "[package]\nname = \"crossfire-test\"\nversion = \"0.0.1\"\nauthors = [\"plan <frostyplanet@gmail.com>\"]\nedition = \"2021\"\nlicense = \"Apache-2.0\"\nreadme = \"README.md\"\n\n[dependencies]\ncrossfire = {path=\"../\"}\nasync-std = {version = \"1\", optional=true}\nlog = { version=\"0\"}\nsmol = {version = \"2\", optional=true }\ncompio = { version = \"0.17\", optional = true, features = [\"runtime\", \"dispatcher\", \"polling\"], default-features = false}\ntokio = { version = \"1\", optional = true, features = [\"sync\", \"rt-multi-thread\", \"rt\", \"macros\"] }\nfastrand = \"2.3\"\nrstest = \"0\"\ncaptains-log = {version=\"0.13\", features = [\"ringfile\", \"tracing\"] }\ncriterion2 = { version=\"3.0.2\", features = [\"async\"]}\ncrossbeam-channel = \"0.5\"\ncrossbeam-utils = \"0.8\"\nflume = {version=\"0.11\", features= [\"async\"] }\nkanal = {version=\"0.1\"}\nasync-channel = {version=\"2.5.0\"}\nfutures-util = {version=\"0.3\", default-features = false}\nasync-oneshot = \"0.5\"\noneshot = \"0.1\"\n\n[features]\ndefault = []\n\ntokio = [\"crossfire/tokio\", \"dep:tokio\"]\n\nasync_std = [\"dep:async-std\", \"crossfire/async_std\"]\n\nsmol = [\"dep:smol\"]\n\ncompio = [\"dep:compio\"]\n\n# This switch on multi thread test for compio\ncompio_dispatcher = [\"dep:compio\"]\n\n# for test workflow debugging\ntrace_log = [\"crossfire/trace_log\"]\n\n# test invoking timer function in async runtime, try to opt-out time driver for miri\ntime = [\"compio?/time\", \"tokio?/time\"]\n\n[[bench]]\nname = \"crossfire\"\nharness = false\n\n[[bench]]\nname = \"crossfire_select\"\nharness = false\n\n[[bench]]\nname = \"crossbeam\"\nharness = false\n\n[[bench]]\nname = \"flume\"\nharness = false\n\n[[bench]]\nname = \"kanal\"\nharness = false\n\n[[bench]]\nname = \"tokio\"\nharness = false\n\n[[bench]]\nname = \"async_channel\"\nharness = false\n\n[[bench]]\nname = \"extra\"\nharness = false\n"
  },
  {
    "path": "test-suite/benches/async_channel.rs",
    "content": "use criterion::*;\nuse std::time::Duration;\n\n#[allow(unused_imports)]\nmod common;\nuse common::*;\n\nasync fn _async_channel_unbounded_async(tx_count: usize, rx_count: usize, msg_count: usize) {\n    let (tx, rx) = async_channel::unbounded();\n    let mut th_tx = Vec::new();\n    let mut th_rx = Vec::new();\n    let mut send_counter: usize = 0;\n    let _send_counter = msg_count / tx_count;\n    for _tx_i in 0..tx_count {\n        send_counter += _send_counter;\n        let _tx = tx.clone();\n        th_tx.push(async_spawn!(async move {\n            for i in 0.._send_counter {\n                if let Err(e) = _tx.send(i).await {\n                    panic!(\"send error: {:?}\", e);\n                }\n            }\n        }));\n    }\n    drop(tx);\n    let mut recv_counter = 0;\n    for _ in 0..(rx_count - 1) {\n        let _rx = rx.clone();\n        th_rx.push(async_spawn!(async move {\n            let mut i = 0;\n            loop {\n                match _rx.recv().await {\n                    Ok(_) => {\n                        i += 1;\n                    }\n                    Err(_) => {\n                        break;\n                    }\n                }\n            }\n            i\n        }));\n    }\n    loop {\n        match rx.recv().await {\n            Ok(_) => {\n                recv_counter += 1;\n            }\n            Err(_) => {\n                break;\n            }\n        }\n    }\n    for th in th_tx {\n        let _ = th.await;\n    }\n    for th in th_rx {\n        recv_counter += async_join_result!(th);\n    }\n    assert_eq!(send_counter, recv_counter);\n}\n\nasync fn _async_channel_bounded_async(\n    bound: usize, tx_count: usize, rx_count: usize, msg_count: usize,\n) {\n    let (tx, rx) = async_channel::bounded(bound);\n    let mut th_tx = Vec::new();\n    let mut th_rx = Vec::new();\n    let mut send_counter: usize = 0;\n    let _send_counter = msg_count / tx_count;\n    for _tx_i in 0..tx_count {\n   
     send_counter += _send_counter;\n        let _tx = tx.clone();\n        th_tx.push(async_spawn!(async move {\n            for i in 0.._send_counter {\n                if let Err(e) = _tx.send(i).await {\n                    panic!(\"send error: {:?}\", e);\n                }\n            }\n        }));\n    }\n    drop(tx);\n    let mut recv_counter = 0;\n    for _ in 0..(rx_count - 1) {\n        let _rx = rx.clone();\n        th_rx.push(async_spawn!(async move {\n            let mut i = 0;\n            loop {\n                match _rx.recv().await {\n                    Ok(_) => {\n                        i += 1;\n                    }\n                    Err(_) => {\n                        break;\n                    }\n                }\n            }\n            i\n        }));\n    }\n    loop {\n        match rx.recv().await {\n            Ok(_) => {\n                recv_counter += 1;\n            }\n            Err(_) => {\n                break;\n            }\n        }\n    }\n    for th in th_tx {\n        let _ = th.await;\n    }\n    for th in th_rx {\n        recv_counter += async_join_result!(th);\n    }\n    assert_eq!(send_counter, recv_counter);\n}\n\nfn bench_async_channel_unbounded_async(c: &mut Criterion) {\n    let mut group = c.benchmark_group(\"async_channel_unbounded_async\");\n    group.significance_level(0.1).sample_size(50);\n    group.measurement_time(Duration::from_secs(20));\n    for input in n_1() {\n        let param = Concurrency { tx_count: input, rx_count: 1 };\n        group.throughput(Throughput::Elements(ONE_MILLION as u64));\n        group.bench_with_input(BenchmarkId::new(\"mpsc\", &param), &param, |b, i| {\n            b.to_async(BenchExecutor())\n                .iter(|| _async_channel_unbounded_async(i.tx_count, i.rx_count, ONE_MILLION))\n        });\n    }\n    for input in n_n() {\n        let param = Concurrency { tx_count: input.0, rx_count: input.1 };\n        
group.throughput(Throughput::Elements(ONE_MILLION as u64));\n        group.bench_with_input(BenchmarkId::new(\"mpmc\", &param), &param, |b, i| {\n            b.to_async(BenchExecutor())\n                .iter(|| _async_channel_unbounded_async(i.tx_count, i.rx_count, ONE_MILLION))\n        });\n    }\n}\n\nfn bench_async_channel_bounded_async(c: &mut Criterion) {\n    let mut group = c.benchmark_group(\"async_channel_bounded_async\");\n    group.significance_level(0.1).sample_size(50);\n    group.measurement_time(Duration::from_secs(20));\n    for input in n_1() {\n        let param = Concurrency { tx_count: input, rx_count: 1 };\n        group.throughput(Throughput::Elements(TEN_THOUSAND as u64));\n        group.bench_with_input(BenchmarkId::new(\"mpsc size 1\", &param), &param, |b, i| {\n            b.to_async(BenchExecutor())\n                .iter(|| _async_channel_bounded_async(1, i.tx_count, i.rx_count, TEN_THOUSAND))\n        });\n    }\n\n    for input in n_1() {\n        let param = Concurrency { tx_count: input, rx_count: 1 };\n        group.throughput(Throughput::Elements(ONE_MILLION as u64));\n        group.bench_with_input(BenchmarkId::new(\"mpsc size 100\", &param), &param, |b, i| {\n            b.to_async(BenchExecutor())\n                .iter(|| _async_channel_bounded_async(100, i.tx_count, i.rx_count, ONE_MILLION))\n        });\n    }\n    for input in n_n() {\n        let param = Concurrency { tx_count: input.0, rx_count: input.1 };\n        group.throughput(Throughput::Elements(ONE_MILLION as u64));\n        group.bench_with_input(BenchmarkId::new(\"mpmc size 100\", &param), &param, |b, i| {\n            b.to_async(BenchExecutor())\n                .iter(|| _async_channel_bounded_async(100, i.tx_count, i.rx_count, ONE_MILLION))\n        });\n    }\n}\n\ncriterion_group!(benches, bench_async_channel_bounded_async, bench_async_channel_unbounded_async,);\ncriterion_main!(benches);\n"
  },
  {
    "path": "test-suite/benches/common.rs",
    "content": "use std::fmt;\nuse std::future::Future;\n\nuse criterion::async_executor::AsyncExecutor;\n\n#[allow(dead_code)]\npub const ONE_MILLION: usize = 1000000;\n#[allow(dead_code)]\npub const TEN_THOUSAND: usize = 10000;\n\n#[allow(dead_code)]\npub struct Concurrency {\n    pub tx_count: usize,\n    pub rx_count: usize,\n}\n\nimpl fmt::Display for Concurrency {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"{}x{}\", self.tx_count, self.rx_count)\n    }\n}\n\npub struct BenchExecutor();\n\nimpl AsyncExecutor for BenchExecutor {\n    fn block_on<T>(&self, future: impl Future<Output = T>) -> T {\n        #[cfg(feature = \"smol\")]\n        {\n            use std::num::NonZero;\n            use std::thread;\n            let num_threads = thread::available_parallelism().unwrap_or(NonZero::new(1).unwrap());\n            unsafe { std::env::set_var(\"SMOL_THREADS\", num_threads.to_string()) };\n            smol::block_on(future)\n        }\n        #[cfg(not(feature = \"smol\"))]\n        {\n            #[cfg(feature = \"async_std\")]\n            {\n                async_std::task::block_on(future)\n            }\n            #[cfg(not(feature = \"async_std\"))]\n            {\n                tokio::runtime::Builder::new_multi_thread()\n                    .enable_all()\n                    .build()\n                    .unwrap()\n                    .block_on(future)\n            }\n        }\n    }\n}\n\n#[allow(unused_macros)]\nmacro_rules! 
async_spawn {\n    ($f: expr) => {{\n        #[cfg(feature = \"smol\")]\n        {\n            smol::spawn($f)\n        }\n        #[cfg(not(feature = \"smol\"))]\n        {\n            #[cfg(feature = \"async_std\")]\n            {\n                async_std::task::spawn($f)\n            }\n            #[cfg(any(feature = \"tokio\", not(feature = \"async_std\")))]\n            {\n                tokio::spawn($f)\n            }\n        }\n    }};\n}\npub(super) use async_spawn;\n\n#[allow(unused_macros)]\nmacro_rules! async_join_result {\n    ($th: expr) => {{\n        #[cfg(feature = \"smol\")]\n        {\n            $th.await\n        }\n        #[cfg(not(feature = \"smol\"))]\n        {\n            #[cfg(feature = \"async_std\")]\n            {\n                $th.await\n            }\n            #[cfg(not(feature = \"async_std\"))]\n            {\n                $th.await.expect(\"join\")\n            }\n        }\n    }};\n}\npub(super) use async_join_result;\n\n#[allow(dead_code)]\n#[inline(always)]\npub fn n_n() -> Vec<(usize, usize)> {\n    vec![(2, 2), (4, 4), (8, 8), (16, 16)]\n}\n\n#[inline(always)]\npub fn n_1() -> Vec<usize> {\n    vec![1, 2, 4, 8, 16]\n}\n"
  },
  {
    "path": "test-suite/benches/crossbeam.rs",
    "content": "use criterion::*;\nuse crossbeam_utils::sync::WaitGroup;\nuse std::thread;\nuse std::time::Duration;\n\n#[allow(unused_imports)]\nmod common;\nuse common::*;\n\nfn _crossbeam_bounded_sync(bound: usize, tx_count: usize, rx_count: usize, msg_count: usize) {\n    let (tx, rx) = crossbeam_channel::bounded::<usize>(bound);\n    let mut th_tx = Vec::new();\n    let mut th_rx = Vec::new();\n    let mut send_counter: usize = 0;\n    let _send_counter = msg_count / tx_count;\n    for _ in 0..tx_count {\n        send_counter += _send_counter;\n        let _tx = tx.clone();\n        th_tx.push(thread::spawn(move || {\n            for i in 0.._send_counter {\n                _tx.send(i).expect(\"send\");\n            }\n        }));\n    }\n    drop(tx);\n    let mut recv_counter = 0;\n    for _ in 0..(rx_count - 1) {\n        let _rx = rx.clone();\n        th_rx.push(thread::spawn(move || -> usize {\n            let mut i = 0;\n            loop {\n                match _rx.recv() {\n                    Ok(_) => {\n                        i += 1;\n                    }\n                    Err(_) => {\n                        break;\n                    }\n                }\n            }\n            i\n        }));\n    }\n    loop {\n        match rx.recv() {\n            Ok(_) => {\n                recv_counter += 1;\n            }\n            Err(_) => {\n                break;\n            }\n        }\n    }\n    for th in th_tx {\n        let _ = th.join();\n    }\n    for th in th_rx {\n        if let Ok(count) = th.join() {\n            recv_counter += count;\n        }\n    }\n    assert_eq!(send_counter, recv_counter);\n}\n\nfn _crossbeam_unbounded_sync(tx_count: usize, rx_count: usize, msg_count: usize) {\n    let (tx, rx) = crossbeam_channel::unbounded::<usize>();\n    let mut th_tx = Vec::new();\n    let mut th_rx = Vec::new();\n    let mut send_counter: usize = 0;\n    let _send_counter = msg_count / tx_count;\n    for _ in 0..tx_count {\n      
  send_counter += _send_counter;\n        let _tx = tx.clone();\n        th_tx.push(thread::spawn(move || {\n            for i in 0.._send_counter {\n                _tx.send(i).expect(\"send\");\n            }\n        }));\n    }\n    drop(tx);\n    let mut recv_counter = 0;\n    for _ in 0..(rx_count - 1) {\n        let _rx = rx.clone();\n        th_rx.push(thread::spawn(move || -> usize {\n            let mut i = 0;\n            loop {\n                match _rx.recv() {\n                    Ok(_) => {\n                        i += 1;\n                    }\n                    Err(_) => {\n                        break;\n                    }\n                }\n            }\n            i\n        }));\n    }\n    loop {\n        match rx.recv() {\n            Ok(_) => {\n                recv_counter += 1;\n            }\n            Err(_) => {\n                break;\n            }\n        }\n    }\n    for th in th_tx {\n        let _ = th.join();\n    }\n    for th in th_rx {\n        if let Ok(count) = th.join() {\n            recv_counter += count;\n        }\n    }\n    assert_eq!(send_counter, recv_counter);\n}\n\nfn _crossbeam_select_mpsc(num_channels: usize, bound: usize, total_msgs: usize, is_bias: bool) {\n    let msg_count_per_channel = total_msgs / num_channels;\n    let mut rxs = Vec::new();\n    let mut th_tx = Vec::new();\n    for _ in 0..num_channels {\n        let (tx, rx) = crossbeam_channel::bounded::<usize>(bound);\n        rxs.push(rx);\n        th_tx.push(thread::spawn(move || {\n            for i in 0..msg_count_per_channel {\n                tx.send(i).expect(\"send\");\n            }\n        }));\n    }\n\n    // Receive all messages using select - reuse Select instance\n    let mut recv_counter = 0;\n\n    let mut select = if is_bias {\n        crossbeam_channel::Select::new_biased()\n    } else {\n        crossbeam_channel::Select::new()\n    };\n    let mut handles = Vec::with_capacity(num_channels);\n    for rx in &rxs {\n    
    let op = select.recv(rx);\n        handles.push(op);\n    }\n    while recv_counter < total_msgs {\n        // Perform the selection\n        let oper = select.select();\n        let i = oper.index();\n        match oper.recv(&rxs[i]) {\n            Ok(_) => recv_counter += 1,\n            Err(_) => {\n                // https://docs.rs/crossbeam-channel/latest/crossbeam_channel/struct.Select.html#method.remove\n                // If new operations are added after removing some, the indices of removed operations will not be reused\n                select.remove(i);\n            }\n        }\n    }\n    assert_eq!(total_msgs, recv_counter);\n    // Wait for all senders to finish before receiving\n    for th in th_tx {\n        let _ = th.join();\n    }\n}\n\nfn bench_crossbeam_bounded_sync(c: &mut Criterion) {\n    let mut group = c.benchmark_group(\"crossbeam_bounded\");\n    group.significance_level(0.1).sample_size(50);\n    group.measurement_time(Duration::from_secs(20));\n    for input in n_1() {\n        let param = Concurrency { tx_count: input, rx_count: 1 };\n        group.throughput(Throughput::Elements(ONE_MILLION as u64));\n        group.bench_with_input(BenchmarkId::new(\"mpsc size 1\", input), &param, |b, i| {\n            b.iter(|| _crossbeam_bounded_sync(1, i.tx_count, i.rx_count, ONE_MILLION))\n        });\n    }\n    for input in n_1() {\n        let param = Concurrency { tx_count: input, rx_count: 1 };\n        group.throughput(Throughput::Elements(ONE_MILLION as u64));\n        group.bench_with_input(BenchmarkId::new(\"mpsc size 100\", input), &param, |b, i| {\n            b.iter(|| _crossbeam_bounded_sync(100, i.tx_count, i.rx_count, ONE_MILLION))\n        });\n    }\n    for input in n_n() {\n        let param = Concurrency { tx_count: input.0, rx_count: input.1 };\n        group.throughput(Throughput::Elements(ONE_MILLION as u64));\n        group.bench_with_input(\n            BenchmarkId::new(\"mpmc size 100\", param.to_string()),\n       
     &param,\n            |b, i| b.iter(|| _crossbeam_bounded_sync(100, i.tx_count, i.rx_count, ONE_MILLION)),\n        );\n    }\n    group.finish();\n}\n\nfn bench_crossbeam_unbounded_sync(c: &mut Criterion) {\n    let mut group = c.benchmark_group(\"crossbeam_unbounded\");\n    group.significance_level(0.1).sample_size(50);\n    group.measurement_time(Duration::from_secs(20));\n    for input in n_1() {\n        let param = Concurrency { tx_count: input, rx_count: 1 };\n        group.throughput(Throughput::Elements(ONE_MILLION as u64));\n        group.bench_with_input(BenchmarkId::new(\"mpsc\", input), &param, |b, i| {\n            b.iter(|| _crossbeam_unbounded_sync(i.tx_count, i.rx_count, ONE_MILLION))\n        });\n    }\n    for input in n_n() {\n        let param = Concurrency { tx_count: input.0, rx_count: input.1 };\n        group.throughput(Throughput::Elements(ONE_MILLION as u64));\n        group.bench_with_input(BenchmarkId::new(\"mpmc\", param.to_string()), &param, |b, i| {\n            b.iter(|| _crossbeam_unbounded_sync(i.tx_count, i.rx_count, ONE_MILLION))\n        });\n    }\n    group.finish();\n}\n\nfn bench_crossbeam_select_mpsc(c: &mut Criterion) {\n    let mut group = c.benchmark_group(\"crossbeam_select\");\n    group.significance_level(0.1).sample_size(50);\n    group.measurement_time(Duration::from_secs(20));\n\n    let param = (4, 100, ONE_MILLION); // 3 channels, bound=100, 1M/3 messages per channel\n    group.throughput(Throughput::Elements(ONE_MILLION as u64));\n    group.bench_with_input(\n        BenchmarkId::new(\"select_mpsc_4_channels_bias\", \"4\"),\n        &param,\n        |b, &(num_channels, bound, msg_count_per_channel)| {\n            b.iter(|| _crossbeam_select_mpsc(num_channels, bound, msg_count_per_channel, true))\n        },\n    );\n    group.bench_with_input(\n        BenchmarkId::new(\"select_mpsc_4_channels_fair\", \"4\"),\n        &param,\n        |b, &(num_channels, bound, msg_count_per_channel)| {\n            
b.iter(|| _crossbeam_select_mpsc(num_channels, bound, msg_count_per_channel, false))\n        },\n    );\n\n    group.finish();\n}\n\nfn bench_crossbeam_wait_group(c: &mut Criterion) {\n    let mut group = c.benchmark_group(\"crossbeam_wait_group\");\n    let count = TEN_THOUSAND;\n    group.throughput(Throughput::Elements(count as u64));\n    group.bench_function(\"add_guard\", |b| {\n        let wg = WaitGroup::new();\n        b.iter(|| {\n            let mut guards: Vec<crossbeam_utils::sync::WaitGroup> = Vec::with_capacity(count);\n            for _i in 0..count {\n                guards.push(wg.clone());\n            }\n            // guards are dropped here\n        });\n    });\n    group.finish();\n}\n\ncriterion_group!(\n    benches,\n    bench_crossbeam_bounded_sync,\n    bench_crossbeam_unbounded_sync,\n    bench_crossbeam_select_mpsc,\n    bench_crossbeam_wait_group\n);\ncriterion_main!(benches);\n"
  },
  {
    "path": "test-suite/benches/crossfire.rs",
    "content": "use criterion::*;\nuse crossfire::waitgroup::{WaitGroup, WaitGroupGuard};\nuse crossfire::*;\nuse std::thread;\nuse std::time::Duration;\n\n#[allow(unused_imports)]\nmod common;\nuse common::*;\n\n// Initialize logger for benchmarks\nfn init_logger() {\n    #[cfg(feature = \"trace_log\")]\n    {\n        use captains_log::*;\n        use std::sync::Once;\n        static INIT: Once = Once::new();\n        INIT.call_once(|| {\n            let format = recipe::LOG_FORMAT_THREADED_DEBUG;\n            let ring = ringfile::LogRingFile::new(\n                \"/tmp/crossfire_ring.log\",\n                500 * 1024 * 1024,\n                Level::Debug,\n                format,\n            );\n            let mut config = Builder::default()\n                .signal(signal_consts::SIGINT)\n                .signal(signal_consts::SIGTERM)\n                .add_sink(ring)\n                .add_sink(LogConsole::new(\n                    ConsoleTarget::Stdout,\n                    Level::Info,\n                    recipe::LOG_FORMAT_DEBUG,\n                ));\n            config.dynamic = true;\n            config.panic = true;\n            config.build().expect(\"log_setup\");\n        });\n    }\n}\n\nmacro_rules! 
bench_bounded_blocking {\n    ($group: expr, $name: expr, $tx: expr, $rx: expr, $new: expr, $size: expr, $count: expr) => {\n        bench_bounded_blocking!($group, $name, $tx, $rx, $new, $size, $count, 20, 100);\n    };\n    ($group: expr, $name: expr, $tx: expr, $rx: expr, $new: expr, $size: expr, $count: expr, $time: expr, $sample: expr) => {\n        $group.throughput(Throughput::Elements($count as u64));\n        $group.significance_level(0.1).sample_size($sample);\n        $group.measurement_time(Duration::from_secs($time));\n        let param = Concurrency { tx_count: $tx, rx_count: $rx };\n        $group.bench_with_input(\n            BenchmarkId::new(format!(\"{}_{}\", $name, $size).to_string(), &param),\n            &param,\n            |b, i| {\n                b.iter(move || {\n                    let (tx, rx) = $new($size);\n                    _crossfire_blocking(\n                        tx.clone_to_vec(i.tx_count),\n                        rx.clone_to_vec(i.rx_count),\n                        $count,\n                    );\n                })\n            },\n        );\n    };\n}\n\nmacro_rules! 
bench_unbounded_blocking {\n    ($group: expr, $name: expr, $tx: expr, $rx: expr, $new: expr, $count: expr) => {\n        bench_unbounded_blocking!($group, $name, $tx, $rx, $new, $count, 20, 100);\n    };\n    ($group: expr, $name: expr, $tx: expr, $rx: expr, $new: expr, $count: expr, $time: expr, $sample: expr) => {\n        $group.throughput(Throughput::Elements($count as u64));\n        $group.significance_level(0.1).sample_size($sample);\n        $group.measurement_time(Duration::from_secs($time));\n        let param = Concurrency { tx_count: $tx, rx_count: $rx };\n        $group.bench_with_input(\n            BenchmarkId::new(format!(\"{}\", $name).to_string(), &param),\n            &param,\n            |b, i| {\n                b.iter(move || {\n                    let (tx, rx) = $new();\n                    _crossfire_blocking(\n                        tx.clone_to_vec(i.tx_count),\n                        rx.clone_to_vec(i.rx_count),\n                        $count,\n                    );\n                })\n            },\n        );\n    };\n}\n\nmacro_rules! 
bench_bounded_async {\n    ($group: expr, $name: expr, $tx: expr, $rx: expr, $new: expr, $size: expr, $count: expr) => {\n        bench_bounded_async!($group, $name, $tx, $rx, $new, $size, $count, 20, 100);\n    };\n    ($group: expr, $name: expr, $tx: expr, $rx: expr, $new: expr, $size: expr, $count: expr, $time: expr, $sample: expr) => {\n        $group.throughput(Throughput::Elements($count as u64));\n        $group.significance_level(0.1).sample_size($sample);\n        $group.measurement_time(Duration::from_secs($time));\n        let param = Concurrency { tx_count: $tx, rx_count: $rx };\n        $group.bench_with_input(\n            BenchmarkId::new(format!(\"{}_{}\", $name, $size).to_string(), &param),\n            &param,\n            |b, i| {\n                b.to_async(BenchExecutor()).iter(async || {\n                    let (tx, rx) = $new($size);\n                    _crossfire_bounded_async(\n                        tx.clone_to_vec(i.tx_count),\n                        rx.clone_to_vec(i.rx_count),\n                        $count,\n                    )\n                    .await;\n                })\n            },\n        );\n    };\n}\n\nmacro_rules! 
bench_unbounded_async {\n    ($group: expr, $name: expr, $tx: expr, $rx: expr, $new: expr, $count: expr) => {\n        bench_unbounded_async!($group, $name, $tx, $rx, $new, $count, 20, 100);\n    };\n    ($group: expr, $name: expr, $tx: expr, $rx: expr, $new: expr, $count: expr, $time: expr, $sample: expr) => {\n        $group.throughput(Throughput::Elements($count as u64));\n        $group.significance_level(0.1).sample_size($sample);\n        $group.measurement_time(Duration::from_secs($time));\n        let param = Concurrency { tx_count: $tx, rx_count: $rx };\n        $group.bench_with_input(\n            BenchmarkId::new(format!(\"{}\", $name).to_string(), &param),\n            &param,\n            |b, i| {\n                b.to_async(BenchExecutor()).iter(async || {\n                    let (tx, rx) = $new();\n                    _crossfire_blocking_async(\n                        tx.clone_to_vec(i.tx_count),\n                        rx.clone_to_vec(i.rx_count),\n                        $count,\n                    )\n                    .await;\n                })\n            },\n        );\n    };\n}\n\nfn _crossfire_blocking<T: BlockingTxTrait<usize>, R: BlockingRxTrait<usize>>(\n    txs: Vec<T>, mut rxs: Vec<R>, msg_count: usize,\n) {\n    let mut th_tx = Vec::new();\n    let mut th_rx = Vec::new();\n    let mut send_counter: usize = 0;\n    let _send_counter = msg_count / txs.len();\n    for (i, _tx) in txs.into_iter().enumerate() {\n        send_counter += _send_counter;\n        let th_builder = thread::Builder::new().name(format!(\"sender{}\", i));\n        th_tx.push(\n            th_builder\n                .spawn(move || {\n                    for i in 0.._send_counter {\n                        _tx.send(i).expect(\"send\");\n                    }\n                    crossfire::trace_log!(\"sender exit {:?}\", _tx);\n                })\n                .expect(\"spawn\"),\n        );\n    }\n    let rx_count = rxs.len();\n    for i in 0..(rx_count 
- 1) {\n        let _rx = rxs.pop().unwrap();\n        let th_builder = thread::Builder::new().name(format!(\"receiver{}\", i));\n        th_rx.push(\n            th_builder\n                .spawn(move || -> usize {\n                    let mut i = 0;\n                    loop {\n                        match _rx.recv() {\n                            Ok(_) => {\n                                i += 1;\n                            }\n                            Err(_) => {\n                                break;\n                            }\n                        }\n                    }\n                    i\n                })\n                .expect(\"spawn\"),\n        );\n    }\n    let rx = rxs.pop().unwrap();\n    let mut recv_counter = 0;\n    loop {\n        match rx.recv() {\n            Ok(_) => {\n                recv_counter += 1;\n            }\n            Err(_) => {\n                break;\n            }\n        }\n    }\n    for th in th_tx {\n        let _ = th.join().unwrap();\n    }\n    for th in th_rx {\n        recv_counter += th.join().unwrap();\n    }\n    assert_eq!(send_counter, recv_counter);\n    crossfire::trace_log!(\"---\");\n}\n\nasync fn _crossfire_blocking_async<T: BlockingTxTrait<usize>, R: AsyncRxTrait<usize>>(\n    txs: Vec<T>, mut rxs: Vec<R>, msg_count: usize,\n) {\n    let mut send_counter: usize = 0;\n    let _send_counter = msg_count / txs.len();\n    let mut th_tx = Vec::new();\n    for tx in txs {\n        send_counter += _send_counter;\n        th_tx.push(thread::spawn(move || {\n            for i in 0.._send_counter {\n                if let Err(e) = tx.send(i) {\n                    panic!(\"send error: {:?}\", e);\n                }\n            }\n        }));\n    }\n    let mut recv_counter = 0;\n    let rx_count = rxs.len();\n    let mut th_rx = Vec::new();\n    for _ in 0..(rx_count - 1) {\n        let _rx = rxs.pop().unwrap();\n        th_rx.push(async_spawn!(async move {\n            let mut i = 0;\n   
         loop {\n                match _rx.recv().await {\n                    Ok(_) => {\n                        i += 1;\n                    }\n                    Err(_) => {\n                        break;\n                    }\n                }\n            }\n            i\n        }));\n    }\n    let rx = rxs.pop().unwrap();\n    loop {\n        match rx.recv().await {\n            Ok(_) => {\n                recv_counter += 1;\n            }\n            Err(_) => {\n                break;\n            }\n        }\n    }\n    assert_eq!(rxs.len(), 0);\n    for th in th_tx {\n        let _ = th.join().unwrap();\n    }\n    for th in th_rx {\n        recv_counter += async_join_result!(th);\n    }\n    assert_eq!(send_counter, recv_counter);\n}\n\nasync fn _crossfire_bounded_async<T: AsyncTxTrait<usize>, R: AsyncRxTrait<usize>>(\n    txs: Vec<T>, mut rxs: Vec<R>, msg_count: usize,\n) {\n    let mut send_counter: usize = 0;\n    let _send_counter = msg_count / txs.len();\n    let mut th_tx = Vec::new();\n    let mut th_rx = Vec::new();\n    for tx in txs {\n        send_counter += _send_counter;\n        th_tx.push(async_spawn!(async move {\n            for i in 0.._send_counter {\n                if let Err(e) = tx.send(i).await {\n                    panic!(\"send error: {:?}\", e);\n                }\n            }\n        }));\n    }\n    let mut recv_counter = 0;\n    let rx_count = rxs.len();\n    for _ in 0..(rx_count - 1) {\n        let _rx = rxs.pop().unwrap();\n        th_rx.push(async_spawn!(async move {\n            let mut i = 0;\n            loop {\n                match _rx.recv().await {\n                    Ok(_) => {\n                        i += 1;\n                    }\n                    Err(_) => {\n                        break;\n                    }\n                }\n            }\n            i\n        }));\n    }\n    let rx = rxs.pop().unwrap();\n    loop {\n        match rx.recv().await {\n            Ok(_) => {\n         
       recv_counter += 1;\n            }\n            Err(_) => {\n                break;\n            }\n        }\n    }\n    for th in th_tx {\n        let _ = th.await;\n    }\n    for th in th_rx {\n        recv_counter += async_join_result!(th);\n    }\n    assert_eq!(send_counter, recv_counter);\n}\n\nfn crossfire_bounded_1_blocking_1_1(c: &mut Criterion) {\n    detect_backoff_cfg();\n    init_logger();\n    let mut group = c.benchmark_group(\"crossfire_bounded_1_blocking_1_1\");\n    bench_bounded_blocking!(group, \"spsc\", 1, 1, spsc::bounded_blocking, 1, TEN_THOUSAND, 10, 100);\n    bench_bounded_blocking!(group, \"mpsc\", 1, 1, mpsc::bounded_blocking, 1, TEN_THOUSAND, 10, 100);\n    bench_bounded_blocking!(group, \"mpmc\", 1, 1, mpmc::bounded_blocking, 1, TEN_THOUSAND, 10, 100);\n    group.finish();\n}\n\nfn crossfire_bounded_1_blocking_n_1(c: &mut Criterion) {\n    detect_backoff_cfg();\n    init_logger();\n    let mut group = c.benchmark_group(\"crossfire_bounded_1_blocking_n_1\");\n    for input in n_1() {\n        bench_bounded_blocking!(\n            group,\n            \"mpsc\",\n            input,\n            1,\n            mpsc::bounded_blocking,\n            1,\n            TEN_THOUSAND,\n            10,\n            100\n        );\n    }\n    for input in n_1() {\n        bench_bounded_blocking!(\n            group,\n            \"mpmc\",\n            input,\n            1,\n            mpmc::bounded_blocking,\n            1,\n            TEN_THOUSAND,\n            10,\n            100\n        );\n    }\n    group.finish();\n}\n\nfn crossfire_bounded_1_blocking_n_n(c: &mut Criterion) {\n    detect_backoff_cfg();\n    init_logger();\n    let mut group = c.benchmark_group(\"crossfire_bounded_1_blocking_n_n\");\n    for input in n_n() {\n        bench_bounded_blocking!(\n            group,\n            \"mpmc\",\n            input.0,\n            input.1,\n            mpmc::bounded_blocking,\n            1,\n            TEN_THOUSAND,\n         
   10,\n            100\n        );\n    }\n    group.finish();\n}\n\nfn crossfire_bounded_100_blocking_1_1(c: &mut Criterion) {\n    detect_backoff_cfg();\n    init_logger();\n    let mut group = c.benchmark_group(\"crossfire_bounded_100_blocking_1_1\");\n    bench_bounded_blocking!(group, \"spsc\", 1, 1, spsc::bounded_blocking, 100, ONE_MILLION);\n    bench_bounded_blocking!(group, \"mpsc\", 1, 1, mpsc::bounded_blocking, 100, ONE_MILLION);\n    bench_bounded_blocking!(group, \"mpmc\", 1, 1, mpmc::bounded_blocking, 100, ONE_MILLION);\n    group.finish();\n}\n\nfn crossfire_bounded_100_blocking_n_1(c: &mut Criterion) {\n    detect_backoff_cfg();\n    init_logger();\n    let mut group = c.benchmark_group(\"crossfire_bounded_100_blocking_n_1\");\n    for input in n_1() {\n        bench_bounded_blocking!(group, \"mpsc\", input, 1, mpsc::bounded_blocking, 100, ONE_MILLION);\n    }\n    for input in n_1() {\n        bench_bounded_blocking!(group, \"mpmc\", input, 1, mpmc::bounded_blocking, 100, ONE_MILLION);\n    }\n    group.finish();\n}\n\nfn crossfire_bounded_100_blocking_n_n(c: &mut Criterion) {\n    detect_backoff_cfg();\n    init_logger();\n    let mut group = c.benchmark_group(\"crossfire_bounded_100_blocking_n_n\");\n    for input in n_n() {\n        bench_bounded_blocking!(\n            group,\n            \"mpmc\",\n            input.0,\n            input.1,\n            mpmc::bounded_blocking,\n            100,\n            ONE_MILLION\n        );\n    }\n    group.finish();\n}\n\nfn crossfire_bounded_1_async_1_1(c: &mut Criterion) {\n    detect_backoff_cfg();\n    init_logger();\n    let mut group = c.benchmark_group(\"crossfire_bounded_1_async_1_1\");\n    bench_bounded_async!(group, \"spsc\", 1, 1, spsc::bounded_async, 1, TEN_THOUSAND, 10, 100);\n    bench_bounded_async!(group, \"mpsc\", 1, 1, mpsc::bounded_async, 1, TEN_THOUSAND, 10, 100);\n    bench_bounded_async!(group, \"mpmc\", 1, 1, mpmc::bounded_async, 1, TEN_THOUSAND, 10, 100);\n    
group.finish();\n}\n\nfn crossfire_bounded_1_async_n_1(c: &mut Criterion) {\n    detect_backoff_cfg();\n    init_logger();\n    let mut group = c.benchmark_group(\"crossfire_bounded_1_async_n_1\");\n    for input in n_1() {\n        bench_bounded_async!(\n            group,\n            \"mpsc\",\n            input,\n            1,\n            mpsc::bounded_async,\n            1,\n            TEN_THOUSAND,\n            10,\n            100\n        );\n    }\n    for input in n_1() {\n        bench_bounded_async!(\n            group,\n            \"mpmc\",\n            input,\n            1,\n            mpmc::bounded_async,\n            1,\n            TEN_THOUSAND,\n            10,\n            100\n        );\n    }\n    group.finish();\n}\n\nfn crossfire_bounded_1_async_n_n(c: &mut Criterion) {\n    detect_backoff_cfg();\n    init_logger();\n    let mut group = c.benchmark_group(\"crossfire_bounded_1_async_n_n\");\n    for input in n_n() {\n        bench_bounded_async!(\n            group,\n            \"mpmc\",\n            input.0,\n            input.1,\n            mpmc::bounded_async,\n            1,\n            TEN_THOUSAND,\n            10,\n            100\n        );\n    }\n    group.finish();\n}\n\nfn crossfire_bounded_100_async_1_1(c: &mut Criterion) {\n    detect_backoff_cfg();\n    init_logger();\n    let mut group = c.benchmark_group(\"crossfire_bounded_100_async_1_1\");\n    bench_bounded_async!(group, \"spsc\", 1, 1, spsc::bounded_async, 100, ONE_MILLION);\n    bench_bounded_async!(group, \"mpsc\", 1, 1, mpsc::bounded_async, 100, ONE_MILLION);\n    bench_bounded_async!(group, \"mpmc\", 1, 1, mpmc::bounded_async, 100, ONE_MILLION);\n    group.finish();\n}\n\nfn crossfire_bounded_100_async_n_1(c: &mut Criterion) {\n    detect_backoff_cfg();\n    init_logger();\n    let mut group = c.benchmark_group(\"crossfire_bounded_100_async_n_1\");\n    for input in n_1() {\n        bench_bounded_async!(group, \"mpsc\", input, 1, mpsc::bounded_async, 100, 
ONE_MILLION);\n    }\n\n    for input in n_1() {\n        bench_bounded_async!(group, \"mpmc\", input, 1, mpmc::bounded_async, 100, ONE_MILLION);\n    }\n    group.finish();\n}\n\nfn crossfire_bounded_100_async_n_n(c: &mut Criterion) {\n    detect_backoff_cfg();\n    init_logger();\n    let mut group = c.benchmark_group(\"crossfire_bounded_100_async_n_n\");\n    for input in n_n() {\n        bench_bounded_async!(\n            group,\n            \"mpmc\",\n            input.0,\n            input.1,\n            mpmc::bounded_async,\n            100,\n            ONE_MILLION\n        );\n    }\n    group.finish();\n}\n\nfn crossfire_unbounded_blocking_1_1(c: &mut Criterion) {\n    detect_backoff_cfg();\n    init_logger();\n    let mut group = c.benchmark_group(\"crossfire_unbounded_blocking_1_1\");\n    bench_unbounded_blocking!(group, \"spsc\", 1, 1, spsc::unbounded_blocking, ONE_MILLION);\n    bench_unbounded_blocking!(group, \"mpsc\", 1, 1, mpsc::unbounded_blocking, ONE_MILLION);\n    bench_unbounded_blocking!(group, \"mpmc\", 1, 1, mpmc::unbounded_blocking, ONE_MILLION);\n    group.finish();\n}\n\nfn crossfire_unbounded_blocking_n_1(c: &mut Criterion) {\n    detect_backoff_cfg();\n    init_logger();\n    let mut group = c.benchmark_group(\"crossfire_unbounded_blocking_n_1\");\n    for input in n_1() {\n        bench_unbounded_blocking!(group, \"mpsc\", input, 1, mpsc::unbounded_blocking, ONE_MILLION);\n    }\n    for input in n_1() {\n        bench_unbounded_blocking!(group, \"mpmc\", input, 1, mpmc::unbounded_blocking, ONE_MILLION);\n    }\n    group.finish();\n}\n\nfn crossfire_unbounded_blocking_n_n(c: &mut Criterion) {\n    detect_backoff_cfg();\n    init_logger();\n    let mut group = c.benchmark_group(\"crossfire_unbounded_blocking_n_n\");\n    for input in n_n() {\n        bench_unbounded_blocking!(\n            group,\n            \"mpmc\",\n            input.0,\n            input.1,\n            mpmc::unbounded_blocking,\n            ONE_MILLION\n       
 );\n    }\n    group.finish();\n}\n\nfn crossfire_unbounded_async_1_1(c: &mut Criterion) {\n    detect_backoff_cfg();\n    init_logger();\n    let mut group = c.benchmark_group(\"crossfire_unbounded_async_1_1\");\n    bench_unbounded_async!(group, \"spsc\", 1, 1, spsc::unbounded_async, ONE_MILLION);\n    bench_unbounded_async!(group, \"mpsc\", 1, 1, mpsc::unbounded_async, ONE_MILLION);\n    bench_unbounded_async!(group, \"mpmc\", 1, 1, mpmc::unbounded_async, ONE_MILLION);\n    group.finish();\n}\n\nfn crossfire_unbounded_async_mpsc(c: &mut Criterion) {\n    detect_backoff_cfg();\n    init_logger();\n    let mut group = c.benchmark_group(\"crossfire_unbounded_async_n_1\");\n    for input in n_1() {\n        bench_unbounded_async!(group, \"mpsc\", input, 1, mpsc::unbounded_async, ONE_MILLION);\n    }\n    for input in n_1() {\n        bench_unbounded_async!(group, \"mpmc\", input, 1, mpmc::unbounded_async, ONE_MILLION);\n    }\n    group.finish();\n}\n\nfn crossfire_unbounded_async_mpmc(c: &mut Criterion) {\n    detect_backoff_cfg();\n    init_logger();\n    let mut group = c.benchmark_group(\"crossfire_unbounded_async_n_n\");\n    for input in n_n() {\n        bench_unbounded_async!(group, \"mpmc\", input.0, input.1, mpmc::unbounded_async, ONE_MILLION);\n    }\n    group.finish();\n}\n\nfn crossfire_oneshot_blocking(c: &mut Criterion) {\n    detect_backoff_cfg();\n    init_logger();\n    let mut group = c.benchmark_group(\"crossfire_oneshot_blocking\");\n    let count = TEN_THOUSAND;\n    group.throughput(Throughput::Elements(count as u64));\n    group.bench_function(\"spawn\", |b| {\n        b.iter(|| {\n            let mut txs = Vec::with_capacity(count);\n            let mut rxs = Vec::with_capacity(count);\n            for _i in 0..count {\n                let (tx, rx) = crossfire::oneshot::oneshot();\n                txs.push(tx);\n                rxs.push(rx);\n            }\n            thread::spawn(move || {\n                for tx in txs {\n               
     let _ = tx.send(0);\n                }\n            });\n            for rx in rxs {\n                let _ = rx.recv();\n            }\n        })\n    });\n    group.finish();\n}\n\nfn crossfire_oneshot_async(c: &mut Criterion) {\n    detect_backoff_cfg();\n    init_logger();\n    let mut group = c.benchmark_group(\"crossfire_oneshot_async\");\n    let count = TEN_THOUSAND;\n    group.throughput(Throughput::Elements(count as u64));\n    group.bench_function(\"spawn\", |b| {\n        b.to_async(BenchExecutor()).iter(|| async move {\n            let mut txs = Vec::with_capacity(count);\n            let mut rxs = Vec::with_capacity(count);\n            for _i in 0..count {\n                let (tx, rx) = crossfire::oneshot::oneshot();\n                txs.push(tx);\n                rxs.push(rx);\n            }\n            let th = async_spawn!(async move {\n                for tx in txs {\n                    tx.send(0);\n                }\n            });\n            for rx in rxs {\n                let _ = rx.await;\n            }\n            let _ = async_join_result!(th);\n        })\n    });\n    group.finish();\n}\n\nfn bench_crossfire_wait_group(c: &mut Criterion) {\n    let mut group = c.benchmark_group(\"crossfire_wait_group\");\n    let count = TEN_THOUSAND; // Or some appropriate number for throughput\n    group.throughput(Throughput::Elements(count as u64));\n    group.bench_function(\"add_guard\", |b| {\n        let wg = WaitGroup::new(0);\n        b.iter(|| {\n            let mut guards: Vec<WaitGroupGuard> = Vec::with_capacity(count);\n            for _i in 0..count {\n                guards.push(wg.add_guard());\n            }\n            // guards are dropped here\n        });\n    });\n    group.finish();\n}\n\ncriterion_group!(\n    benches,\n    crossfire_bounded_1_blocking_1_1,\n    crossfire_bounded_1_blocking_n_1,\n    crossfire_bounded_1_blocking_n_n,\n    crossfire_bounded_100_blocking_1_1,\n    crossfire_bounded_100_blocking_n_1,\n 
   crossfire_bounded_100_blocking_n_n,\n    crossfire_unbounded_blocking_1_1,\n    crossfire_unbounded_blocking_n_1,\n    crossfire_unbounded_blocking_n_n,\n    crossfire_bounded_1_async_1_1,\n    crossfire_bounded_1_async_n_1,\n    crossfire_bounded_1_async_n_n,\n    crossfire_bounded_100_async_1_1,\n    crossfire_bounded_100_async_n_1,\n    crossfire_bounded_100_async_n_n,\n    crossfire_unbounded_async_1_1,\n    crossfire_unbounded_async_mpsc,\n    crossfire_unbounded_async_mpmc,\n    crossfire_oneshot_blocking,\n    crossfire_oneshot_async,\n    bench_crossfire_wait_group,\n);\ncriterion_main!(benches);\n"
  },
  {
    "path": "test-suite/benches/crossfire_select.rs",
    "content": "use criterion::*;\nuse crossfire::{\n    mpsc::Array,\n    select::{Multiplex, Mux, Select, SelectMode},\n    *,\n};\nuse std::thread;\nuse std::time::Duration;\n\n#[allow(unused_imports, dead_code)]\nmod common;\nuse common::*;\n\n// Initialize logger for benchmarks\nfn init_logger() {\n    #[cfg(feature = \"trace_log\")]\n    {\n        use captains_log::*;\n        use std::sync::Once;\n        static INIT: Once = Once::new();\n        INIT.call_once(|| {\n            let format = recipe::LOG_FORMAT_THREADED_DEBUG;\n            let ring = ringfile::LogRingFile::new(\n                \"/tmp/crossfire_ring.log\",\n                500 * 1024 * 1024,\n                Level::Debug,\n                format,\n            );\n            let mut config = Builder::default()\n                .signal(signal_consts::SIGINT)\n                .signal(signal_consts::SIGTERM)\n                .add_sink(ring)\n                .add_sink(LogConsole::new(\n                    ConsoleTarget::Stdout,\n                    Level::Info,\n                    recipe::LOG_FORMAT_DEBUG,\n                ));\n            config.dynamic = true;\n            config.build().expect(\"log_setup\");\n        });\n    }\n}\n\nconst NUM_CHANNELS: usize = 4;\nconst BOUND: usize = 100;\n\nfn spawn_senders<T>(txs: Vec<T>, total_msgs: usize) -> Vec<thread::JoinHandle<()>>\nwhere\n    T: BlockingTxTrait<usize> + Send + Clone + 'static,\n{\n    let msgs_per_channel = total_msgs / txs.len();\n    txs.into_iter()\n        .map(|tx| {\n            thread::spawn(move || {\n                for i in 0..msgs_per_channel {\n                    tx.send(i).expect(\"send\");\n                }\n            })\n        })\n        .collect()\n}\n\nfn run_select(mode: SelectMode, total_msgs: usize) {\n    let mut receivers = Vec::with_capacity(NUM_CHANNELS);\n    let mut senders = Vec::with_capacity(NUM_CHANNELS);\n    for _ in 0..NUM_CHANNELS {\n        let (tx, rx) = 
mpsc::bounded_blocking::<usize>(BOUND);\n        receivers.push(rx);\n        senders.push(tx);\n    }\n    let mut select = Select::new_with(mode);\n    for rx in &receivers {\n        select.add(rx);\n    }\n    let handles = spawn_senders(senders, total_msgs);\n    let mut recv_counter = 0;\n    while recv_counter < total_msgs {\n        match select.select() {\n            Ok(res) => {\n                for rx in &receivers {\n                    if res == *rx {\n                        match rx.read_select(res) {\n                            Ok(_) => {\n                                recv_counter += 1;\n                            }\n                            Err(RecvError) => {\n                                select.remove(rx);\n                            }\n                        }\n                        break;\n                    }\n                }\n            }\n            Err(RecvError) => break,\n        }\n    }\n    assert_eq!(total_msgs, recv_counter);\n    for h in handles {\n        h.join().unwrap();\n    }\n}\n\nfn run_multiplex(total_msgs: usize) {\n    let mut mp = Multiplex::<Array<usize>>::new();\n    let mut senders: Vec<MTx<Mux<Array<usize>>>> = Vec::with_capacity(NUM_CHANNELS);\n    for _ in 0..NUM_CHANNELS {\n        let tx = mp.bounded_tx(BOUND);\n        senders.push(tx);\n    }\n    let handles = spawn_senders(senders, total_msgs);\n    let mut recv_counter = 0;\n    while recv_counter < total_msgs {\n        match mp.recv() {\n            Ok(_) => {\n                recv_counter += 1;\n            }\n            Err(RecvError) => break,\n        }\n    }\n    assert_eq!(total_msgs, recv_counter);\n    for h in handles {\n        h.join().unwrap();\n    }\n}\n\nfn bench_select(c: &mut Criterion) {\n    init_logger();\n    let mut group = c.benchmark_group(\"select\");\n    group.significance_level(0.1).sample_size(50);\n    group.measurement_time(Duration::from_secs(20));\n    
group.throughput(Throughput::Elements(ONE_MILLION as u64));\n\n    group.bench_function(\"select_rr\", |b| b.iter(|| run_select(SelectMode::RR, ONE_MILLION)));\n    group.bench_function(\"select_rand\", |b| b.iter(|| run_select(SelectMode::Rand, ONE_MILLION)));\n    group.bench_function(\"select_bias\", |b| b.iter(|| run_select(SelectMode::Bias, ONE_MILLION)));\n\n    group.finish();\n}\n\nfn bench_multiplex(c: &mut Criterion) {\n    init_logger();\n    let mut group = c.benchmark_group(\"multiplex\");\n    group.significance_level(0.1).sample_size(50);\n    group.measurement_time(Duration::from_secs(20));\n    group.throughput(Throughput::Elements(ONE_MILLION as u64));\n\n    group.bench_function(\"multiplex\", |b| b.iter(|| run_multiplex(ONE_MILLION)));\n    group.finish();\n}\n\ncriterion_group!(benches, bench_select, bench_multiplex);\ncriterion_main!(benches);\n"
  },
  {
    "path": "test-suite/benches/extra.rs",
    "content": "use criterion::*;\nuse std::thread;\nmod common;\nuse common::*;\n\nfn bench_async_oneshot_async(c: &mut Criterion) {\n    let mut group = c.benchmark_group(\"async_oneshot_async\");\n    let count = TEN_THOUSAND;\n    group.throughput(Throughput::Elements(count as u64));\n    group.bench_function(\"spawn\", |b| {\n        b.to_async(BenchExecutor()).iter(|| async move {\n            let mut txs = Vec::with_capacity(count);\n            let mut rxs = Vec::with_capacity(count);\n            for _i in 0..count {\n                let (tx, rx) = async_oneshot::oneshot();\n                txs.push(tx);\n                rxs.push(rx);\n            }\n            async_spawn!(async move {\n                for mut tx in txs {\n                    let _ = tx.send(0);\n                }\n            });\n            for rx in rxs {\n                let _ = rx.await;\n            }\n        })\n    });\n    group.finish();\n}\n\nfn bench_oneshot_async(c: &mut Criterion) {\n    let mut group = c.benchmark_group(\"oneshot_async\");\n    let count = TEN_THOUSAND;\n    group.throughput(Throughput::Elements(count as u64));\n    group.bench_function(\"spawn\", |b| {\n        b.to_async(BenchExecutor()).iter(|| async move {\n            let mut txs = Vec::with_capacity(count);\n            let mut rxs = Vec::with_capacity(count);\n            for _i in 0..count {\n                let (tx, rx) = oneshot::channel();\n                txs.push(tx);\n                rxs.push(rx);\n            }\n            async_spawn!(async move {\n                for tx in txs {\n                    let _ = tx.send(0);\n                }\n            });\n            for rx in rxs {\n                let _ = rx.await;\n            }\n        })\n    });\n    group.finish();\n}\n\nfn bench_oneshot_thread(c: &mut Criterion) {\n    let mut group = c.benchmark_group(\"oneshot_thread\");\n    let count = TEN_THOUSAND;\n    group.throughput(Throughput::Elements(count as u64));\n    
group.bench_function(\"thread\", |b| {\n        b.iter(|| {\n            let mut txs = Vec::with_capacity(count);\n            let mut rxs = Vec::with_capacity(count);\n            for _i in 0..count {\n                let (tx, rx) = oneshot::channel();\n                txs.push(tx);\n                rxs.push(rx);\n            }\n            let t = thread::spawn(move || {\n                for tx in txs {\n                    let _ = tx.send(0);\n                }\n            });\n            for rx in rxs {\n                let _ = rx.recv();\n            }\n            t.join().unwrap();\n        })\n    });\n    group.finish();\n}\n\ncriterion_group!(\n    extra_benches,\n    bench_async_oneshot_async,\n    bench_oneshot_async,\n    bench_oneshot_thread\n);\ncriterion_main!(extra_benches);\n"
  },
  {
    "path": "test-suite/benches/flume.rs",
    "content": "use criterion::*;\nuse std::thread;\nuse std::time::Duration;\n\n#[allow(unused_imports)]\nmod common;\nuse common::*;\n\nfn _flume_bounded_sync(bound: usize, tx_count: usize, rx_count: usize, msg_count: usize) {\n    let (tx, rx) = flume::bounded(bound);\n    let mut th_tx = Vec::new();\n    let mut th_rx = Vec::new();\n    let mut send_counter: usize = 0;\n    let _send_counter = msg_count / tx_count;\n    for _tx_i in 0..tx_count {\n        send_counter += _send_counter;\n        let _tx = tx.clone();\n        th_tx.push(thread::spawn(move || {\n            for i in 0.._send_counter {\n                if let Err(e) = _tx.send(i) {\n                    panic!(\"send error: {:?}\", e);\n                }\n            }\n        }));\n    }\n    drop(tx);\n    let mut recv_counter = 0;\n    for _ in 0..(rx_count - 1) {\n        let _rx = rx.clone();\n        th_rx.push(thread::spawn(move || -> usize {\n            let mut i = 0;\n            loop {\n                match _rx.recv() {\n                    Ok(_) => {\n                        i += 1;\n                    }\n                    Err(_) => {\n                        break;\n                    }\n                }\n            }\n            i\n        }));\n    }\n    loop {\n        match rx.recv() {\n            Ok(_) => {\n                recv_counter += 1;\n            }\n            Err(_) => {\n                break;\n            }\n        }\n    }\n    for th in th_tx {\n        let _ = th.join();\n    }\n    for th in th_rx {\n        if let Ok(count) = th.join() {\n            recv_counter += count;\n        }\n    }\n    assert_eq!(send_counter, recv_counter);\n}\n\nfn _flume_unbounded_sync(tx_count: usize, rx_count: usize, msg_count: usize) {\n    let (tx, rx) = flume::unbounded();\n    let mut th_tx = Vec::new();\n    let mut th_rx = Vec::new();\n    let mut send_counter: usize = 0;\n    let _send_counter = msg_count / tx_count;\n    for _tx_i in 0..tx_count {\n        
send_counter += _send_counter;\n        let _tx = tx.clone();\n        th_tx.push(thread::spawn(move || {\n            for i in 0.._send_counter {\n                if let Err(e) = _tx.send(i) {\n                    panic!(\"send error: {:?}\", e);\n                }\n            }\n        }));\n    }\n    drop(tx);\n    let mut recv_counter = 0;\n    for _ in 0..(rx_count - 1) {\n        let _rx = rx.clone();\n        th_rx.push(thread::spawn(move || -> usize {\n            let mut i = 0;\n            loop {\n                match _rx.recv() {\n                    Ok(_) => {\n                        i += 1;\n                    }\n                    Err(_) => {\n                        break;\n                    }\n                }\n            }\n            i\n        }));\n    }\n    loop {\n        match rx.recv() {\n            Ok(_) => {\n                recv_counter += 1;\n            }\n            Err(_) => {\n                break;\n            }\n        }\n    }\n    for th in th_tx {\n        let _ = th.join();\n    }\n    for th in th_rx {\n        if let Ok(count) = th.join() {\n            recv_counter += count;\n        }\n    }\n    assert_eq!(send_counter, recv_counter);\n}\n\nasync fn _flume_unbounded_async(tx_count: usize, rx_count: usize, msg_count: usize) {\n    let (tx, rx) = flume::unbounded();\n    let mut th_tx = Vec::new();\n    let mut th_rx = Vec::new();\n    let mut send_counter: usize = 0;\n    let _send_counter = msg_count / tx_count;\n    for _tx_i in 0..tx_count {\n        send_counter += _send_counter;\n        let _tx = tx.clone();\n        th_tx.push(async_spawn!(async move {\n            for i in 0.._send_counter {\n                if let Err(e) = _tx.send(i) {\n                    panic!(\"send error: {:?}\", e);\n                }\n            }\n        }));\n    }\n    drop(tx);\n    let mut recv_counter = 0;\n    for _ in 0..(rx_count - 1) {\n        let _rx = rx.clone();\n        th_rx.push(async_spawn!(async move 
{\n            let mut i = 0;\n            loop {\n                match _rx.recv_async().await {\n                    Ok(_) => {\n                        i += 1;\n                    }\n                    Err(_) => {\n                        break;\n                    }\n                }\n            }\n            i\n        }));\n    }\n    loop {\n        match rx.recv_async().await {\n            Ok(_) => {\n                recv_counter += 1;\n            }\n            Err(_) => {\n                break;\n            }\n        }\n    }\n    for th in th_tx {\n        let _ = th.await;\n    }\n    for th in th_rx {\n        recv_counter += async_join_result!(th);\n    }\n    assert_eq!(send_counter, recv_counter);\n}\n\nasync fn _flume_bounded_async(bound: usize, tx_count: usize, rx_count: usize, msg_count: usize) {\n    let (tx, rx) = flume::bounded(bound);\n    let mut th_tx = Vec::new();\n    let mut th_rx = Vec::new();\n    let mut send_counter: usize = 0;\n    let _send_counter = msg_count / tx_count;\n    for _tx_i in 0..tx_count {\n        send_counter += _send_counter;\n        let _tx = tx.clone();\n        th_tx.push(async_spawn!(async move {\n            for i in 0.._send_counter {\n                if let Err(e) = _tx.send_async(i).await {\n                    panic!(\"send error: {:?}\", e);\n                }\n            }\n        }));\n    }\n    drop(tx);\n    let mut recv_counter = 0;\n    for _ in 0..(rx_count - 1) {\n        let _rx = rx.clone();\n        th_rx.push(async_spawn!(async move {\n            let mut i = 0;\n            loop {\n                match _rx.recv_async().await {\n                    Ok(_) => {\n                        i += 1;\n                    }\n                    Err(_) => {\n                        break;\n                    }\n                }\n            }\n            i\n        }));\n    }\n    loop {\n        match rx.recv_async().await {\n            Ok(_) => {\n                recv_counter += 
1;\n            }\n            Err(_) => {\n                break;\n            }\n        }\n    }\n    for th in th_tx {\n        let _ = th.await;\n    }\n    for th in th_rx {\n        recv_counter += async_join_result!(th);\n    }\n    assert_eq!(send_counter, recv_counter);\n}\n\nfn bench_flume_bounded_sync(c: &mut Criterion) {\n    let mut group = c.benchmark_group(\"flume_bounded_blocking\");\n    group.significance_level(0.1).sample_size(50);\n    group.throughput(Throughput::Elements(ONE_MILLION as u64));\n    group.measurement_time(Duration::from_secs(15));\n    for input in n_1() {\n        let param = Concurrency { tx_count: input, rx_count: 1 };\n        group.throughput(Throughput::Elements(TEN_THOUSAND as u64));\n        group.bench_with_input(BenchmarkId::new(\"mpsc size 1\", input), &param, |b, i| {\n            b.iter(|| _flume_bounded_sync(1, i.tx_count, i.rx_count, TEN_THOUSAND))\n        });\n    }\n    for input in n_1() {\n        let param = Concurrency { tx_count: input, rx_count: 1 };\n        group.throughput(Throughput::Elements(ONE_MILLION as u64));\n        group.bench_with_input(BenchmarkId::new(\"mpsc size 100\", input), &param, |b, i| {\n            b.iter(|| _flume_bounded_sync(100, i.tx_count, i.rx_count, ONE_MILLION))\n        });\n    }\n    for input in n_n() {\n        let param = Concurrency { tx_count: input.0, rx_count: input.1 };\n        group.throughput(Throughput::Elements(ONE_MILLION as u64));\n        group.bench_with_input(\n            BenchmarkId::new(\"mpmc size 100\", param.to_string()),\n            &param,\n            |b, i| b.iter(|| _flume_bounded_sync(100, i.tx_count, i.rx_count, ONE_MILLION)),\n        );\n    }\n    group.finish();\n}\n\nfn bench_flume_unbounded_async(c: &mut Criterion) {\n    let mut group = c.benchmark_group(\"flume_unbounded_async\");\n    group.significance_level(0.1).sample_size(50);\n    group.measurement_time(Duration::from_secs(20));\n    for input in n_1() {\n        let param = 
Concurrency { tx_count: input, rx_count: 1 };\n        group.throughput(Throughput::Elements(ONE_MILLION as u64));\n        group.bench_with_input(BenchmarkId::new(\"mpsc\", &param), &param, |b, i| {\n            b.to_async(BenchExecutor())\n                .iter(|| _flume_unbounded_async(i.tx_count, i.rx_count, ONE_MILLION))\n        });\n    }\n    for input in n_n() {\n        let param = Concurrency { tx_count: input.0, rx_count: input.1 };\n        group.throughput(Throughput::Elements(ONE_MILLION as u64));\n        group.bench_with_input(BenchmarkId::new(\"mpmc\", &param), &param, |b, i| {\n            b.to_async(BenchExecutor())\n                .iter(|| _flume_unbounded_async(i.tx_count, i.rx_count, ONE_MILLION))\n        });\n    }\n}\n\nfn bench_flume_bounded_async(c: &mut Criterion) {\n    let mut group = c.benchmark_group(\"flume_bounded_async\");\n    group.significance_level(0.1).sample_size(50);\n    group.measurement_time(Duration::from_secs(20));\n    for input in n_1() {\n        let param = Concurrency { tx_count: input, rx_count: 1 };\n        group.throughput(Throughput::Elements(TEN_THOUSAND as u64));\n        group.bench_with_input(BenchmarkId::new(\"mpsc size 1\", &param), &param, |b, i| {\n            b.to_async(BenchExecutor())\n                .iter(|| _flume_bounded_async(1, i.tx_count, i.rx_count, TEN_THOUSAND))\n        });\n    }\n\n    for input in n_1() {\n        let param = Concurrency { tx_count: input, rx_count: 1 };\n        group.throughput(Throughput::Elements(ONE_MILLION as u64));\n        group.bench_with_input(BenchmarkId::new(\"mpsc size 100\", &param), &param, |b, i| {\n            b.to_async(BenchExecutor())\n                .iter(|| _flume_bounded_async(100, i.tx_count, i.rx_count, ONE_MILLION))\n        });\n    }\n    for input in n_n() {\n        let param = Concurrency { tx_count: input.0, rx_count: input.1 };\n        group.throughput(Throughput::Elements(ONE_MILLION as u64));\n        
group.bench_with_input(BenchmarkId::new(\"mpmc size 100\", &param), &param, |b, i| {\n            b.to_async(BenchExecutor())\n                .iter(|| _flume_bounded_async(100, i.tx_count, i.rx_count, ONE_MILLION))\n        });\n    }\n}\n\nfn bench_flume_unbounded_sync(c: &mut Criterion) {\n    let mut group = c.benchmark_group(\"flume_unbounded_blocking\");\n    group.significance_level(0.1).sample_size(50);\n    group.measurement_time(Duration::from_secs(20));\n    for input in n_1() {\n        let param = Concurrency { tx_count: input, rx_count: 1 };\n        group.throughput(Throughput::Elements(ONE_MILLION as u64));\n        group.bench_with_input(BenchmarkId::new(\"mpsc\", &param), &param, |b, i| {\n            b.iter(|| _flume_unbounded_sync(i.tx_count, i.rx_count, ONE_MILLION))\n        });\n    }\n    for input in n_n() {\n        let param = Concurrency { tx_count: input.0, rx_count: input.1 };\n        group.throughput(Throughput::Elements(ONE_MILLION as u64));\n        group.bench_with_input(BenchmarkId::new(\"mpmc\", &param), &param, |b, i| {\n            b.iter(|| _flume_unbounded_sync(i.tx_count, i.rx_count, ONE_MILLION))\n        });\n    }\n}\n\ncriterion_group!(\n    benches,\n    bench_flume_bounded_sync,\n    bench_flume_bounded_async,\n    bench_flume_unbounded_async,\n    bench_flume_unbounded_sync,\n);\ncriterion_main!(benches);\n"
  },
  {
    "path": "test-suite/benches/kanal.rs",
    "content": "use criterion::*;\nuse std::thread;\nuse std::time::Duration;\n\n#[allow(unused_imports)]\nmod common;\nuse common::*;\n\nfn _kanal_bounded_blocking(bound: usize, tx_count: usize, rx_count: usize, msg_count: usize) {\n    let (tx, rx) = kanal::bounded::<usize>(bound);\n    let mut th_tx = Vec::new();\n    let mut th_rx = Vec::new();\n    let mut send_counter: usize = 0;\n    let _send_counter = msg_count / tx_count;\n    for _ in 0..tx_count {\n        send_counter += _send_counter;\n        let _tx = tx.clone();\n        th_tx.push(thread::spawn(move || {\n            for i in 0.._send_counter {\n                _tx.send(i).expect(\"send\");\n            }\n        }));\n    }\n    drop(tx);\n    let mut recv_counter = 0;\n    for _ in 0..(rx_count - 1) {\n        let _rx = rx.clone();\n        th_rx.push(thread::spawn(move || -> usize {\n            let mut i = 0;\n            loop {\n                match _rx.recv() {\n                    Ok(_) => {\n                        i += 1;\n                    }\n                    Err(_) => {\n                        break;\n                    }\n                }\n            }\n            i\n        }));\n    }\n    loop {\n        match rx.recv() {\n            Ok(_) => {\n                recv_counter += 1;\n            }\n            Err(_) => {\n                break;\n            }\n        }\n    }\n    for th in th_tx {\n        let _ = th.join();\n    }\n    for th in th_rx {\n        if let Ok(count) = th.join() {\n            recv_counter += count;\n        }\n    }\n    assert_eq!(send_counter, recv_counter);\n}\n\nfn _kanal_unbounded_blocking(tx_count: usize, rx_count: usize, msg_count: usize) {\n    let (tx, rx) = kanal::unbounded::<usize>();\n    let mut th_tx = Vec::new();\n    let mut th_rx = Vec::new();\n    let mut send_counter: usize = 0;\n    let _send_counter = msg_count / tx_count;\n    for _ in 0..tx_count {\n        send_counter += _send_counter;\n        let _tx = 
tx.clone();\n        th_tx.push(thread::spawn(move || {\n            for i in 0.._send_counter {\n                _tx.send(i).expect(\"send\");\n            }\n        }));\n    }\n    drop(tx);\n    let mut recv_counter = 0;\n    for _ in 0..(rx_count - 1) {\n        let _rx = rx.clone();\n        th_rx.push(thread::spawn(move || -> usize {\n            let mut i = 0;\n            loop {\n                match _rx.recv() {\n                    Ok(_) => {\n                        i += 1;\n                    }\n                    Err(_) => {\n                        break;\n                    }\n                }\n            }\n            i\n        }));\n    }\n    loop {\n        match rx.recv() {\n            Ok(_) => {\n                recv_counter += 1;\n            }\n            Err(_) => {\n                break;\n            }\n        }\n    }\n    for th in th_tx {\n        let _ = th.join();\n    }\n    for th in th_rx {\n        if let Ok(count) = th.join() {\n            recv_counter += count;\n        }\n    }\n    assert_eq!(send_counter, recv_counter);\n}\n\nasync fn _kanal_bounded_async(bound: usize, tx_count: usize, rx_count: usize, msg_count: usize) {\n    let (tx, rx) = kanal::bounded_async(bound);\n    let mut th_tx = Vec::new();\n    let mut th_rx = Vec::new();\n    let mut send_counter: usize = 0;\n    let _send_counter = msg_count / tx_count;\n    for _tx_i in 0..tx_count {\n        send_counter += _send_counter;\n        let _tx = tx.clone();\n        th_tx.push(async_spawn!(async move {\n            for i in 0.._send_counter {\n                if let Err(e) = _tx.send(i).await {\n                    panic!(\"send error: {:?}\", e);\n                }\n            }\n        }));\n    }\n    drop(tx);\n    let mut recv_counter = 0;\n    for _ in 0..(rx_count - 1) {\n        let _rx = rx.clone();\n        th_rx.push(async_spawn!(async move {\n            let mut i = 0;\n            loop {\n                match _rx.recv().await {\n      
              Ok(_) => {\n                        i += 1;\n                    }\n                    Err(_) => {\n                        break;\n                    }\n                }\n            }\n            i\n        }));\n    }\n    loop {\n        match rx.recv().await {\n            Ok(_) => {\n                recv_counter += 1;\n            }\n            Err(_) => {\n                break;\n            }\n        }\n    }\n    for th in th_tx {\n        let _ = async_join_result!(th);\n    }\n    for th in th_rx {\n        recv_counter += async_join_result!(th);\n    }\n    assert_eq!(send_counter, recv_counter);\n}\n\nasync fn _kanal_unbounded_async(tx_count: usize, rx_count: usize, msg_count: usize) {\n    let (tx, rx) = kanal::unbounded_async();\n    let mut th_tx = Vec::new();\n    let mut th_rx = Vec::new();\n    let mut send_counter: usize = 0;\n    let _send_counter = msg_count / tx_count;\n    for _tx_i in 0..tx_count {\n        send_counter += _send_counter;\n        let _tx = tx.clone();\n        th_tx.push(async_spawn!(async move {\n            for i in 0.._send_counter {\n                if let Err(e) = _tx.send(i).await {\n                    panic!(\"send error: {:?}\", e);\n                }\n            }\n        }));\n    }\n    drop(tx);\n    let mut recv_counter = 0;\n    for _ in 0..(rx_count - 1) {\n        let _rx = rx.clone();\n        th_rx.push(async_spawn!(async move {\n            let mut i = 0;\n            loop {\n                match _rx.recv().await {\n                    Ok(_) => {\n                        i += 1;\n                    }\n\n                    Err(_) => {\n                        break;\n                    }\n                }\n            }\n            i\n        }));\n    }\n    loop {\n        match rx.recv().await {\n            Ok(_) => {\n                recv_counter += 1;\n            }\n            Err(_) => {\n                break;\n            }\n        }\n    }\n    for th in th_tx {\n  
      let _ = async_join_result!(th);\n    }\n    for th in th_rx {\n        recv_counter += async_join_result!(th);\n    }\n    assert_eq!(send_counter, recv_counter);\n}\n\nfn bench_kanal_bounded_blocking(c: &mut Criterion) {\n    let mut group = c.benchmark_group(\"kanal_bounded_blocking\");\n    group.significance_level(0.1).sample_size(50);\n    group.measurement_time(Duration::from_secs(20));\n    for input in n_1() {\n        let param = Concurrency { tx_count: input, rx_count: 1 };\n        group.throughput(Throughput::Elements(TEN_THOUSAND as u64));\n        group.bench_with_input(BenchmarkId::new(\"mpsc size 1\", &param), &param, |b, i| {\n            b.iter(|| _kanal_bounded_blocking(1, i.tx_count, i.rx_count, TEN_THOUSAND))\n        });\n    }\n    for input in n_1() {\n        let param = Concurrency { tx_count: input, rx_count: 1 };\n        group.throughput(Throughput::Elements(ONE_MILLION as u64));\n        group.bench_with_input(BenchmarkId::new(\"mpsc size 100\", &param), &param, |b, i| {\n            b.iter(|| _kanal_bounded_blocking(100, i.tx_count, i.rx_count, ONE_MILLION))\n        });\n    }\n    for input in n_n() {\n        let param = Concurrency { tx_count: input.0, rx_count: input.1 };\n        group.throughput(Throughput::Elements(ONE_MILLION as u64));\n        group.bench_with_input(BenchmarkId::new(\"mpmc size 100\", &param), &param, |b, i| {\n            b.iter(|| _kanal_bounded_blocking(100, i.tx_count, i.rx_count, ONE_MILLION))\n        });\n    }\n}\n\nfn bench_kanal_unbounded_blocking(c: &mut Criterion) {\n    let mut group = c.benchmark_group(\"kanal_unbounded_blocking\");\n    group.significance_level(0.1).sample_size(50);\n    group.measurement_time(Duration::from_secs(20));\n    for input in n_1() {\n        let param = Concurrency { tx_count: input, rx_count: 1 };\n        group.throughput(Throughput::Elements(ONE_MILLION as u64));\n        group.bench_with_input(BenchmarkId::new(\"mpsc\", &param), &param, |b, i| {\n         
   b.iter(|| _kanal_unbounded_blocking(i.tx_count, i.rx_count, ONE_MILLION))\n        });\n    }\n    for input in n_n() {\n        let param = Concurrency { tx_count: input.0, rx_count: input.1 };\n        group.throughput(Throughput::Elements(ONE_MILLION as u64));\n        group.bench_with_input(BenchmarkId::new(\"mpmc\", &param), &param, |b, i| {\n            b.iter(|| _kanal_unbounded_blocking(i.tx_count, i.rx_count, ONE_MILLION))\n        });\n    }\n}\n\nfn bench_kanal_bounded_async(c: &mut Criterion) {\n    let mut group = c.benchmark_group(\"kanal_bounded_async\");\n    group.significance_level(0.1).sample_size(50);\n    group.measurement_time(Duration::from_secs(20));\n    for input in n_1() {\n        let param = Concurrency { tx_count: input, rx_count: 1 };\n        group.throughput(Throughput::Elements(TEN_THOUSAND as u64));\n        group.bench_with_input(BenchmarkId::new(\"mpsc size 1\", &param), &param, |b, i| {\n            b.to_async(BenchExecutor())\n                .iter(|| _kanal_bounded_async(1, i.tx_count, i.rx_count, TEN_THOUSAND))\n        });\n    }\n\n    for input in n_1() {\n        let param = Concurrency { tx_count: input, rx_count: 1 };\n        group.throughput(Throughput::Elements(ONE_MILLION as u64));\n        group.bench_with_input(BenchmarkId::new(\"mpsc size 100\", &param), &param, |b, i| {\n            b.to_async(BenchExecutor())\n                .iter(|| _kanal_bounded_async(100, i.tx_count, i.rx_count, ONE_MILLION))\n        });\n    }\n    for input in n_n() {\n        let param = Concurrency { tx_count: input.0, rx_count: input.1 };\n        group.throughput(Throughput::Elements(ONE_MILLION as u64));\n        group.bench_with_input(BenchmarkId::new(\"mpmc size 100\", &param), &param, |b, i| {\n            b.to_async(BenchExecutor())\n                .iter(|| _kanal_bounded_async(100, i.tx_count, i.rx_count, ONE_MILLION))\n        });\n    }\n}\n\nfn bench_kanal_unbounded_async(c: &mut Criterion) {\n    let mut group = 
c.benchmark_group(\"kanal_unbounded_async\");\n    group.significance_level(0.1).sample_size(50);\n    group.measurement_time(Duration::from_secs(20));\n    for input in n_1() {\n        let param = Concurrency { tx_count: input, rx_count: 1 };\n        group.throughput(Throughput::Elements(ONE_MILLION as u64));\n        group.bench_with_input(BenchmarkId::new(\"mpsc\", &param), &param, |b, i| {\n            b.to_async(BenchExecutor())\n                .iter(|| _kanal_unbounded_async(i.tx_count, i.rx_count, ONE_MILLION))\n        });\n    }\n    for input in n_n() {\n        let param = Concurrency { tx_count: input.0, rx_count: input.1 };\n        group.throughput(Throughput::Elements(ONE_MILLION as u64));\n        group.bench_with_input(BenchmarkId::new(\"mpmc\", &param), &param, |b, i| {\n            b.to_async(BenchExecutor())\n                .iter(|| _kanal_unbounded_async(i.tx_count, i.rx_count, ONE_MILLION))\n        });\n    }\n}\n\ncriterion_group!(\n    benches,\n    bench_kanal_bounded_async,\n    bench_kanal_unbounded_async,\n    bench_kanal_bounded_blocking,\n    bench_kanal_unbounded_blocking\n);\ncriterion_main!(benches);\n"
  },
  {
    "path": "test-suite/benches/tokio.rs",
    "content": "use criterion::*;\nuse std::time::Duration;\n\n#[allow(unused_imports)]\nmod common;\nuse common::*;\n\nasync fn _tokio_bounded_mpsc(bound: usize, tx_count: usize, msg_count: usize) {\n    let (tx, mut rx) = tokio::sync::mpsc::channel::<usize>(bound);\n\n    let _send_counter = msg_count / tx_count;\n    for _tx_i in 0..tx_count {\n        let _tx = tx.clone();\n        async_spawn!(async move {\n            for i in 0.._send_counter {\n                let _ = _tx.send(i).await;\n            }\n        });\n    }\n    drop(tx);\n    for _ in 0..(tx_count * _send_counter) {\n        if let Some(_msg) = rx.recv().await {\n            //    println!(\"recv {}\", _msg);\n        } else {\n            panic!(\"recv error\");\n        }\n    }\n}\n\nasync fn _tokio_unbounded_mpsc(tx_count: usize, msg_count: usize) {\n    let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel::<usize>();\n\n    let _send_counter = msg_count / tx_count;\n    for _tx_i in 0..tx_count {\n        let _tx = tx.clone();\n        async_spawn!(async move {\n            for i in 0.._send_counter {\n                let _ = _tx.send(i);\n            }\n        });\n    }\n    drop(tx);\n    for _ in 0..(tx_count * _send_counter) {\n        if let Some(_msg) = rx.recv().await {\n            //    println!(\"recv {}\", _msg);\n        } else {\n            panic!(\"recv error\");\n        }\n    }\n}\n\nfn bench_tokio_bounded(c: &mut Criterion) {\n    let mut group = c.benchmark_group(\"tokio_bounded_100\");\n    group.significance_level(0.1).sample_size(50);\n    group.measurement_time(Duration::from_secs(10));\n    for input in n_1() {\n        let param = Concurrency { tx_count: input, rx_count: 1 };\n        group.throughput(Throughput::Elements(ONE_MILLION as u64));\n        group.bench_with_input(BenchmarkId::new(\"mpsc\", input), &param, |b, i| {\n            b.to_async(BenchExecutor()).iter(|| _tokio_bounded_mpsc(100, i.tx_count, ONE_MILLION))\n        });\n    }\n    
group.finish();\n}\n\nfn bench_tokio_unbounded(c: &mut Criterion) {\n    let mut group = c.benchmark_group(\"tokio_unbounded\");\n    group.significance_level(0.1).sample_size(50);\n    group.measurement_time(Duration::from_secs(10));\n    for input in n_1() {\n        let param = Concurrency { tx_count: input, rx_count: 1 };\n        group.throughput(Throughput::Elements(ONE_MILLION as u64));\n        group.bench_with_input(BenchmarkId::new(\"mpsc\", input), &param, |b, i| {\n            b.to_async(BenchExecutor()).iter(|| _tokio_unbounded_mpsc(i.tx_count, ONE_MILLION))\n        });\n    }\n    group.finish();\n}\n\nfn bench_tokio_oneshot(c: &mut Criterion) {\n    let mut group = c.benchmark_group(\"tokio_oneshot\");\n    let count = TEN_THOUSAND;\n    group.throughput(Throughput::Elements(count as u64));\n    group.bench_function(\"oneshot\", |b| {\n        b.to_async(BenchExecutor()).iter(|| async move {\n            let mut txs = Vec::with_capacity(count);\n            let mut rxs = Vec::with_capacity(count);\n            for _i in 0..count {\n                let (tx, rx) = tokio::sync::oneshot::channel();\n                txs.push(tx);\n                rxs.push(rx);\n            }\n            async_spawn!(async move {\n                for tx in txs {\n                    let _ = tx.send(0);\n                }\n            });\n            for rx in rxs {\n                let _ = rx.await;\n            }\n        })\n    });\n    group.finish();\n}\n\ncriterion_group!(benches, bench_tokio_bounded, bench_tokio_unbounded, bench_tokio_oneshot);\ncriterion_main!(benches);\n"
  },
  {
    "path": "test-suite/scripts/miri.sh",
    "content": "#!/bin/bash\n# -Zmiri-no-short-fd-operations is to prevent short write perform by miri, which breaks to atomic appending in log\n# -Zmiri-permissive-provenance is to disable warning about parking_lot\n\n# By default log is off, if you need to enable, pass the option with the script: --features trace_log\n\nif [ -z \"$MIRI_SEED\" ]; then\n\tMIRI_SEED=\"$(shuf -i 1-1000 -n 1)\"\nfi\necho \"MIRI_SEED\" $MIRI_SEED\n\nMIRIFLAGS=\"$MIRIFLAGS -Zmiri-seed=$MIRI_SEED -Zmiri-disable-isolation -Zmiri-no-short-fd-operations -Zmiri-backtrace=full -Zmiri-permissive-provenance\"\nexport MIRIFLAGS\necho $MIRIFLAGS\n# --lib: to skip doctest\nRUSTFLAGS=\"--cfg tokio_unstable\" RUST_BACKTRACE=1 cargo +${NIGHTLY_VERSION:-nightly} miri test --lib $@ -- --no-capture --test-threads=1\n"
  },
  {
    "path": "test-suite/src/lib.rs",
    "content": "#[cfg(test)]\nmod test_async;\n#[cfg(test)]\nmod test_async_blocking;\n#[cfg(test)]\nmod test_blocking_async;\n#[cfg(test)]\nmod test_blocking_context;\n#[cfg(test)]\nmod test_oneshot;\n#[cfg(test)]\nmod test_select_async;\n#[cfg(test)]\nmod test_select_blocking;\n#[cfg(test)]\nmod test_waitgroup;\n\n// we don't want to import smol-timeout\n#[cfg(test)]\n#[cfg(all(feature = \"time\", not(feature = \"smol\")))]\nmod test_type_switch;\n\nuse captains_log::*;\nuse std::sync::atomic::{AtomicUsize, Ordering};\n\n#[cfg(not(miri))]\npub const ROUND: usize = 10000;\n#[cfg(miri)]\npub const ROUND: usize = 20;\n\n#[cfg(feature = \"compio_dispatcher\")]\nuse std::sync::OnceLock;\n\n#[cfg(feature = \"compio_dispatcher\")]\nuse compio::dispatcher::Dispatcher;\n\n#[cfg(feature = \"compio_dispatcher\")]\npub static COMPIO_DISPATCHER: OnceLock<Dispatcher> = OnceLock::new();\n\npub fn _setup_log() {\n    #[cfg(feature = \"trace_log\")]\n    {\n        let format = recipe::LOG_FORMAT_THREADED_DEBUG;\n        #[cfg(miri)]\n        {\n            let _ = std::fs::remove_file(\"/tmp/crossfire_miri.log\");\n            let file = LogRawFile::new(\"/tmp\", \"crossfire_miri.log\", Level::Debug, format);\n            captains_log::Builder::default()\n                //                .tracing_global()\n                .add_sink(file)\n                .test()\n                .build()\n                .expect(\"log setup\");\n        }\n        #[cfg(not(miri))]\n        {\n            let ring = ringfile::LogRingFile::new(\n                \"/tmp/crossfire_ring.log\",\n                500 * 1024 * 1024,\n                Level::Debug,\n                format,\n            );\n            let mut config = Builder::default()\n                .signal(signal_consts::SIGINT)\n                .signal(signal_consts::SIGTERM)\n                //                .tracing_global()\n                .add_sink(ring)\n                .add_sink(LogConsole::new(\n                    
ConsoleTarget::Stdout,\n                    Level::Info,\n                    recipe::LOG_FORMAT_DEBUG,\n                ));\n            config.dynamic = true;\n            config.build().expect(\"log_setup\");\n        }\n    }\n    #[cfg(not(feature = \"trace_log\"))]\n    {\n        let _ = recipe::env_logger(\"LOG_FILE\", \"LOG_LEVEL\").build().expect(\"log setup\");\n    }\n}\n\n#[macro_export]\nmacro_rules! runtime_block_on {\n    ($f: expr) => {{\n        #[cfg(feature = \"smol\")]\n        {\n            log::info!(\"run with smol\");\n            smol::block_on($f)\n        }\n        #[cfg(feature = \"async_std\")]\n        {\n            log::info!(\"run with async_std\");\n            async_std::task::block_on($f)\n        }\n        #[cfg(any(feature = \"compio\", feature = \"compio_dispatcher\"))]\n        {\n            log::info!(\"run with compio\");\n\n            let rt = compio::runtime::Runtime::new().unwrap();\n            rt.block_on($f)\n        }\n        #[cfg(not(any(\n            feature = \"compio\",\n            feature = \"compio_dispatcher\",\n            feature = \"async_std\",\n            feature = \"smol\"\n        )))]\n        {\n            let runtime_flag = std::env::var(\"SINGLE_THREAD_RUNTIME\").unwrap_or(\"\".to_string());\n            let mut rt = if runtime_flag.len() > 0 {\n                log::info!(\"run with tokio current thread\");\n                tokio::runtime::Builder::new_current_thread()\n            } else {\n                log::info!(\"run with tokio multi thread\");\n                tokio::runtime::Builder::new_multi_thread()\n            };\n            rt.enable_all().build().unwrap().block_on($f)\n        }\n    }};\n}\n\n#[macro_export]\nmacro_rules! 
async_spawn {\n    ($f: expr) => {{\n        #[cfg(feature = \"smol\")]\n        {\n            smol::spawn($f)\n        }\n        #[cfg(feature = \"async_std\")]\n        {\n            async_std::task::spawn($f)\n        }\n        #[cfg(feature = \"compio\")]\n        {\n            compio::runtime::spawn($f)\n        }\n        #[cfg(feature = \"compio_dispatcher\")]\n        {\n            let disp = COMPIO_DISPATCHER.get_or_init(|| {\n                compio::dispatcher::DispatcherBuilder::new()\n                    .worker_threads(std::num::NonZero::new(8).unwrap())\n                    .build()\n                    .expect(\"create dispatcher\")\n            });\n            disp.dispatch(move || $f).expect(\"dispatch\")\n        }\n        #[cfg(not(any(\n            feature = \"compio\",\n            feature = \"compio_dispatcher\",\n            feature = \"async_std\",\n            feature = \"smol\"\n        )))]\n        {\n            tokio::spawn($f)\n        }\n    }};\n}\n\n#[macro_export]\nmacro_rules! 
async_join_result {\n    ($th: expr) => {{\n        #[cfg(any(feature = \"async_std\", feature = \"smol\"))]\n        {\n            $th.await\n        }\n        #[cfg(not(any(feature = \"async_std\", feature = \"smol\")))]\n        {\n            // compio and tokio are the same\n            $th.await.expect(\"join\")\n        }\n    }};\n}\n\nstatic DROP_COUNTER: AtomicUsize = AtomicUsize::new(0);\n\npub trait TestDropMsg: Unpin + Send + 'static {\n    fn new(v: usize) -> Self;\n\n    fn get_value(&self) -> usize;\n}\n\npub struct SmallMsg(pub usize);\n\nimpl Drop for SmallMsg {\n    fn drop(&mut self) {\n        DROP_COUNTER.fetch_add(1, Ordering::SeqCst);\n    }\n}\n\nimpl TestDropMsg for SmallMsg {\n    fn new(v: usize) -> Self {\n        Self(v)\n    }\n\n    fn get_value(&self) -> usize {\n        self.0\n    }\n}\n\npub struct LargeMsg([usize; 4]);\n\nimpl TestDropMsg for LargeMsg {\n    fn new(v: usize) -> Self {\n        Self([v, v, v, v])\n    }\n\n    fn get_value(&self) -> usize {\n        self.0[0]\n    }\n}\n\nimpl Drop for LargeMsg {\n    fn drop(&mut self) {\n        DROP_COUNTER.fetch_add(1, Ordering::SeqCst);\n    }\n}\n\npub fn get_drop_counter() -> usize {\n    DROP_COUNTER.load(Ordering::SeqCst)\n}\n\npub fn reset_drop_counter() {\n    DROP_COUNTER.store(0, Ordering::SeqCst);\n}\n\n#[cfg(feature = \"time\")]\npub async fn sleep(duration: std::time::Duration) {\n    #[cfg(feature = \"smol\")]\n    {\n        smol::Timer::after(duration).await;\n    }\n    #[cfg(feature = \"async_std\")]\n    {\n        async_std::task::sleep(duration).await;\n    }\n    #[cfg(any(feature = \"compio\", feature = \"compio_dispatcher\"))]\n    {\n        compio::time::sleep(duration).await;\n    }\n    #[cfg(not(any(\n        feature = \"compio\",\n        feature = \"compio_dispatcher\",\n        feature = \"async_std\",\n        feature = \"smol\"\n    )))]\n    {\n        tokio::time::sleep(duration).await;\n    }\n}\n\n#[cfg(all(feature = \"time\", 
not(feature = \"smol\")))]\npub async fn timeout<F, T>(duration: std::time::Duration, future: F) -> Result<T, String>\nwhere\n    F: std::future::Future<Output = T>,\n{\n    #[cfg(feature = \"async_std\")]\n    {\n        return async_std::future::timeout(duration, future)\n            .await\n            .map_err(|_| format!(\"Test timed out after {:?}\", duration));\n    }\n    #[cfg(any(feature = \"compio\", feature = \"compio_dispatcher\"))]\n    {\n        return compio::time::timeout(duration, future)\n            .await\n            .map_err(|_| format!(\"Test timed out after {:?}\", duration));\n    }\n    #[cfg(not(any(\n        feature = \"compio\",\n        feature = \"compio_dispatcher\",\n        feature = \"async_std\",\n        feature = \"smol\"\n    )))]\n    {\n        return tokio::time::timeout(duration, future)\n            .await\n            .map_err(|_| format!(\"Test timed out after {:?}\", duration));\n    }\n}\n\npub fn spawn_named_thread<F, T>(name: &str, f: F) -> std::thread::JoinHandle<T>\nwhere\n    F: FnOnce() -> T + Send + 'static,\n    T: Send + 'static,\n{\n    std::thread::Builder::new()\n        .name(name.to_string())\n        .spawn(f)\n        .unwrap_or_else(|e| panic!(\"Failed to spawn thread '{}': {:?}\", name, e))\n}\n"
  },
  {
    "path": "test-suite/src/test_async.rs",
    "content": "use crate::*;\nuse captains_log::{logfn, *};\nuse crossfire::flavor::Flavor;\nuse crossfire::tokio_task_id;\nuse crossfire::{sink::*, stream::*, *};\nuse futures_util::{\n    pin_mut, select,\n    stream::{Stream, StreamExt},\n    FutureExt,\n};\nuse rstest::*;\nuse std::future::Future;\nuse std::pin::Pin;\nuse std::sync::Arc;\nuse std::sync::Mutex;\nuse std::task::*;\nuse std::thread;\nuse std::time::Duration;\n\n#[fixture]\nfn setup_log() {\n    _setup_log();\n}\n\n#[logfn]\n#[rstest]\nfn test_basic_weak(setup_log: ()) {\n    runtime_block_on!(async move {\n        let (tx, rx) = mpsc::bounded_async::<usize>(100);\n        assert_eq!(tx.get_tx_count(), 1);\n        let weak_tx = tx.downgrade();\n        let tx_clone = weak_tx.upgrade::<MAsyncTx<_>>().unwrap();\n        tx_clone.send(1).await.expect(\"ok\");\n        assert_eq!(tx.get_tx_count(), 2);\n        drop(tx);\n        drop(tx_clone);\n        assert!(weak_tx.upgrade::<MAsyncTx<_>>().is_none());\n        assert_eq!(weak_tx.get_tx_count(), 0);\n        drop(rx);\n    });\n}\n\n#[logfn]\n#[rstest]\n#[case(spsc::bounded_async(1))]\n#[case(mpsc::bounded_async(1))]\n#[case(mpmc::bounded_async(1))]\nfn test_basic_bounded_empty_full_drop_rx<T: AsyncTxTrait<usize>, R: AsyncRxTrait<usize>>(\n    setup_log: (), #[case] channel: (T, R),\n) {\n    let (tx, rx) = channel;\n    assert!(tx.is_empty());\n    assert!(rx.is_empty());\n    assert_eq!(tx.capacity(), Some(1));\n    assert_eq!(rx.capacity(), Some(1));\n    tx.try_send(1).expect(\"Ok\");\n    assert!(tx.is_full());\n    assert!(rx.is_full());\n    assert!(!tx.is_empty());\n    assert_eq!(tx.is_disconnected(), false);\n    assert_eq!(rx.is_disconnected(), false);\n    drop(rx);\n    assert_eq!(tx.is_disconnected(), true);\n    assert_eq!(tx.get_rx_count(), 0);\n    assert_eq!(tx.get_tx_count(), 1);\n}\n\n#[logfn]\n#[rstest]\n#[case(spsc::bounded_async(1))]\n#[case(mpsc::bounded_async(1))]\n#[case(mpmc::bounded_async(1))]\nfn 
test_basic_bounded_empty_full_drop_tx<T: AsyncTxTrait<usize>, R: AsyncRxTrait<usize>>(\n    setup_log: (), #[case] channel: (T, R),\n) {\n    let (tx, rx) = channel;\n    assert!(tx.is_empty());\n    assert!(rx.is_empty());\n    assert_eq!(tx.capacity(), Some(1));\n    assert_eq!(rx.capacity(), Some(1));\n    tx.try_send(1).expect(\"Ok\");\n    assert!(tx.is_full());\n    assert!(rx.is_full());\n    assert!(!tx.is_empty());\n    assert_eq!(tx.is_disconnected(), false);\n    assert_eq!(rx.is_disconnected(), false);\n    drop(tx);\n    assert_eq!(rx.is_disconnected(), true);\n    assert_eq!(rx.get_tx_count(), 0);\n    assert_eq!(rx.get_rx_count(), 1);\n}\n\n#[logfn]\n#[rstest]\nfn test_basic_compile_bounded_empty_full() {\n    let (tx, rx) = mpmc::bounded_async(1);\n    assert!(tx.is_empty());\n    assert!(rx.is_empty());\n    tx.try_send(1).expect(\"ok\");\n    assert!(tx.is_full());\n    assert!(!tx.is_empty());\n    assert!(rx.is_full());\n    assert_eq!(tx.get_tx_count(), 1);\n    assert_eq!(rx.get_tx_count(), 1);\n    assert_eq!(tx.is_disconnected(), false);\n    assert_eq!(rx.is_disconnected(), false);\n    drop(rx);\n    assert_eq!(tx.is_disconnected(), true);\n}\n\n#[cfg(feature = \"time\")]\n#[logfn]\n#[rstest]\nfn test_sync() {\n    runtime_block_on!(async move {\n        let (tx, rx) = spsc::bounded_async::<usize>(100);\n        //  Example1: should fail to compile with Arc\n        //    let tx = Arc::new(tx);\n        let _task = async_spawn!(async move {\n            let _ = tx.send(2).await;\n        });\n        drop(rx);\n\n        let (tx, rx) = mpsc::bounded_async::<usize>(100);\n        //  example2: should fail to compile with Arc\n        //    let rx = Arc::new(rx);\n        let _task = async_spawn!(async move {\n            let _ = rx.recv().await;\n        });\n        drop(tx);\n\n        let (tx, rx) = mpsc::bounded_blocking::<usize>(100);\n        ////  example3: should fail to compile with Arc\n        //    let rx = Arc::new(rx);\n       
 let _task = std::thread::spawn(move || {\n            let _ = rx.recv();\n        });\n        drop(tx);\n\n        let (tx, rx) = spsc::bounded_blocking::<usize>(100);\n        ////  example4: should fail to compile after Arc\n        //   let tx = Arc::new(tx);\n        std::thread::spawn(move || {\n            let _ = tx.send(1);\n        });\n        drop(rx);\n\n        let (tx, rx) = mpmc::bounded_blocking::<usize>(100);\n        // MRx can put in Arc\n        let rx = Arc::new(rx);\n        std::thread::spawn(move || {\n            let _ = rx.try_recv();\n        });\n        // MTx can put in Arc\n        let tx = Arc::new(tx);\n        std::thread::spawn(move || {\n            let _ = tx.try_send(1);\n        });\n\n        let (tx, rx) = spsc::bounded_async::<usize>(100);\n        let th = async_spawn!(async move {\n            let mut i = 0;\n            loop {\n                sleep(Duration::from_secs(1)).await;\n                i += 1;\n                if let Err(_) = tx.send(i).await {\n                    println!(\"rx dropped\");\n                    return;\n                }\n            }\n        });\n        'LOOP: for _ in 0..10 {\n            select! 
{\n                _ = sleep(Duration::from_millis(500)).fuse() =>{\n                    println!(\"tick\");\n                },\n                r = rx.recv().fuse() => {\n                    match r {\n                        Ok(item)=>{\n                            println!(\"recv {}\", item);\n                        }\n                        Err(e)=>{\n                            println!(\"tx dropped {:?}\", e);\n                            break 'LOOP;\n                        }\n                    }\n                }\n            }\n        }\n        drop(rx);\n        let _ = async_join_result!(th);\n    });\n}\n\n#[logfn]\n#[rstest]\n#[case(spsc::bounded_async(100))]\n#[case(mpsc::bounded_async(100))]\n#[case(mpmc::bounded_async(100))]\nfn test_basic_bounded_rx_drop<T: AsyncTxTrait<usize>, R: AsyncRxTrait<usize>>(\n    setup_log: (), #[case] channel: (T, R),\n) {\n    runtime_block_on!(async move {\n        let tx = {\n            let (tx, _rx) = channel;\n            tx.send(1).await.expect(\"ok\");\n            tx.send(2).await.expect(\"ok\");\n            tx.send(3).await.expect(\"ok\");\n            tx\n        };\n        {\n            info!(\"try to send after rx dropped\");\n            assert_eq!(tx.send(4).await.unwrap_err(), SendError(4));\n            drop(tx);\n            info!(\"dropped tx\");\n        }\n    });\n}\n\n#[logfn]\n#[rstest]\n#[case(spsc::unbounded_async())]\n#[case(mpsc::unbounded_async())]\n#[case(mpmc::unbounded_async())]\nfn test_basic_unbounded_rx_drop<T: BlockingTxTrait<usize>, R: AsyncRxTrait<usize>>(\n    setup_log: (), #[case] channel: (T, R),\n) {\n    runtime_block_on!(async move {\n        let tx = {\n            let (tx, _rx) = channel;\n            tx.send(1).expect(\"ok\");\n            tx.send(2).expect(\"ok\");\n            tx.send(3).expect(\"ok\");\n            tx\n        };\n        {\n            info!(\"try to send after rx dropped\");\n            assert_eq!(tx.send(4).unwrap_err(), SendError(4));\n 
           drop(tx);\n            info!(\"dropped tx\");\n        }\n    });\n}\n\n#[cfg(feature = \"time\")]\n#[logfn]\n#[rstest]\n#[case(spsc::bounded_async(10))]\n#[case(mpsc::bounded_async(10))]\n#[case(mpmc::bounded_async(10))]\nfn test_basic_bounded_1_thread<T: AsyncTxTrait<usize>, R: AsyncRxTrait<usize>>(\n    setup_log: (), #[case] channel: (T, R),\n) {\n    let (tx, rx) = channel;\n    runtime_block_on!(async move {\n        let rx_res = rx.try_recv();\n        assert!(rx_res.is_err());\n        assert!(rx_res.unwrap_err().is_empty());\n        for i in 0usize..10 {\n            let tx_res = tx.try_send(i);\n            assert!(tx_res.is_ok());\n        }\n        let tx_res = tx.try_send(11);\n        assert!(tx_res.is_err());\n        assert!(tx_res.unwrap_err().is_full());\n\n        let th = async_spawn!(async move {\n            for i in 0usize..12 {\n                match rx.recv().await {\n                    Ok(j) => {\n                        trace!(\"recv {}\", i);\n                        assert_eq!(i, j);\n                    }\n                    Err(e) => {\n                        panic!(\"error {}\", e);\n                    }\n                }\n            }\n            let res = rx.recv().await;\n            assert!(res.is_err());\n            debug!(\"rx close\");\n        });\n        assert!(tx.send(10).await.is_ok());\n        sleep(Duration::from_secs(1)).await;\n        assert!(tx.send(11).await.is_ok());\n        drop(tx);\n        let _ = async_join_result!(th);\n    });\n}\n\n#[cfg(feature = \"time\")]\n#[logfn]\n#[rstest]\n#[case(spsc::unbounded_async())]\n#[case(mpsc::unbounded_async())]\n#[case(mpmc::unbounded_async())]\nfn test_basic_unbounded_1_thread<T: BlockingTxTrait<usize>, R: AsyncRxTrait<usize>>(\n    setup_log: (), #[case] channel: (T, R),\n) {\n    let (tx, rx) = channel;\n    assert_eq!(tx.capacity(), None);\n    assert_eq!(rx.capacity(), None);\n    runtime_block_on!(async move {\n        let rx_res = 
rx.try_recv();\n        assert!(rx_res.is_err());\n        assert!(rx_res.unwrap_err().is_empty());\n        for i in 0usize..10 {\n            let tx_res = tx.try_send(i);\n            assert!(tx_res.is_ok());\n        }\n\n        let th = async_spawn!(async move {\n            for i in 0usize..12 {\n                match rx.recv().await {\n                    Ok(j) => {\n                        trace!(\"recv {}\", i);\n                        assert_eq!(i, j);\n                    }\n                    Err(e) => {\n                        panic!(\"error {}\", e);\n                    }\n                }\n            }\n            let res = rx.recv().await;\n            assert!(res.is_err());\n            debug!(\"rx close\");\n        });\n        assert!(tx.send(10).is_ok());\n        sleep(Duration::from_secs(1)).await;\n        assert!(tx.send(11).is_ok());\n        drop(tx);\n        let _ = async_join_result!(th);\n    });\n}\n\n#[cfg(feature = \"time\")]\n#[logfn]\n#[rstest]\n#[case(spsc::unbounded_async())]\n#[case(mpsc::unbounded_async())]\n#[case(mpmc::unbounded_async())]\nfn test_basic_unbounded_idle_select<T: BlockingTxTrait<usize>, R: AsyncRxTrait<usize>>(\n    setup_log: (), #[case] channel: (T, R),\n) {\n    let (_tx, rx) = channel;\n    let round = {\n        #[cfg(miri)]\n        {\n            10\n        }\n        #[cfg(not(miri))]\n        {\n            200\n        }\n    };\n\n    runtime_block_on!(async move {\n        let c = rx.recv().fuse();\n        pin_mut!(c);\n        for _ in 0..round {\n            {\n                let f = sleep(Duration::from_millis(1)).fuse();\n                pin_mut!(f);\n                select! 
{\n                    _ = f => {\n                        let (_tx_wakers, _rx_wakers) = rx.get_wakers_count();\n                        trace!(\"waker tx {} rx {}\", _tx_wakers, _rx_wakers);\n                    },\n                    _ = c => {\n                        unreachable!()\n                    },\n                }\n            }\n        }\n        let (tx_wakers, rx_wakers) = rx.get_wakers_count();\n        assert_eq!(tx_wakers, 0);\n        info!(\"waker rx {}\", rx_wakers);\n    });\n}\n\n#[logfn]\n#[rstest]\n#[case(spsc::bounded_async(10))]\n#[case(mpsc::bounded_async(10))]\n#[case(mpmc::bounded_async(10))]\nfn test_basic_bounded_recv_after_sender_close<T: AsyncTxTrait<usize>, R: AsyncRxTrait<usize>>(\n    setup_log: (), #[case] channel: (T, R),\n) {\n    let (tx, rx) = channel;\n    let total_msg_count = 5;\n    for i in 0..total_msg_count {\n        let _ = tx.try_send(i).expect(\"send ok\");\n    }\n    drop(tx);\n\n    runtime_block_on!(async move {\n        // NOTE: 5 < 10\n        let mut recv_msg_count = 0;\n        loop {\n            match rx.recv().await {\n                Ok(_) => {\n                    recv_msg_count += 1;\n                }\n                Err(_) => {\n                    break;\n                }\n            }\n        }\n        assert_eq!(recv_msg_count, total_msg_count);\n    });\n}\n\n#[logfn]\n#[rstest]\n#[case(spsc::unbounded_async())]\n#[case(mpsc::unbounded_async())]\n#[case(mpmc::unbounded_async())]\nfn test_basic_unbounded_recv_after_sender_close<\n    T: BlockingTxTrait<usize>,\n    R: AsyncRxTrait<usize>,\n>(\n    setup_log: (), #[case] channel: (T, R),\n) {\n    let (tx, rx) = channel;\n    let total_msg_count = 500;\n    for i in 0..total_msg_count {\n        let _ = tx.send(i).expect(\"send ok\");\n    }\n    drop(tx);\n    runtime_block_on!(async move {\n        let mut recv_msg_count = 0;\n        loop {\n            match rx.recv().await {\n                Ok(_) => {\n                    
recv_msg_count += 1;\n                }\n                Err(_) => {\n                    break;\n                }\n            }\n        }\n        assert_eq!(recv_msg_count, total_msg_count);\n    });\n}\n\n#[cfg(feature = \"time\")]\n#[logfn]\n#[rstest]\n#[case(spsc::bounded_async(100))]\n#[case(mpsc::bounded_async(100))]\n#[case(mpmc::bounded_async(100))]\nfn test_basic_timeout_recv_async_waker<T: AsyncTxTrait<usize>, R: AsyncRxTrait<usize>>(\n    setup_log: (), #[case] channel: (T, R),\n) {\n    let (tx, rx) = channel;\n    let rounds = {\n        #[cfg(miri)]\n        {\n            10\n        }\n        #[cfg(not(miri))]\n        {\n            1000\n        }\n    };\n    runtime_block_on!(async move {\n        for _ in 0..rounds {\n            assert!(rx.recv_with_timer(sleep(Duration::from_millis(1))).await.is_err());\n        }\n        let (tx_wakers, rx_wakers) = rx.get_wakers_count();\n        println!(\"wakers: {}, {}\", tx_wakers, rx_wakers);\n        assert!(tx_wakers <= 1);\n        assert!(rx_wakers <= 1);\n        sleep(Duration::from_secs(1)).await;\n        let _ = tx.send(1).await;\n        assert_eq!(rx.recv().await.unwrap(), 1);\n        let (tx_wakers, rx_wakers) = rx.get_wakers_count();\n        println!(\"wakers: {}, {}\", tx_wakers, rx_wakers);\n        assert!(tx_wakers <= 1);\n        assert!(rx_wakers <= 1);\n    });\n}\n\n#[cfg(feature = \"time\")]\n#[logfn]\n#[rstest]\n#[case(spsc::unbounded_async())]\n#[case(mpsc::unbounded_async())]\n#[case(mpmc::unbounded_async())]\nfn test_basic_unbounded_recv_timeout_async<T: BlockingTxTrait<usize>, R: AsyncRxTrait<usize>>(\n    setup_log: (), #[case] _channel: (T, R),\n) {\n    let (tx, rx) = _channel;\n    runtime_block_on!(async move {\n        let th = async_spawn!(async move {\n            sleep(Duration::from_millis(50)).await;\n            let _ = tx.send(1);\n        });\n        let _r = rx.recv_with_timer(sleep(Duration::from_millis(1))).await;\n        #[cfg(not(miri))]\n        
{\n            assert_eq!(_r.unwrap_err(), RecvTimeoutError::Timeout);\n        }\n        let _ = async_join_result!(th);\n        let (tx_wakers, rx_wakers) = rx.get_wakers_count();\n        println!(\"wakers: {}, {}\", tx_wakers, rx_wakers);\n        assert_eq!(tx_wakers, 0);\n        assert_eq!(rx_wakers, 0);\n        let _r = rx.recv_with_timer(sleep(Duration::from_millis(200))).await;\n        #[cfg(not(miri))]\n        {\n            assert_eq!(_r.unwrap(), 1);\n        }\n    });\n}\n\n#[cfg(feature = \"time\")]\n#[logfn]\n#[rstest]\n#[case(spsc::bounded_async(10))]\n#[case(mpsc::bounded_async(10))]\n#[case(mpmc::bounded_async(10))]\nfn test_basic_send_timeout_async<T: AsyncTxTrait<usize>, R: AsyncRxTrait<usize>>(\n    setup_log: (), #[case] _channel: (T, R),\n) {\n    let (tx, rx) = _channel;\n    for i in 0..10 {\n        assert!(tx.try_send(i).is_ok());\n    }\n\n    runtime_block_on!(async move {\n        assert_eq!(\n            tx.send_with_timer(11, sleep(Duration::from_millis(1))).await.unwrap_err(),\n            SendTimeoutError::Timeout(11)\n        );\n        let th = async_spawn!(async move {\n            loop {\n                sleep(Duration::from_millis(2)).await;\n                if let Err(_) = rx.recv().await {\n                    println!(\"tx dropped\");\n                    break;\n                }\n            }\n        });\n        let mut try_times = 0;\n        loop {\n            try_times += 1;\n            match tx.send_with_timer(11, sleep(Duration::from_millis(1))).await {\n                Ok(_) => {\n                    println!(\"send ok after {} tries\", try_times);\n                    break;\n                }\n                Err(SendTimeoutError::Timeout(msg)) => {\n                    println!(\"timeout\");\n                    assert_eq!(msg, 11);\n                }\n                Err(SendTimeoutError::Disconnected(_)) => {\n                    unreachable!();\n                }\n            }\n        }\n        
let (tx_wakers, rx_wakers) = tx.get_wakers_count();\n        println!(\"wakers: {}, {}\", tx_wakers, rx_wakers);\n        assert!(tx_wakers <= 1, \"{:?}\", tx_wakers);\n        assert!(rx_wakers <= 1, \"{:?}\", rx_wakers);\n        drop(tx);\n        let _ = async_join_result!(th);\n    });\n}\n\n#[cfg(feature = \"time\")]\n#[logfn]\n#[rstest]\n#[case(mpmc::bounded_async(1))]\nfn test_pressure_bounded_timeout_async<F: Flavor<Item = usize> + 'static>(\n    setup_log: (), #[case] _channel: (MAsyncTx<F>, MAsyncRx<F>),\n) {\n    use std::collections::HashMap;\n    let (tx, rx) = _channel;\n    let tx_count: usize = 3;\n    let rx_count: usize = 2;\n\n    runtime_block_on!(async move {\n        assert_eq!(\n            rx.recv_with_timer(sleep(Duration::from_millis(1))).await.unwrap_err(),\n            RecvTimeoutError::Timeout\n        );\n        let (tx_wakers, rx_wakers) = rx.get_wakers_count();\n        println!(\"wakers: {}, {}\", tx_wakers, rx_wakers);\n        assert_eq!(tx_wakers, 0);\n        assert_eq!(rx_wakers, 0);\n\n        let recv_map = Arc::new(Mutex::new(HashMap::new()));\n\n        let mut th_tx = Vec::new();\n        let mut th_rx = Vec::new();\n\n        for thread_id in 0..tx_count {\n            let _recv_map = recv_map.clone();\n            let _tx = tx.clone();\n            th_tx.push(async_spawn!(async move {\n                let mut local_send_timeout_count = 0;\n                let mut i = 0;\n                // randomize start up\n                sleep(Duration::from_millis((thread_id & 3) as u64)).await;\n                loop {\n                    if i >= ROUND {\n                        return local_send_timeout_count;\n                    }\n                    {\n                        let mut guard = _recv_map.lock().unwrap();\n                        guard.insert(i, ());\n                    }\n                    if i & 2 == 0 {\n                        sleep(Duration::from_millis(3)).await;\n                    } else {\n          
              sleep(Duration::from_millis(1)).await;\n                    }\n                    loop {\n                        match _tx.send_with_timer(i, sleep(Duration::from_millis(1))).await {\n                            Ok(_) => {\n                                i += 1;\n                                break;\n                            }\n                            Err(SendTimeoutError::Timeout(_i)) => {\n                                local_send_timeout_count += 1;\n                                assert_eq!(_i, i);\n                            }\n                            Err(SendTimeoutError::Disconnected(_)) => {\n                                unreachable!();\n                            }\n                        }\n                    }\n                }\n            }));\n        }\n\n        for _thread_id in 0..rx_count {\n            let _rx = rx.clone();\n            let _recv_map = recv_map.clone();\n            th_rx.push(async_spawn!(async move {\n                let mut step: usize = 0;\n                let mut local_recv_count: usize = 0;\n                let mut local_recv_timeout_count: usize = 0;\n                loop {\n                    step += 1;\n                    let timeout = if step & 2 == 0 { 1 } else { 2 };\n                    if step & 2 > 0 {\n                        sleep(Duration::from_millis(1)).await;\n                    }\n                    match _rx.recv_with_timer(sleep(Duration::from_millis(timeout))).await {\n                        Ok(item) => {\n                            local_recv_count += 1;\n                            {\n                                let mut guard = _recv_map.lock().unwrap();\n                                guard.remove(&item);\n                            }\n                        }\n                        Err(RecvTimeoutError::Timeout) => {\n                            local_recv_timeout_count += 1;\n                        }\n                        
Err(RecvTimeoutError::Disconnected) => {\n                            return (local_recv_count, local_recv_timeout_count);\n                        }\n                    }\n                }\n            }));\n        }\n        drop(tx);\n        drop(rx);\n\n        let mut total_send_timeout_count = 0;\n        for th in th_tx {\n            total_send_timeout_count += async_join_result!(th);\n        }\n        let mut total_recv_count = 0;\n        let mut total_recv_timeout_count = 0;\n        for th in th_rx {\n            let (recv_count, recv_timeout_count) = async_join_result!(th);\n            total_recv_count += recv_count;\n            total_recv_timeout_count += recv_timeout_count;\n        }\n        {\n            let guard = recv_map.lock().unwrap();\n            assert!(guard.is_empty());\n        }\n        assert_eq!(ROUND * tx_count, total_recv_count);\n        println!(\"send timeout count: {}\", total_send_timeout_count);\n        println!(\"recv timeout count: {}\", total_recv_timeout_count);\n    });\n}\n\n#[logfn]\n#[rstest]\n#[case(spsc::bounded_async(1))]\n#[case(spsc::bounded_async(10))]\n#[case(spsc::bounded_async(100))]\n#[case(mpmc::bounded_async(1))]\n#[case(mpmc::bounded_async(10))]\n#[case(mpmc::bounded_async(100))]\n#[case(mpmc::bounded_async(300))]\nfn test_pressure_bounded_async_1_1<T: AsyncTxTrait<usize>, R: AsyncRxTrait<usize>>(\n    setup_log: (), #[case] channel: (T, R),\n) {\n    let (tx, rx) = channel;\n\n    runtime_block_on!(async move {\n        let mut counter: usize = 0;\n        let th = async_spawn!(async move {\n            for i in 0..ROUND {\n                if let Err(e) = tx.send(i).await {\n                    panic!(\"{:?}\", e);\n                }\n            }\n            debug!(\"tx{:?} exit\", tokio_task_id!());\n        });\n        'A: loop {\n            match rx.recv().await {\n                Ok(_i) => {\n                    assert_eq!(_i, counter);\n                    trace!(\"recv {}\", _i);\n 
                   counter += 1;\n                }\n                Err(_) => break 'A,\n            }\n        }\n        drop(rx);\n        let _ = async_join_result!(th);\n        assert_eq!(counter, ROUND);\n    });\n}\n\n#[logfn]\n#[rstest]\n#[case(mpsc::bounded_async(1), 5)]\n#[case(mpsc::bounded_async(1), 100)]\n#[case(mpsc::bounded_async(1), 300)]\n#[case(mpsc::bounded_async(10), 5)]\n#[case(mpsc::bounded_async(10), 100)]\n#[case(mpsc::bounded_async(10), 300)]\n#[case(mpsc::bounded_async(100), 10)]\n#[case(mpsc::bounded_async(100), 100)]\n#[case(mpsc::bounded_async(100), 300)]\n#[case(mpmc::bounded_async(1), 5)]\n#[case(mpmc::bounded_async(1), 100)]\n#[case(mpmc::bounded_async(1), 300)]\n#[case(mpmc::bounded_async(10), 5)]\n#[case(mpmc::bounded_async(10), 100)]\n#[case(mpmc::bounded_async(10), 300)]\n#[case(mpmc::bounded_async(100), 5)]\n#[case(mpmc::bounded_async(100), 100)]\n#[case(mpmc::bounded_async(100), 300)]\nfn test_pressure_bounded_async_multi_1<\n    F: Flavor<Item = usize> + 'static,\n    R: AsyncRxTrait<usize>,\n>(\n    setup_log: (), #[case] channel: (MAsyncTx<F>, R), #[case] tx_count: usize,\n) {\n    let (tx, rx) = channel;\n    #[cfg(miri)]\n    {\n        if tx_count > 10 {\n            println!(\"skip\");\n            return;\n        }\n    }\n    runtime_block_on!(async move {\n        let mut counter = 0;\n        let mut th_s = Vec::new();\n        for _tx_i in 0..tx_count {\n            let _tx = tx.clone();\n            th_s.push(async_spawn!(async move {\n                debug!(\"tx{:?} {} spawn\", tokio_task_id!(), _tx_i);\n                for i in 0..ROUND {\n                    match _tx.send(i).await {\n                        Err(e) => panic!(\"{:?}\", e),\n                        _ => {}\n                    }\n                }\n                debug!(\"tx{:?} {} exit\", tokio_task_id!(), _tx_i);\n            }));\n        }\n        drop(tx);\n        'A: loop {\n            match rx.recv().await {\n                Ok(_i) 
=> {\n                    counter += 1;\n                    trace!(\"recv {}\", _i);\n                }\n                Err(_) => break 'A,\n            }\n        }\n        drop(rx);\n        for th in th_s {\n            let _ = async_join_result!(th);\n        }\n        assert_eq!(counter, ROUND * tx_count);\n    });\n}\n\n#[logfn]\n#[rstest]\n#[case(mpmc::bounded_async(1), 5, 5)]\n#[case(mpmc::bounded_async(1), 100, 10)]\n#[case(mpmc::bounded_async(1), 10, 100)]\n#[case(mpmc::bounded_async(1), 300, 300)]\n#[case(mpmc::bounded_async(10), 5, 5)]\n#[case(mpmc::bounded_async(10), 100, 10)]\n#[case(mpmc::bounded_async(10), 10, 100)]\n#[case(mpmc::bounded_async(10), 300, 300)]\n#[case(mpmc::bounded_async(100), 5, 5)]\n#[case(mpmc::bounded_async(100), 100, 10)]\n#[case(mpmc::bounded_async(100), 10, 100)]\n#[case(mpmc::bounded_async(100), 300, 300)]\nfn test_pressure_bounded_async_multi<F: Flavor<Item = usize> + 'static>(\n    setup_log: (), #[case] channel: (MAsyncTx<F>, MAsyncRx<F>), #[case] tx_count: usize,\n    #[case] rx_count: usize,\n) {\n    #[cfg(miri)]\n    {\n        if rx_count > 5 || tx_count > 5 {\n            println!(\"skip\");\n            return;\n        }\n    }\n    let (tx, rx) = channel;\n    runtime_block_on!(async move {\n        let mut th_tx = Vec::new();\n        let mut th_rx = Vec::new();\n        for _tx_i in 0..tx_count {\n            let _tx = tx.clone();\n            th_tx.push(async_spawn!(async move {\n                debug!(\"tx{:?} {} spawn\", tokio_task_id!(), _tx_i);\n                for i in 0..ROUND {\n                    match _tx.send(i).await {\n                        Err(e) => panic!(\"{:?}\", e),\n                        _ => {}\n                    }\n                }\n                debug!(\"tx{:?} {} exit\", tokio_task_id!(), _tx_i);\n            }));\n        }\n        for _rx_i in 0..rx_count {\n            let _rx = rx.clone();\n            th_rx.push(async_spawn!(async move {\n                debug!(\"rx{:?} 
{} spawn\", tokio_task_id!(), _rx_i);\n                let mut count = 0;\n                'A: loop {\n                    match _rx.recv().await {\n                        Ok(_i) => {\n                            count += 1;\n                            trace!(\"recv {} {}\", _rx_i, _i);\n                        }\n                        Err(_) => break 'A,\n                    }\n                }\n                debug!(\"rx{:?} {} exit\", tokio_task_id!(), _rx_i);\n                count\n            }));\n        }\n        drop(tx);\n        drop(rx);\n        for th in th_tx {\n            let _ = async_join_result!(th);\n        }\n        let mut recv_count = 0;\n        for th in th_rx {\n            recv_count += async_join_result!(th);\n        }\n        assert_eq!(recv_count, ROUND * tx_count);\n    });\n}\n\n#[logfn]\n#[rstest]\n#[case(mpmc::bounded_async(1))]\n#[case(mpmc::bounded_async(10))]\n#[case(mpmc::bounded_async(100))]\nfn test_pressure_bounded_mixed_async_blocking_conversion<F: Flavor<Item = usize> + 'static>(\n    setup_log: (), #[case] channel: (MAsyncTx<F>, MAsyncRx<F>),\n) {\n    let (tx, rx) = channel;\n    runtime_block_on!(async move {\n        let mut recv_counter = 0;\n        let mut th_tx = Vec::new();\n        let mut th_rx = Vec::new();\n        let mut co_tx = Vec::new();\n        let mut co_rx = Vec::new();\n        let _tx: MTx<F> = tx.clone().into();\n        th_tx.push(thread::spawn(move || {\n            for i in 0..ROUND {\n                match _tx.send(i) {\n                    Err(e) => panic!(\"{:?}\", e),\n                    _ => {}\n                }\n            }\n            debug!(\"tx blocking exit\");\n        }));\n        co_tx.push(async_spawn!(async move {\n            for i in 0..ROUND {\n                match tx.send(i).await {\n                    Err(e) => panic!(\"{:?}\", e),\n                    _ => {}\n                }\n            }\n            debug!(\"tx{:?} async exit\", 
tokio_task_id!());\n        }));\n        let _rx: MRx<F> = rx.clone().into();\n        th_rx.push(thread::spawn(move || {\n            let mut count: usize = 0;\n            'A: loop {\n                match _rx.recv() {\n                    Ok(_i) => {\n                        count += 1;\n                        trace!(\"recv blocking {}\", _i);\n                    }\n                    Err(_) => break 'A,\n                }\n            }\n            debug!(\"rx blocking exit\");\n            count\n        }));\n\n        co_rx.push(async_spawn!(async move {\n            let mut count: usize = 0;\n            'A: loop {\n                match rx.recv().await {\n                    Ok(_i) => {\n                        count += 1;\n                        trace!(\"recv async {}\", _i);\n                    }\n                    Err(_) => break 'A,\n                }\n            }\n            debug!(\"rx{:?} async exit\", tokio_task_id!());\n            count\n        }));\n        for th in co_tx {\n            let _ = async_join_result!(th);\n        }\n        for th in co_rx {\n            recv_counter += async_join_result!(th);\n        }\n        for th in th_tx {\n            let _ = th.join().unwrap();\n        }\n        for th in th_rx {\n            recv_counter += th.join().unwrap();\n        }\n        assert_eq!(recv_counter, ROUND * 2);\n    });\n}\n\n#[test]\nfn test_conversion() {\n    let (mtx, mrx) = mpmc::bounded_async::<usize>(1);\n    let _tx: AsyncTx<_> = mtx.into();\n    let _rx: AsyncRx<_> = mrx.into();\n    let (_mtx, rx) = mpsc::bounded_async::<usize>(1);\n    let _stream: AsyncStream<_> = rx.into(); // AsyncRx -> AsyncStream\n    let (_mtx, mrx) = mpmc::bounded_async::<usize>(1);\n    let _stream: AsyncStream<_> = mrx.into(); // AsyncRx -> AsyncStream\n}\n\n#[allow(dead_code)]\nstruct SpuriousTx<F: Flavor> {\n    sink: AsyncSink<F>,\n    normal: bool,\n    step: usize,\n}\n\nimpl<F: Flavor<Item = usize> + Unpin> Future for 
SpuriousTx<F> {\n    type Output = Result<usize, usize>;\n\n    fn poll(self: Pin<&mut Self>, ctx: &mut std::task::Context) -> Poll<Self::Output> {\n        let mut _self = self.get_mut();\n        if !_self.normal && _self.step > 0 {\n            return Poll::Ready(Err(_self.step));\n        }\n        match _self.sink.poll_send(ctx, _self.step) {\n            Ok(_) => {\n                let res = _self.step;\n                _self.step += 1;\n                return Poll::Ready(Ok(res));\n            }\n            Err(TrySendError::Disconnected(_)) => {\n                return Poll::Ready(Err(_self.step));\n            }\n            Err(TrySendError::Full(_)) => {\n                _self.step += 1;\n                return Poll::Pending;\n            }\n        }\n    }\n}\n\n#[allow(dead_code)]\nstruct SpuriousRx<F: Flavor> {\n    stream: AsyncStream<F>,\n    normal: bool,\n    step: usize,\n}\n\nimpl<F: Flavor<Item = usize> + Unpin> Future for SpuriousRx<F> {\n    type Output = Result<usize, usize>;\n\n    fn poll(self: Pin<&mut Self>, ctx: &mut std::task::Context) -> Poll<Self::Output> {\n        let mut _self = self.get_mut();\n        if !_self.normal && _self.step > 0 {\n            return Poll::Ready(Err(_self.step));\n        }\n        match _self.stream.poll_item(ctx) {\n            Poll::Ready(Some(item)) => {\n                _self.step += 1;\n                return Poll::Ready(Ok(item));\n            }\n            Poll::Ready(None) => {\n                return Poll::Ready(Err(_self.step));\n            }\n            Poll::Pending => {\n                _self.step += 1;\n                return Poll::Pending;\n            }\n        }\n    }\n}\n\n#[cfg(feature = \"time\")]\n#[logfn]\n#[rstest]\nfn test_spurious_sink(setup_log: ()) {\n    #[cfg(feature = \"tokio\")]\n    {\n        let (tx, rx) = mpmc::bounded_async(1);\n\n        async fn spawn_tx<F: Flavor<Item = usize> + Unpin + 'static>(\n            tx: MAsyncTx<F>, normal: bool,\n        ) {\n    
        let sink = tx.into_sink();\n            let _tx = SpuriousTx { sink, normal, step: 0 };\n            if normal {\n                assert_eq!(_tx.await.expect(\"send ok\"), 1);\n            } else {\n                match tokio::time::timeout(Duration::from_secs(5), _tx).await {\n                    Ok(Err(step)) => {\n                        assert_eq!(step, 1);\n                    }\n                    Ok(Ok(step)) => {\n                        panic!(\"unexpected ok in step={}\", step);\n                    }\n                    Err(_) => {\n                        panic!(\"tokio timeout\");\n                    }\n                }\n            }\n        }\n        runtime_block_on!(async move {\n            tx.send(0).await.expect(\"send\");\n            let _tx = tx.clone();\n            let mut th_s = Vec::new();\n            println!(\"spawn spurious\");\n            // Make sure its the first\n            th_s.push(tokio::spawn(async move { spawn_tx(_tx, false).await }));\n            sleep(Duration::from_secs(1)).await;\n            let _tx = tx.clone();\n            println!(\"spawn normal\");\n            th_s.push(tokio::spawn(async move { spawn_tx(_tx, true).await }));\n            sleep(Duration::from_secs(1)).await;\n            println!(\"recv 1 to wake the 2 senders in Init state\");\n            assert_eq!(rx.recv().await.expect(\"recv\"), 0);\n            for th in th_s {\n                let _ = async_join_result!(th);\n            }\n        });\n    }\n}\n\n#[cfg(feature = \"time\")]\n#[logfn]\n#[rstest]\nfn test_spurious_stream(setup_log: ()) {\n    #[cfg(feature = \"tokio\")]\n    {\n        let (tx, rx) = mpmc::bounded_async(1);\n\n        async fn spawn_rx<F: Flavor<Item = usize> + Unpin + 'static>(\n            rx: MAsyncRx<F>, normal: bool,\n        ) {\n            let stream = rx.into_stream();\n            let _rx = SpuriousRx { stream, normal, step: 0 };\n            if normal {\n                
assert_eq!(_rx.await.expect(\"recv ok\"), 1);\n            } else {\n                if let Ok(Err(step)) = tokio::time::timeout(Duration::from_secs(10), _rx).await {\n                    assert_eq!(step, 1);\n                } else {\n                    unreachable!();\n                }\n            }\n        }\n        runtime_block_on!(async move {\n            let _rx = rx.clone();\n            let mut th_s = Vec::new();\n            println!(\"spawn spurious\");\n            // Make sure its the first\n            th_s.push(tokio::spawn(async move { spawn_rx(_rx, false).await }));\n            sleep(Duration::from_millis(500)).await;\n            let _rx = rx.clone();\n            println!(\"spawn normal\");\n            th_s.push(tokio::spawn(async move { spawn_rx(_rx, true).await }));\n            sleep(Duration::from_secs(1)).await;\n            println!(\"send\");\n            tx.send(1).await.expect(\"send\");\n            sleep(Duration::from_secs(2)).await;\n            for th in th_s {\n                let _ = async_join_result!(th);\n            }\n        });\n    }\n}\n\n#[logfn]\n#[rstest]\n#[case(spsc::bounded_async(1))]\n#[case(spsc::bounded_async(2))]\n#[case(mpsc::bounded_async(1))]\n#[case(mpsc::bounded_async(2))]\n#[case(mpmc::bounded_async(1))]\n#[case(mpmc::bounded_async(2))]\nfn test_basic_into_stream_1_1<T: AsyncTxTrait<usize>, R: AsyncRxTrait<usize>>(\n    setup_log: (), #[case] channel: (T, R),\n) {\n    runtime_block_on!(async move {\n        let total_message = 100;\n        let (tx, rx) = channel;\n        let th = async_spawn!(async move {\n            println!(\"sender thread send {} message start\", total_message);\n            for i in 0usize..total_message {\n                let _ = tx.send(i).await;\n                // println!(\"send {}\", i);\n            }\n            println!(\"sender thread send {} message end\", total_message);\n        });\n        let mut s: Pin<Box<dyn Stream<Item = usize>>> = rx.to_stream();\n\n   
     for _i in 0..total_message {\n            assert_eq!(s.next().await, Some(_i));\n        }\n        assert_eq!(s.next().await, None);\n        //assert!(s.is_terminated());\n        async_join_result!(th);\n    });\n}\n\n#[logfn]\n#[rstest]\n#[case(mpmc::bounded_async(1), 2)]\n#[case(mpmc::bounded_async(2), 4)]\n#[case(mpmc::bounded_async(2), 10)]\n#[case(mpmc::bounded_async(10), 3)]\n#[case(mpmc::bounded_async(10), 30)]\n#[case(mpmc::bounded_async(100), 2)]\n#[case(mpmc::bounded_async(100), 4)]\n#[case(mpmc::bounded_async(100), 50)]\nfn test_pressure_stream_multi<F: Flavor<Item = usize> + 'static>(\n    setup_log: (), #[case] channel: (MAsyncTx<F>, MAsyncRx<F>), #[case] rx_count: usize,\n) {\n    #[cfg(miri)]\n    {\n        if rx_count > 5 {\n            println!(\"skip\");\n            return;\n        }\n    }\n    runtime_block_on!(async move {\n        let (tx, rx) = channel;\n        let mut th_s = Vec::new();\n        let mut recv_counter = 0;\n        for rx_i in 0..rx_count {\n            let _rx = rx.clone();\n            th_s.push(async_spawn!(async move {\n                let mut counter = 0;\n                let mut stream = _rx.into_stream();\n                while let Some(_item) = stream.next().await {\n                    counter += 1;\n                }\n                debug!(\"rx{:?} {} exit\", tokio_task_id!(), rx_i);\n                counter\n            }));\n        }\n        drop(rx);\n        for i in 0..ROUND {\n            tx.send(i).await.expect(\"send\");\n        }\n        drop(tx);\n        for th in th_s {\n            recv_counter += async_join_result!(th);\n        }\n        assert_eq!(recv_counter, ROUND);\n    });\n}\n\n#[cfg(feature = \"time\")]\n#[logfn]\n#[rstest]\n#[case(mpmc::bounded_async(1), 2)]\n#[case(mpmc::bounded_async(2), 4)]\n#[case(mpmc::bounded_async(2), 10)]\n#[case(mpmc::bounded_async(10), 3)]\n#[case(mpmc::bounded_async(10), 30)]\n#[case(mpmc::bounded_async(100), 2)]\n#[case(mpmc::bounded_async(100), 
4)]\n#[case(mpmc::bounded_async(100), 50)]\nfn test_pressure_stream_multi_idle<F: Flavor<Item = usize> + 'static>(\n    setup_log: (), #[case] channel: (MAsyncTx<F>, MAsyncRx<F>), #[case] rx_count: usize,\n) {\n    #[cfg(miri)]\n    {\n        if rx_count > 5 {\n            println!(\"skip\");\n            return;\n        }\n    }\n    runtime_block_on!(async move {\n        let total_message = ROUND / rx_count;\n        let (tx, rx) = channel;\n        let mut th_s = Vec::new();\n        for rx_i in 0..rx_count {\n            let _rx = rx.clone();\n            th_s.push(async_spawn!(async move {\n                debug!(\"rx{:?} {} spawn\", tokio_task_id!(), rx_i);\n                let mut count = 0;\n                let mut stream = _rx.into_stream();\n                while let Some(_item) = stream.next().await {\n                    count += 1;\n                }\n                debug!(\"rx{:?} {} exit\", tokio_task_id!(), rx_i);\n                count\n            }));\n        }\n        drop(rx);\n        for i in 0..total_message {\n            tx.send(i).await.expect(\"send\");\n            sleep(Duration::from_millis(3)).await;\n        }\n        drop(tx);\n\n        let mut recv_counter = 0;\n        for th in th_s {\n            recv_counter += async_join_result!(th);\n        }\n        assert_eq!(recv_counter, total_message);\n    });\n}\n\n// This test make sure we have correctly use of maybeuninit\n#[logfn]\n#[rstest]\n#[case(spsc::bounded_async(1))]\n#[case(spsc::bounded_async(10))]\n#[case(mpsc::bounded_async(1))]\n#[case(mpsc::bounded_async(10))]\n#[case(mpmc::bounded_async(1))]\n#[case(mpmc::bounded_async(10))]\nfn test_async_drop_small_msg<T: AsyncTxTrait<SmallMsg>, R: AsyncRxTrait<SmallMsg>>(\n    setup_log: (), #[case] channel: (T, R),\n) {\n    println!(\"needs_drop {}\", std::mem::needs_drop::<SmallMsg>());\n    _test_async_drop_msg(channel);\n}\n\n// This test make sure we have correctly use of 
maybeuninit\n#[logfn]\n#[rstest]\n#[case(spsc::bounded_async(1))]\n#[case(spsc::bounded_async(10))]\n#[case(mpsc::bounded_async(1))]\n#[case(mpsc::bounded_async(10))]\n#[case(mpmc::bounded_async(1))]\n#[case(mpmc::bounded_async(10))]\nfn test_async_drop_large_msg<T: AsyncTxTrait<LargeMsg>, R: AsyncRxTrait<LargeMsg>>(\n    setup_log: (), #[case] channel: (T, R),\n) {\n    println!(\"needs_drop {}\", std::mem::needs_drop::<LargeMsg>());\n    _test_async_drop_msg(channel);\n}\n\nfn _test_async_drop_msg<M: TestDropMsg, T: AsyncTxTrait<M>, R: AsyncRxTrait<M>>(channel: (T, R)) {\n    let (tx, rx) = channel;\n    reset_drop_counter();\n    runtime_block_on!(async move {\n        let cap = tx.capacity().unwrap();\n        let mut ids = cap;\n        for i in 0..ids {\n            let msg = M::new(i);\n            assert!(tx.try_send(msg).is_ok());\n        }\n        assert_eq!(get_drop_counter(), 0);\n        let msg = M::new(ids);\n        if let Err(TrySendError::Full(_msg)) = tx.try_send(msg) {\n            assert_eq!(_msg.get_value(), ids);\n            assert_eq!(get_drop_counter(), 0);\n            drop(_msg);\n            assert_eq!(get_drop_counter(), 1);\n        } else {\n            unreachable!();\n        }\n        let th = async_spawn!(async move {\n            let _msg = rx.recv().await.expect(\"recv\");\n            assert_eq!(_msg.get_value(), 0);\n            drop(_msg);\n            rx\n        });\n        let msg = M::new(ids);\n        tx.send(msg).await.expect(\"send\");\n        ids += 1;\n        let rx = async_join_result!(th);\n        drop(rx);\n        assert_eq!(get_drop_counter(), 2);\n        let msg = M::new(ids);\n        if let Err(TrySendError::Disconnected(_msg)) = tx.try_send(msg) {\n            assert_eq!(_msg.get_value(), ids);\n        } else {\n            unreachable!();\n        }\n        ids += 1;\n        let msg = M::new(ids);\n        if let Err(SendError(_msg)) = tx.send(msg).await {\n            
assert_eq!(_msg.get_value(), ids);\n        } else {\n            unreachable!();\n        }\n        assert_eq!(get_drop_counter(), 4);\n        ids += 1;\n        drop(tx);\n        // every thing dropped inside the channel\n        assert_eq!(get_drop_counter(), ids + 1); // ids begins at 0\n        assert_eq!(get_drop_counter(), 4 + cap);\n    });\n}\n"
  },
  {
    "path": "test-suite/src/test_async_blocking.rs",
    "content": "use crate::*;\nuse captains_log::{logfn, *};\nuse crossfire::flavor::Flavor;\nuse crossfire::tokio_task_id;\nuse crossfire::*;\nuse rstest::*;\nuse std::thread;\nuse std::time::Duration;\n\n#[fixture]\nfn setup_log() {\n    _setup_log();\n}\n\n#[logfn]\n#[rstest]\n#[case(spsc::bounded_async_blocking(1))]\n#[case(mpsc::bounded_async_blocking(1))]\n#[case(mpmc::bounded_async_blocking(1))]\nfn test_basic_bounded_empty_full_drop_rx<T: AsyncTxTrait<usize>, R: BlockingRxTrait<usize>>(\n    setup_log: (), #[case] channel: (T, R),\n) {\n    let (tx, rx) = channel;\n    assert!(tx.is_empty());\n    assert!(rx.is_empty());\n    tx.try_send(1).expect(\"Ok\");\n    assert!(tx.is_full());\n    assert!(rx.is_full());\n    assert!(!tx.is_empty());\n    assert_eq!(tx.is_disconnected(), false);\n    assert_eq!(rx.is_disconnected(), false);\n    drop(rx);\n    assert_eq!(tx.is_disconnected(), true);\n    assert_eq!(tx.get_rx_count(), 0);\n    assert_eq!(tx.get_tx_count(), 1);\n}\n\n#[logfn]\n#[rstest]\n#[case(spsc::bounded_async_blocking(1))]\n#[case(mpsc::bounded_async_blocking(1))]\n#[case(mpmc::bounded_async_blocking(1))]\nfn test_basic_bounded_empty_full_drop_tx<T: AsyncTxTrait<usize>, R: BlockingRxTrait<usize>>(\n    setup_log: (), #[case] channel: (T, R),\n) {\n    let (tx, rx) = channel;\n    assert!(tx.is_empty());\n    assert!(rx.is_empty());\n    tx.try_send(1).expect(\"Ok\");\n    assert!(tx.is_full());\n    assert!(rx.is_full());\n    assert!(!tx.is_empty());\n    assert_eq!(tx.is_disconnected(), false);\n    assert_eq!(rx.is_disconnected(), false);\n    drop(tx);\n    assert_eq!(rx.is_disconnected(), true);\n    assert_eq!(rx.get_tx_count(), 0);\n    assert_eq!(rx.get_rx_count(), 1);\n}\n\n#[logfn]\n#[rstest]\nfn test_basic_compile_bounded_empty_full() {\n    let (tx, rx) = mpmc::bounded_async_blocking(1);\n    assert!(tx.is_empty());\n    assert!(rx.is_empty());\n    tx.try_send(1).expect(\"ok\");\n    assert!(tx.is_full());\n    
assert!(!tx.is_empty());\n    assert!(rx.is_full());\n    assert_eq!(tx.get_tx_count(), 1);\n    assert_eq!(rx.get_tx_count(), 1);\n    assert_eq!(tx.is_disconnected(), false);\n    assert_eq!(rx.is_disconnected(), false);\n    drop(rx);\n    assert_eq!(tx.is_disconnected(), true);\n}\n\n#[cfg(feature = \"time\")]\n#[logfn]\n#[rstest]\n#[case(spsc::bounded_async_blocking(100))]\n#[case(mpsc::bounded_async_blocking(100))]\n#[case(mpmc::bounded_async_blocking(100))]\nfn test_basic_1_tx_async_1_rx_blocking<T: AsyncTxTrait<usize>, R: BlockingRxTrait<usize>>(\n    setup_log: (), #[case] channel: (T, R),\n) {\n    let (tx, rx) = channel;\n    let rx_res = rx.try_recv();\n    assert!(rx_res.is_err());\n    assert!(rx_res.unwrap_err().is_empty());\n    let batch_1: usize = 100;\n    let batch_2: usize = 200;\n    let th = thread::spawn(move || {\n        for count in 0..(batch_1 + batch_2) {\n            match rx.recv() {\n                Ok(i) => {\n                    trace!(\"recv {}\", i);\n                    if count < batch_1 {\n                        // First batch: values 0..batch_1\n                        assert_eq!(i, count);\n                    } else {\n                        // Second batch: values 10+batch_1..10+batch_1+batch_2\n                        assert_eq!(i, 10 + count);\n                    }\n                }\n                Err(e) => {\n                    panic!(\"error {}\", e);\n                }\n            }\n        }\n        let res = rx.recv();\n        assert!(res.is_err());\n    });\n\n    runtime_block_on!(async move {\n        for i in 0..batch_1 {\n            let tx_res = tx.send(i).await;\n            assert!(tx_res.is_ok());\n        }\n        for i in batch_1..(batch_1 + batch_2) {\n            assert!(tx.send(10 + i).await.is_ok());\n            sleep(Duration::from_millis(2)).await;\n        }\n    });\n    let _ = th.join().unwrap();\n}\n\n#[cfg(feature = 
\"time\")]\n#[logfn]\n#[rstest]\n#[case(mpsc::bounded_async_blocking(10), 5)]\n#[case(mpsc::bounded_async_blocking(10), 8)]\n#[case(mpsc::bounded_async_blocking(10), 100)]\n#[case(mpsc::bounded_async_blocking(10), 1000)]\n#[case(mpmc::bounded_async_blocking(10), 5)]\n#[case(mpmc::bounded_async_blocking(10), 8)]\n#[case(mpmc::bounded_async_blocking(10), 100)]\nfn test_basic_multi_tx_async_1_rx_blocking<\n    F: Flavor<Item = usize> + 'static,\n    R: BlockingRxTrait<usize>,\n>(\n    setup_log: (), #[case] channel: (MAsyncTx<F>, R), #[case] tx_count: usize,\n) {\n    let (tx, rx) = channel;\n    let batch_1: usize;\n    let batch_2: usize;\n\n    #[cfg(miri)]\n    {\n        if tx_count > 5 {\n            println!(\"skip\");\n            return;\n        }\n        batch_1 = 10;\n        batch_2 = 20;\n    }\n    #[cfg(not(miri))]\n    {\n        batch_1 = 100;\n        batch_2 = 200;\n    }\n    let rx_res = rx.try_recv();\n    assert!(rx_res.is_err());\n    assert!(rx_res.unwrap_err().is_empty());\n    let th = thread::spawn(move || {\n        for _ in 0..((batch_1 + batch_2) * tx_count) {\n            match rx.recv() {\n                Ok(i) => {\n                    trace!(\"recv {}\", i);\n                }\n                Err(e) => {\n                    panic!(\"error {}\", e);\n                }\n            }\n        }\n        let res = rx.recv();\n        assert!(res.is_err());\n        // Wait for spawn exit\n    });\n    runtime_block_on!(async move {\n        let mut th_s = Vec::new();\n        for _tx_i in 0..tx_count {\n            let _tx = tx.clone();\n            th_s.push(async_spawn!(async move {\n                for i in 0..batch_1 {\n                    let tx_res = _tx.send(i).await;\n                    assert!(tx_res.is_ok());\n                }\n                for i in batch_1..(batch_1 + batch_2) {\n                    assert!(_tx.send(10 + i).await.is_ok());\n                    sleep(Duration::from_millis(2)).await;\n                
}\n            }));\n        }\n        drop(tx);\n\n        for th in th_s {\n            let _ = async_join_result!(th);\n        }\n    });\n    let _ = th.join().unwrap();\n}\n\n#[logfn]\n#[rstest]\n#[case(spsc::bounded_async_blocking(1))]\n#[case(spsc::bounded_async_blocking(10))]\n#[case(spsc::bounded_async_blocking(100))]\n#[case(spsc::bounded_async_blocking(1000))]\n#[case(mpsc::bounded_async_blocking(1))]\n#[case(mpsc::bounded_async_blocking(10))]\n#[case(mpsc::bounded_async_blocking(100))]\n#[case(mpsc::bounded_async_blocking(1000))]\n#[case(mpmc::bounded_async_blocking(1))]\n#[case(mpmc::bounded_async_blocking(10))]\n#[case(mpmc::bounded_async_blocking(100))]\n#[case(mpmc::bounded_async_blocking(1000))]\nfn test_pressure_1_tx_async_1_rx_blocking<T: AsyncTxTrait<usize>, R: BlockingRxTrait<usize>>(\n    setup_log: (), #[case] channel: (T, R),\n) {\n    let (tx, rx) = channel;\n\n    let round: usize;\n    #[cfg(miri)]\n    {\n        round = ROUND;\n    }\n    #[cfg(not(miri))]\n    {\n        round = ROUND * 100;\n    }\n\n    let th = thread::spawn(move || {\n        let mut count = 0;\n        'A: loop {\n            match rx.recv() {\n                Ok(i) => {\n                    assert_eq!(i, count);\n                    count += 1;\n                    trace!(\"recv {}\", i);\n                }\n                Err(_) => break 'A,\n            }\n        }\n        debug!(\"rx exit\");\n        count\n    });\n    runtime_block_on!(async move {\n        for i in 0..round {\n            match tx.send(i).await {\n                Err(e) => panic!(\"{}\", e),\n                _ => {}\n            }\n        }\n        debug!(\"tx{:?} exit\", tokio_task_id!());\n    });\n    let rx_count = th.join().unwrap();\n    assert_eq!(rx_count, round);\n}\n\n#[logfn]\n#[rstest]\n#[case(mpsc::bounded_async_blocking(10), 5)]\n#[case(mpsc::bounded_async_blocking(10), 10)]\n#[case(mpsc::bounded_async_blocking(10), 100)]\n#[case(mpsc::bounded_async_blocking(100), 
50)]\n#[case(mpmc::bounded_async_blocking(10), 5)]\n#[case(mpmc::bounded_async_blocking(10), 100)]\n#[case(mpmc::bounded_async_blocking(10), 10)]\n#[case(mpmc::bounded_async_blocking(10), 1000)]\n#[case(mpmc::bounded_async_blocking(100), 100)]\nfn test_pressure_multi_tx_async_1_rx_blocking<\n    F: Flavor<Item = usize> + 'static,\n    R: BlockingRxTrait<usize>,\n>(\n    setup_log: (), #[case] channel: (MAsyncTx<F>, R), #[case] tx_count: usize,\n) {\n    let (tx, rx) = channel;\n    #[cfg(miri)]\n    {\n        if tx_count > 5 {\n            println!(\"skip\");\n            return;\n        }\n    }\n\n    let round: usize = ROUND;\n    let th = thread::spawn(move || {\n        let mut count = 0;\n        'A: loop {\n            match rx.recv() {\n                Ok(_i) => {\n                    count += 1;\n                    trace!(\"recv {}\", _i);\n                }\n                Err(_) => break 'A,\n            }\n        }\n        debug!(\"rx exit\");\n        count\n    });\n    runtime_block_on!(async move {\n        let mut th_co = Vec::new();\n        for _tx_i in 0..tx_count {\n            let _tx = tx.clone();\n            th_co.push(async_spawn!(async move {\n                debug!(\"tx{:?} {} spawn\", tokio_task_id!(), _tx_i);\n                for i in 0..round {\n                    match _tx.send(i).await {\n                        Err(e) => panic!(\"{}\", e),\n                        _ => {}\n                    }\n                }\n                debug!(\"tx{:?} {} exit\", tokio_task_id!(), _tx_i);\n            }));\n        }\n        drop(tx);\n        for th in th_co {\n            let _ = async_join_result!(th);\n        }\n    });\n    let rx_count = th.join().unwrap();\n    assert_eq!(rx_count, round * tx_count);\n}\n\n#[logfn]\n#[rstest]\n#[case(mpmc::bounded_async_blocking(10), 5, 5)]\n#[case(mpmc::bounded_async_blocking(10), 100, 50)]\n#[case(mpmc::bounded_async_blocking(10), 10, 100)]\n#[case(mpmc::bounded_async_blocking(100), 300, 
100)]\nfn test_pressure_multi_tx_async_multi_rx_blocking<F: Flavor<Item = usize> + 'static>(\n    setup_log: (), #[case] channel: (MAsyncTx<F>, MRx<F>), #[case] tx_count: usize,\n    #[case] rx_count: usize,\n) {\n    let (tx, rx) = channel;\n    #[cfg(miri)]\n    {\n        if tx_count > 5 || rx_count > 5 {\n            println!(\"skip\");\n            return;\n        }\n    }\n\n    let round: usize = ROUND;\n    let mut rx_th_s = Vec::new();\n    for _rx_i in 0..rx_count {\n        let _rx = rx.clone();\n        rx_th_s.push(thread::spawn(move || {\n            debug!(\"rx {} spawn\", _rx_i);\n            let mut count = 0;\n            'A: loop {\n                match _rx.recv() {\n                    Ok(i) => {\n                        count += 1;\n                        trace!(\"recv {} {}\", _rx_i, i);\n                    }\n                    Err(_) => break 'A,\n                }\n            }\n            debug!(\"rx {} exit\", _rx_i);\n            count\n        }));\n    }\n    drop(rx);\n    runtime_block_on!(async move {\n        let mut th_co = Vec::new();\n        for _tx_i in 0..tx_count {\n            let _tx = tx.clone();\n            th_co.push(async_spawn!(async move {\n                debug!(\"tx{:?} {} spawn\", tokio_task_id!(), _tx_i);\n                for i in 0..round {\n                    match _tx.send(i).await {\n                        Err(e) => panic!(\"{}\", e),\n                        _ => {}\n                    }\n                }\n                debug!(\"tx{:?} {} exit\", tokio_task_id!(), _tx_i);\n            }));\n        }\n        drop(tx);\n        for th in th_co {\n            let _ = async_join_result!(th);\n        }\n    });\n    let mut total_count = 0;\n    for th in rx_th_s {\n        total_count += th.join().unwrap();\n    }\n    assert_eq!(total_count, round * tx_count);\n}\n"
  },
  {
    "path": "test-suite/src/test_blocking_async.rs",
    "content": "use crate::*;\nuse captains_log::{logfn, *};\nuse crossfire::flavor::Flavor;\nuse crossfire::tokio_task_id;\nuse crossfire::*;\nuse rstest::*;\nuse std::time::*;\n\n#[fixture]\nfn setup_log() {\n    _setup_log();\n}\n\n#[logfn]\n#[rstest]\n#[case(spsc::bounded_blocking_async(1))]\n#[case(mpsc::bounded_blocking_async(1))]\n#[case(mpmc::bounded_blocking_async(1))]\nfn test_basic_bounded_empty_full_drop_rx<T: BlockingTxTrait<usize>, R: AsyncRxTrait<usize>>(\n    setup_log: (), #[case] channel: (T, R),\n) {\n    let (tx, rx) = channel;\n    assert!(tx.is_empty());\n    assert!(rx.is_empty());\n    tx.try_send(1).expect(\"Ok\");\n    assert!(tx.is_full());\n    assert!(rx.is_full());\n    assert!(!tx.is_empty());\n    assert_eq!(tx.is_disconnected(), false);\n    assert_eq!(rx.is_disconnected(), false);\n    drop(rx);\n    assert_eq!(tx.is_disconnected(), true);\n    assert_eq!(tx.get_rx_count(), 0);\n    assert_eq!(tx.get_tx_count(), 1);\n}\n\n#[logfn]\n#[rstest]\n#[case(spsc::bounded_blocking_async(1))]\n#[case(mpsc::bounded_blocking_async(1))]\n#[case(mpmc::bounded_blocking_async(1))]\nfn test_basic_bounded_empty_full_drop_tx<T: BlockingTxTrait<usize>, R: AsyncRxTrait<usize>>(\n    setup_log: (), #[case] channel: (T, R),\n) {\n    let (tx, rx) = channel;\n    assert!(tx.is_empty());\n    assert!(rx.is_empty());\n    tx.try_send(1).expect(\"Ok\");\n    assert!(tx.is_full());\n    assert!(rx.is_full());\n    assert!(!tx.is_empty());\n    assert_eq!(tx.is_disconnected(), false);\n    assert_eq!(rx.is_disconnected(), false);\n    drop(tx);\n    assert_eq!(rx.is_disconnected(), true);\n    assert_eq!(rx.get_tx_count(), 0);\n    assert_eq!(rx.get_rx_count(), 1);\n}\n\n#[logfn]\n#[rstest]\n#[case(spsc::unbounded_async())]\n#[case(mpsc::unbounded_async())]\n#[case(mpmc::unbounded_async())]\nfn test_basic_unbounded_empty_drop_tx<T: BlockingTxTrait<usize>, R: AsyncRxTrait<usize>>(\n    setup_log: (), #[case] channel: (T, R),\n) {\n    let (tx, rx) = channel;\n  
  assert!(tx.is_empty());\n    assert!(rx.is_empty());\n    tx.try_send(1).expect(\"Ok\");\n    assert!(!tx.is_empty());\n    assert_eq!(tx.is_disconnected(), false);\n    assert_eq!(rx.is_disconnected(), false);\n    drop(tx);\n    assert_eq!(rx.is_disconnected(), true);\n    assert_eq!(rx.get_tx_count(), 0);\n    assert_eq!(rx.get_rx_count(), 1);\n}\n\n#[logfn]\n#[rstest]\nfn test_basic_compile_bounded_empty_full() {\n    let (tx, rx) = mpmc::bounded_blocking_async(1);\n    assert!(tx.is_empty());\n    assert!(rx.is_empty());\n    tx.try_send(1).expect(\"ok\");\n    assert!(tx.is_full());\n    assert!(!tx.is_empty());\n    assert!(rx.is_full());\n    assert_eq!(tx.get_tx_count(), 1);\n    assert_eq!(rx.get_tx_count(), 1);\n    assert_eq!(tx.is_disconnected(), false);\n    assert_eq!(rx.is_disconnected(), false);\n    drop(rx);\n    assert_eq!(tx.is_disconnected(), true);\n}\n\n#[logfn]\n#[rstest]\n#[case(spsc::bounded_blocking_async(10))]\n#[case(mpsc::bounded_blocking_async(10))]\n#[case(mpmc::bounded_blocking_async(10))]\nfn test_basic_1_tx_blocking_1_rx_async<T: BlockingTxTrait<usize>, R: AsyncRxTrait<usize>>(\n    setup_log: (), #[case] channel: (T, R),\n) {\n    let (tx, rx) = channel;\n    let rx_res = rx.try_recv();\n    assert!(rx_res.is_err());\n    assert!(rx_res.unwrap_err().is_empty());\n    for i in 0usize..10 {\n        let tx_res = tx.send(i);\n        assert!(tx_res.is_ok());\n    }\n    let tx_res = tx.try_send(11);\n    assert!(tx_res.is_err());\n    assert!(tx_res.unwrap_err().is_full());\n\n    let th = spawn_named_thread(\"sender_1\", move || {\n        assert!(tx.send(10).is_ok());\n        std::thread::sleep(Duration::from_secs(1));\n        assert!(tx.send(11).is_ok());\n    });\n    runtime_block_on!(async move {\n        for i in 0usize..12 {\n            match rx.recv().await {\n                Ok(j) => {\n                    trace!(\"recv {}\", i);\n                    assert_eq!(i, j);\n                }\n                Err(e) => {\n 
                   panic!(\"error {}\", e);\n                }\n            }\n        }\n        let res = rx.recv().await;\n        assert!(res.is_err());\n        trace!(\"rx close\");\n    });\n    let _ = th.join().unwrap();\n}\n\n#[logfn]\n#[rstest]\n#[case(spsc::bounded_blocking_async(1))]\n#[case(mpsc::bounded_blocking_async(1))]\n#[case(mpmc::bounded_blocking_async(1))]\n#[case(spsc::bounded_blocking_async(100))]\n#[case(mpsc::bounded_blocking_async(100))]\n#[case(mpmc::bounded_blocking_async(100))]\n#[case(spsc::unbounded_async())]\n#[case(mpsc::unbounded_async())]\n#[case(mpmc::unbounded_async())]\nfn test_pressure_1_tx_blocking_1_rx_async<T: BlockingTxTrait<usize>, R: AsyncRxTrait<usize>>(\n    setup_log: (), #[case] channel: (T, R),\n) {\n    let (tx, rx) = channel;\n    let round: usize;\n    #[cfg(miri)]\n    {\n        round = ROUND;\n    }\n    #[cfg(not(miri))]\n    {\n        round = ROUND * 100;\n    }\n    let th = spawn_named_thread(\"sender_2\", move || {\n        for i in 0..round {\n            tx.send(i).expect(\"send ok\");\n        }\n    });\n    runtime_block_on!(async move {\n        for i in 0..round {\n            match rx.recv().await {\n                Ok(msg) => {\n                    trace!(\"recv {}\", msg);\n                    assert_eq!(msg, i);\n                }\n                Err(_e) => {\n                    panic!(\"channel closed\");\n                }\n            }\n        }\n        assert!(rx.recv().await.is_err());\n    });\n    let _ = th.join().unwrap();\n}\n\n#[logfn]\n#[rstest]\n#[case(mpsc::bounded_blocking_async(1), 5)]\n#[case(mpsc::bounded_blocking_async(1), 100)]\n#[case(mpsc::bounded_blocking_async(1), 200)]\n#[case(mpsc::bounded_blocking_async(100), 10)]\n#[case(mpsc::bounded_blocking_async(100), 100)]\n#[case(mpsc::bounded_blocking_async(100), 200)]\n#[case(mpmc::bounded_blocking_async(1), 5)]\n#[case(mpmc::bounded_blocking_async(1), 100)]\n#[case(mpmc::bounded_blocking_async(1), 
300)]\n#[case(mpmc::bounded_blocking_async(100), 5)]\n#[case(mpmc::bounded_blocking_async(100), 100)]\n#[case(mpmc::bounded_blocking_async(100), 200)]\n#[case(mpsc::unbounded_async(), 5)]\n#[case(mpsc::unbounded_async(), 100)]\n#[case(mpsc::unbounded_async(), 300)]\n#[case(mpmc::unbounded_async(), 6)]\n#[case(mpmc::unbounded_async(), 100)]\n#[case(mpmc::unbounded_async(), 300)]\nfn test_pressure_tx_multi_blocking_1_rx_async<\n    F: Flavor<Item = usize> + 'static,\n    R: AsyncRxTrait<usize>,\n>(\n    setup_log: (), #[case] channel: (MTx<F>, R), #[case] tx_count: usize,\n) {\n    let (tx, rx) = channel;\n    #[cfg(miri)]\n    {\n        if tx_count > 5 {\n            println!(\"skip\");\n            return;\n        }\n    }\n\n    let mut tx_th_s = Vec::new();\n    for _tx_i in 0..tx_count {\n        let _tx = tx.clone();\n        tx_th_s.push(spawn_named_thread(&format!(\"sender_{}\", _tx_i), move || {\n            debug!(\"tx {} spawn\", _tx_i);\n            for i in 0..ROUND {\n                match _tx.send(i) {\n                    Err(e) => panic!(\"{}\", e),\n                    _ => {\n                        trace!(\"tx {} {}\", _tx_i, i);\n                    }\n                }\n            }\n            debug!(\"tx {} exit\", _tx_i);\n        }));\n    }\n    drop(tx);\n    let rx_count = runtime_block_on!(async move {\n        let mut count = 0;\n        'A: loop {\n            match rx.recv().await {\n                Ok(_i) => {\n                    count += 1;\n                    trace!(\"rx {}\", _i);\n                }\n                Err(_) => break 'A,\n            }\n        }\n        count\n    });\n    for th in tx_th_s {\n        let _ = th.join().unwrap();\n    }\n    assert_eq!(rx_count, ROUND * tx_count);\n}\n\n#[logfn]\n#[rstest]\n#[case(mpmc::bounded_blocking_async(1), 5, 5)]\n#[case(mpmc::bounded_blocking_async(1), 20, 20)]\n#[case(mpmc::bounded_blocking_async(1), 20, 200)]\n#[case(mpmc::bounded_blocking_async(10), 10, 
10)]\n#[case(mpmc::bounded_blocking_async(10), 50, 20)]\n#[case(mpmc::bounded_blocking_async(10), 100, 200)]\n#[case(mpmc::bounded_blocking_async(100), 10, 200)]\n#[case(mpmc::bounded_blocking_async(100), 100, 200)]\n#[case(mpmc::bounded_blocking_async(100), 300, 300)]\n#[case(mpmc::bounded_blocking_async(100), 30, 500)]\n#[case(mpmc::unbounded_async(), 5, 5)]\n#[case(mpmc::unbounded_async(), 50, 20)]\n#[case(mpmc::unbounded_async(), 200, 200)]\n#[case(mpmc::unbounded_async(), 10, 200)]\n#[case(mpmc::unbounded_async(), 100, 200)]\n#[case(mpmc::unbounded_async(), 300, 300)]\n#[case(mpmc::unbounded_async(), 30, 500)]\nfn test_pressure_tx_multi_blocking_multi_rx_async<F: Flavor<Item = usize> + 'static>(\n    setup_log: (), #[case] channel: (MTx<F>, MAsyncRx<F>), #[case] tx_count: usize,\n    #[case] rx_count: usize,\n) {\n    let (tx, rx) = channel;\n    #[cfg(miri)]\n    {\n        if tx_count > 5 || rx_count > 5 {\n            println!(\"skip\");\n            return;\n        }\n    }\n\n    let mut tx_th_s = Vec::new();\n    for _tx_i in 0..tx_count {\n        let _tx = tx.clone();\n        tx_th_s.push(spawn_named_thread(&format!(\"sender_{}\", _tx_i), move || {\n            for i in 0..ROUND {\n                match _tx.send(i) {\n                    Err(e) => panic!(\"{}\", e),\n                    _ => {\n                        trace!(\"tx {} {}\", _tx_i, i);\n                    }\n                }\n            }\n            debug!(\"tx {} exit\", _tx_i);\n        }));\n    }\n    drop(tx);\n    let total_count = runtime_block_on!(async move {\n        let mut th_co = Vec::new();\n        for _rx_i in 0..rx_count {\n            let _rx = rx.clone();\n            th_co.push(async_spawn!(async move {\n                debug!(\"rx{:?} {} spawn\", tokio_task_id!(), _rx_i);\n                let mut count = 0;\n                'A: loop {\n                    match _rx.recv().await {\n                        Ok(_i) => {\n                            count += 1;\n    
                        trace!(\"rx {} {}\", _rx_i, _i);\n                        }\n                        Err(_) => break 'A,\n                    }\n                }\n                debug!(\"rx{:?} {} exit\", tokio_task_id!(), _rx_i);\n                count\n            }));\n        }\n        drop(rx);\n        let mut total = 0;\n        for th in th_co {\n            total += async_join_result!(th);\n        }\n        total\n    });\n    for th in tx_th_s {\n        let _ = th.join().unwrap();\n    }\n    assert_eq!(total_count, tx_count * ROUND);\n}\n"
  },
  {
    "path": "test-suite/src/test_blocking_context.rs",
    "content": "use crate::*;\nuse captains_log::{logfn, *};\nuse crossfire::flavor::Flavor;\nuse crossfire::*;\nuse rstest::*;\nuse std::sync::Arc;\nuse std::thread::sleep;\nuse std::time::{Duration, Instant};\n\n#[fixture]\nfn setup_log() {\n    _setup_log();\n}\n\n#[logfn]\n#[rstest]\n#[case(spsc::bounded_blocking(1))]\n#[case(mpsc::bounded_blocking(1))]\n#[case(mpmc::bounded_blocking(1))]\nfn test_basic_bounded_empty_full_drop_rx<T: BlockingTxTrait<usize>, R: BlockingRxTrait<usize>>(\n    setup_log: (), #[case] _channel: (T, R),\n) {\n    // Just don't want to run duplicately in the workflow\n    #[cfg(not(feature = \"async_std\"))]\n    {\n        let (tx, rx) = _channel;\n        assert!(tx.is_empty());\n        assert!(rx.is_empty());\n        assert_eq!(tx.capacity(), Some(1));\n        assert_eq!(rx.capacity(), Some(1));\n        tx.try_send(1).expect(\"Ok\");\n        assert!(tx.is_full());\n        assert!(rx.is_full());\n        assert!(!tx.is_empty());\n        assert_eq!(tx.is_disconnected(), false);\n        assert_eq!(rx.is_disconnected(), false);\n        drop(rx);\n        assert_eq!(tx.is_disconnected(), true);\n        assert_eq!(tx.get_rx_count(), 0);\n        assert_eq!(tx.get_tx_count(), 1);\n        assert_eq!(tx.try_send(2).unwrap_err(), TrySendError::Disconnected(2));\n        assert_eq!(tx.send(2).unwrap_err(), SendError(2));\n        let start = Instant::now();\n        assert_eq!(\n            tx.send_timeout(3, Duration::from_secs(1)).unwrap_err(),\n            SendTimeoutError::Disconnected(3)\n        );\n        assert!(Instant::now() - start < Duration::from_secs(1));\n    }\n}\n\n#[logfn]\n#[rstest]\n#[case(spsc::bounded_blocking(1))]\n#[case(mpsc::bounded_blocking(1))]\n#[case(mpmc::bounded_blocking(1))]\nfn test_basic_bounded_empty_full_drop_tx<T: BlockingTxTrait<usize>, R: BlockingRxTrait<usize>>(\n    setup_log: (), #[case] _channel: (T, R),\n) {\n    #[cfg(not(feature = \"async_std\"))]\n    {\n        let (tx, rx) = 
_channel;\n        assert!(tx.is_empty());\n        assert!(rx.is_empty());\n        assert_eq!(tx.capacity(), Some(1));\n        assert_eq!(rx.capacity(), Some(1));\n        tx.try_send(1).expect(\"Ok\");\n        assert!(tx.is_full());\n        assert!(rx.is_full());\n        assert!(!tx.is_empty());\n        assert_eq!(tx.is_disconnected(), false);\n        assert_eq!(rx.is_disconnected(), false);\n        drop(tx);\n        assert_eq!(rx.is_disconnected(), true);\n        assert_eq!(rx.get_tx_count(), 0);\n        assert_eq!(rx.get_rx_count(), 1);\n        assert_eq!(rx.try_recv().unwrap(), 1);\n        assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Disconnected);\n        assert_eq!(rx.recv().unwrap_err(), RecvError);\n        let start = Instant::now();\n        assert_eq!(\n            rx.recv_timeout(Duration::from_secs(1)).unwrap_err(),\n            RecvTimeoutError::Disconnected\n        );\n        assert!(Instant::now() - start < Duration::from_secs(1));\n    }\n}\n\n#[logfn]\n#[rstest]\n#[case(spsc::unbounded_blocking())]\n#[case(mpsc::unbounded_blocking())]\n#[case(mpmc::unbounded_blocking())]\nfn test_basic_unbounded_empty_drop_rx<T: BlockingTxTrait<usize>, R: BlockingRxTrait<usize>>(\n    setup_log: (), #[case] _channel: (T, R),\n) {\n    #[cfg(not(feature = \"async_std\"))]\n    {\n        let (tx, rx) = _channel;\n        assert!(tx.is_empty());\n        assert!(rx.is_empty());\n        assert_eq!(tx.capacity(), None);\n        assert_eq!(rx.capacity(), None);\n        tx.try_send(1).expect(\"Ok\");\n        assert!(!tx.is_empty());\n        assert_eq!(tx.is_disconnected(), false);\n        assert_eq!(rx.is_disconnected(), false);\n        drop(rx);\n        assert_eq!(tx.is_disconnected(), true);\n        assert_eq!(tx.get_rx_count(), 0);\n        assert_eq!(tx.get_tx_count(), 1);\n        assert_eq!(tx.try_send(2).unwrap_err(), TrySendError::Disconnected(2));\n        assert_eq!(tx.send(2).unwrap_err(), SendError(2));\n        let start = 
Instant::now();\n        assert_eq!(\n            tx.send_timeout(3, Duration::from_secs(1)).unwrap_err(),\n            SendTimeoutError::Disconnected(3)\n        );\n        assert!(Instant::now() - start < Duration::from_secs(1));\n    }\n}\n\n#[logfn]\n#[rstest]\n#[case(spsc::unbounded_blocking())]\n#[case(mpsc::unbounded_blocking())]\n#[case(mpmc::unbounded_blocking())]\nfn test_basic_unbounded_empty_drop_tx<T: BlockingTxTrait<usize>, R: BlockingRxTrait<usize>>(\n    setup_log: (), #[case] _channel: (T, R),\n) {\n    #[cfg(not(feature = \"async_std\"))]\n    {\n        let (tx, rx) = _channel;\n        assert!(tx.is_empty());\n        assert!(rx.is_empty());\n        tx.try_send(1).expect(\"Ok\");\n        assert!(!tx.is_empty());\n        assert_eq!(tx.is_disconnected(), false);\n        assert_eq!(rx.is_disconnected(), false);\n        drop(tx);\n        assert_eq!(rx.is_disconnected(), true);\n        assert_eq!(rx.get_tx_count(), 0);\n        assert_eq!(rx.get_rx_count(), 1);\n        assert_eq!(rx.recv().unwrap(), 1);\n        assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Disconnected);\n        assert_eq!(rx.recv().unwrap_err(), RecvError);\n        let start = Instant::now();\n        assert_eq!(\n            rx.recv_timeout(Duration::from_secs(1)).unwrap_err(),\n            RecvTimeoutError::Disconnected\n        );\n        assert!(Instant::now() - start < Duration::from_secs(1));\n    }\n}\n\n#[logfn]\n#[rstest]\n#[case(spsc::bounded_blocking(10))]\n#[case(mpsc::bounded_blocking(10))]\n#[case(mpmc::bounded_blocking(10))]\nfn test_basic_bounded_1_thread<T: BlockingTxTrait<i32>, R: BlockingRxTrait<i32>>(\n    setup_log: (), #[case] _channel: (T, R),\n) {\n    #[cfg(not(feature = \"async_std\"))]\n    {\n        let (tx, rx) = _channel;\n        let rx_res = rx.try_recv();\n        assert!(rx_res.is_err());\n        assert!(rx_res.unwrap_err().is_empty());\n        for i in 0i32..10 {\n            let tx_res = tx.try_send(i);\n            
assert!(tx_res.is_ok());\n        }\n        let tx_res = tx.try_send(11);\n        assert!(tx_res.is_err());\n        assert!(tx_res.unwrap_err().is_full());\n\n        let th = spawn_named_thread(\"receiver_1\", move || {\n            for i in 0i32..12 {\n                match rx.recv() {\n                    Ok(j) => {\n                        trace!(\"recv {}\", i);\n                        assert_eq!(i, j);\n                    }\n                    Err(e) => {\n                        panic!(\"error {}\", e);\n                    }\n                }\n            }\n            let res = rx.recv();\n            assert!(res.is_err());\n            trace!(\"rx close\");\n        });\n        assert!(tx.send(10).is_ok());\n        sleep(Duration::from_secs(1));\n        assert!(tx.send(11).is_ok());\n        drop(tx);\n        let _ = th.join().unwrap();\n    }\n}\n\n#[logfn]\n#[rstest]\n#[case(spsc::unbounded_blocking())]\n#[case(mpsc::unbounded_blocking())]\n#[case(mpmc::unbounded_blocking())]\nfn test_basic_unbounded_1_thread<T: BlockingTxTrait<i32>, R: BlockingRxTrait<i32>>(\n    setup_log: (), #[case] _channel: (T, R),\n) {\n    #[cfg(not(feature = \"async_std\"))]\n    {\n        let (tx, rx) = _channel;\n        let rx_res = rx.try_recv();\n        assert!(rx_res.is_err());\n        assert!(rx_res.unwrap_err().is_empty());\n        for i in 0i32..10 {\n            let tx_res = tx.try_send(i);\n            assert!(tx_res.is_ok());\n        }\n\n        let th = spawn_named_thread(\"receiver_1\", move || {\n            for i in 0i32..12 {\n                match rx.recv() {\n                    Ok(j) => {\n                        trace!(\"recv {}\", i);\n                        assert_eq!(i, j);\n                    }\n                    Err(e) => {\n                        panic!(\"error {}\", e);\n                    }\n                }\n            }\n            let res = rx.recv();\n            assert!(res.is_err());\n            trace!(\"rx 
close\");\n        });\n        assert!(tx.send(10).is_ok());\n        sleep(Duration::from_secs(1));\n        assert!(tx.send(11).is_ok());\n        drop(tx);\n        let _ = th.join().unwrap();\n    }\n}\n\n#[logfn]\n#[rstest]\n#[case(spsc::bounded_blocking(10))]\n#[case(mpsc::bounded_blocking(10))]\n#[case(mpmc::bounded_blocking(10))]\n#[case(spsc::unbounded_blocking())]\n#[case(mpsc::unbounded_blocking())]\n#[case(mpmc::unbounded_blocking())]\nfn test_basic_recv_after_sender_close<T: BlockingTxTrait<i32>, R: BlockingRxTrait<i32>>(\n    setup_log: (), #[case] _channel: (T, R),\n) {\n    #[cfg(not(feature = \"async_std\"))]\n    {\n        let (tx, rx) = _channel;\n        let total_msg_count = 5;\n        for i in 0..total_msg_count {\n            let _ = tx.try_send(i).expect(\"send ok\");\n        }\n        drop(tx);\n\n        // NOTE: 5 < 10\n        let mut recv_msg_count = 0;\n        loop {\n            match rx.recv() {\n                Ok(_) => {\n                    recv_msg_count += 1;\n                }\n                Err(_) => {\n                    break;\n                }\n            }\n        }\n        assert_eq!(recv_msg_count, total_msg_count);\n    }\n}\n\n#[logfn]\n#[rstest]\n#[case(spsc::bounded_blocking(1))]\n#[case(spsc::bounded_blocking(10))]\n#[case(spsc::bounded_blocking(100))]\n#[case(spsc::bounded_blocking(300))]\n#[case(mpsc::bounded_blocking(1))]\n#[case(mpsc::bounded_blocking(10))]\n#[case(mpsc::bounded_blocking(100))]\n#[case(mpsc::bounded_blocking(300))]\n#[case(mpmc::bounded_blocking(1))]\n#[case(mpmc::bounded_blocking(10))]\n#[case(mpmc::bounded_blocking(100))]\n#[case(mpmc::bounded_blocking(300))]\nfn test_pressure_bounded_blocking_1_1<T: BlockingTxTrait<usize>, R: BlockingRxTrait<usize>>(\n    setup_log: (), #[case] _channel: (T, R),\n) {\n    #[cfg(not(feature = \"async_std\"))]\n    {\n        let (tx, rx) = _channel;\n        let round: usize;\n        #[cfg(miri)]\n        {\n            round = ROUND;\n        
}\n        #[cfg(not(miri))]\n        {\n            round = ROUND * 100;\n        }\n        let th = spawn_named_thread(\"sender_1\", move || {\n            for i in 0..round {\n                if let Err(e) = tx.send(i) {\n                    panic!(\"{:?}\", e);\n                }\n            }\n            trace!(\"tx exit\");\n        });\n        let mut count = 0;\n        'A: loop {\n            match rx.recv() {\n                Ok(_i) => {\n                    assert_eq!(_i, count);\n                    count += 1;\n                    trace!(\"recv {}\", _i);\n                }\n                Err(_) => break 'A,\n            }\n        }\n        drop(rx);\n        let _ = th.join().unwrap();\n        assert_eq!(count, round);\n    }\n}\n\n#[logfn]\n#[rstest]\n#[case(mpsc::bounded_blocking(1), 3)]\n#[case(mpsc::bounded_blocking(1), 5)]\n#[case(mpsc::bounded_blocking(1), 10)]\n#[case(mpsc::bounded_blocking(1), 16)]\n#[case(mpsc::bounded_blocking(10), 4)]\n#[case(mpsc::bounded_blocking(10), 7)]\n#[case(mpsc::bounded_blocking(10), 12)]\n#[case(mpsc::bounded_blocking(100), 3)]\n#[case(mpsc::bounded_blocking(100), 9)]\n#[case(mpsc::bounded_blocking(100), 13)]\n#[case(mpmc::bounded_blocking(1), 2)]\n#[case(mpmc::bounded_blocking(1), 5)]\n#[case(mpmc::bounded_blocking(1), 15)]\n#[case(mpmc::bounded_blocking(10), 3)]\n#[case(mpmc::bounded_blocking(10), 7)]\n#[case(mpmc::bounded_blocking(10), 16)]\n#[case(mpmc::bounded_blocking(100), 2)]\n#[case(mpmc::bounded_blocking(100), 8)]\n#[case(mpmc::bounded_blocking(100), 16)]\nfn test_pressure_bounded_blocking_multi_1<\n    F: Flavor<Item = usize> + 'static,\n    R: BlockingRxTrait<usize>,\n>(\n    setup_log: (), #[case] _channel: (MTx<F>, R), #[case] tx_count: usize,\n) {\n    #[cfg(not(feature = \"async_std\"))]\n    {\n        let (tx, rx) = _channel;\n        #[cfg(miri)]\n        {\n            if tx_count > 5 {\n                println!(\"skip\");\n                return;\n            }\n        }\n\n        
let round: usize = ROUND * 10;\n        let mut th_s = Vec::new();\n        for _tx_i in 0..tx_count {\n            let _tx = tx.clone();\n            th_s.push(spawn_named_thread(&format!(\"sender_{}\", _tx_i), move || {\n                for i in 0..round {\n                    match _tx.send(i) {\n                        Err(e) => panic!(\"{:?}\", e),\n                        _ => {}\n                    }\n                }\n                trace!(\"tx {} exit\", _tx_i);\n            }));\n        }\n        drop(tx);\n        let mut count = 0;\n        'A: loop {\n            match rx.recv() {\n                Ok(_i) => {\n                    count += 1;\n                    trace!(\"recv {}\", _i);\n                }\n                Err(_) => break 'A,\n            }\n        }\n        drop(rx);\n        for th in th_s {\n            let _: () = th.join().unwrap();\n        }\n        assert_eq!(count, round * tx_count);\n    }\n}\n\n#[logfn]\n#[rstest]\n#[case(mpmc::bounded_blocking(1), 2, 2)]\n#[case(mpmc::bounded_blocking(1), 16, 2)]\n#[case(mpmc::bounded_blocking(1), 2, 16)]\n#[case(mpmc::bounded_blocking(10), 2, 2)]\n#[case(mpmc::bounded_blocking(10), 13, 2)]\n#[case(mpmc::bounded_blocking(10), 3, 10)]\n#[case(mpmc::bounded_blocking(100), 3, 3)]\n#[case(mpmc::bounded_blocking(100), 8, 3)]\n#[case(mpmc::bounded_blocking(100), 3, 8)]\n#[case(mpmc::bounded_blocking(100), 5, 5)]\nfn test_pressure_bounded_blocking_multi<F: Flavor<Item = usize> + 'static>(\n    setup_log: (), #[case] _channel: (MTx<F>, MRx<F>), #[case] tx_count: usize,\n    #[case] rx_count: usize,\n) {\n    #[cfg(not(feature = \"async_std\"))]\n    {\n        let round: usize;\n        #[cfg(miri)]\n        {\n            if tx_count > 5 || rx_count > 5 {\n                println!(\"skip\");\n                return;\n            }\n            round = ROUND;\n        }\n        #[cfg(not(miri))]\n        {\n            round = ROUND * 10;\n        }\n        let (tx, rx) = _channel;\n       
 let mut th_tx = Vec::new();\n        let mut th_rx = Vec::new();\n        for _tx_i in 0..tx_count {\n            let _tx = tx.clone();\n            th_tx.push(spawn_named_thread(&format!(\"sender_{}\", _tx_i), move || {\n                for i in 0..round {\n                    match _tx.send(i) {\n                        Err(e) => panic!(\"{:?}\", e),\n                        _ => {}\n                    }\n                }\n                trace!(\"tx {} exit\", _tx_i);\n            }));\n        }\n        for _rx_i in 0..rx_count {\n            let _rx = rx.clone();\n            th_rx.push(spawn_named_thread(&format!(\"receiver_{}\", _rx_i), move || {\n                let mut count = 0;\n                'A: loop {\n                    match _rx.recv() {\n                        Ok(_i) => {\n                            count += 1;\n                            trace!(\"recv {} {}\", _rx_i, _i);\n                        }\n                        Err(_) => break 'A,\n                    }\n                }\n                trace!(\"rx {} exit\", _rx_i);\n                count\n            }));\n        }\n        drop(tx);\n        drop(rx);\n        let mut total_count = 0;\n        for th in th_tx {\n            let _ = th.join().unwrap();\n        }\n        for th in th_rx {\n            total_count += th.join().unwrap();\n        }\n        assert_eq!(total_count, round * tx_count);\n    }\n}\n\n#[logfn]\n#[rstest]\n#[case(mpmc::bounded_blocking(1))]\n#[case(mpmc::bounded_blocking(10))]\nfn test_pressure_bounded_timeout_blocking<F: Flavor<Item = usize> + 'static>(\n    setup_log: (), #[case] _channel: (MTx<F>, MRx<F>),\n) {\n    #[cfg(not(feature = \"async_std\"))]\n    {\n        use std::collections::HashMap;\n        use std::sync::Mutex;\n        let (tx, rx) = _channel;\n\n        assert_eq!(\n            rx.recv_timeout(Duration::from_millis(1)).unwrap_err(),\n            RecvTimeoutError::Timeout\n        );\n        let (tx_wakers, rx_wakers) = 
rx.get_wakers_count();\n        println!(\"wakers: {}, {}\", tx_wakers, rx_wakers);\n        assert_eq!(tx_wakers, 0);\n        assert_eq!(rx_wakers, 0);\n\n        let recv_map = Arc::new(Mutex::new(HashMap::new()));\n\n        let mut th_tx = Vec::new();\n        let mut th_rx = Vec::new();\n        let tx_count: usize = 3;\n        for thread_id in 0..tx_count {\n            let _recv_map = recv_map.clone();\n            let _tx = tx.clone();\n            th_tx.push(spawn_named_thread(&format!(\"sender_{}\", thread_id), move || {\n                // randomize start up\n                sleep(Duration::from_millis((thread_id & 3) as u64));\n                let mut local_timeout_counter = 0;\n                for i in 0..ROUND {\n                    {\n                        let mut guard = _recv_map.lock().unwrap();\n                        guard.insert(i, ());\n                    }\n                    if i & 2 == 0 {\n                        sleep(Duration::from_millis(3));\n                    } else {\n                        sleep(Duration::from_millis(1));\n                    }\n                    loop {\n                        match _tx.send_timeout(i, Duration::from_millis(1)) {\n                            Ok(_) => break,\n                            Err(SendTimeoutError::Timeout(_i)) => {\n                                local_timeout_counter += 1;\n                                assert_eq!(_i, i);\n                            }\n                            Err(SendTimeoutError::Disconnected(_)) => {\n                                unreachable!();\n                            }\n                        }\n                    }\n                }\n                local_timeout_counter\n            }));\n        }\n        for _thread_id in 0..2 {\n            let _rx = rx.clone();\n            let _recv_map = recv_map.clone();\n            th_rx.push(spawn_named_thread(&format!(\"receiver_{}\", _thread_id), move || {\n                let mut step: 
usize = 0;\n                let mut local_recv_counter = 0;\n                let mut local_timeout_counter = 0;\n                loop {\n                    step += 1;\n                    let timeout = if step & 2 == 0 { 1 } else { 2 };\n                    if step & 2 > 0 {\n                        sleep(Duration::from_millis(1));\n                    }\n                    match _rx.recv_timeout(Duration::from_millis(timeout)) {\n                        Ok(item) => {\n                            local_recv_counter += 1;\n                            {\n                                let mut guard = _recv_map.lock().unwrap();\n                                guard.remove(&item);\n                            }\n                        }\n                        Err(RecvTimeoutError::Timeout) => {\n                            local_timeout_counter += 1;\n                        }\n                        Err(RecvTimeoutError::Disconnected) => {\n                            return (local_recv_counter, local_timeout_counter);\n                        }\n                    }\n                }\n            }));\n        }\n        drop(tx);\n        drop(rx);\n        let mut total_recv_count = 0;\n        let mut total_send_timeout = 0;\n        let mut total_recv_timeout = 0;\n        for th in th_tx {\n            total_send_timeout += th.join().unwrap();\n        }\n        for th in th_rx {\n            // rx threads return recv_count\n            let (local_recv_counter, local_timeout_counter) = th.join().unwrap();\n            total_recv_count += local_recv_counter;\n            total_recv_timeout += local_timeout_counter;\n        }\n        {\n            let guard = recv_map.lock().unwrap();\n            assert!(guard.is_empty());\n        }\n        assert_eq!(ROUND * tx_count, total_recv_count);\n        println!(\"send timeout count: {}\", total_send_timeout);\n        println!(\"recv timeout count: {}\", total_recv_timeout);\n    }\n}\n\n#[test]\nfn 
test_conversion() {\n    let (mtx, mrx) = mpmc::bounded_blocking::<usize>(1);\n    let _tx: Tx<_> = mtx.into();\n    let _rx: Rx<_> = mrx.into();\n}\n\n// This test make sure we have correctly use of maybeuninit\n#[logfn]\n#[rstest]\n#[case(spsc::bounded_blocking(1))]\n#[case(spsc::bounded_blocking(10))]\n#[case(mpsc::bounded_blocking(1))]\n#[case(mpsc::bounded_blocking(10))]\n#[case(mpmc::bounded_blocking(1))]\n#[case(mpmc::bounded_blocking(10))]\nfn test_drop_small_msg<T: BlockingTxTrait<SmallMsg>, R: BlockingRxTrait<SmallMsg>>(\n    setup_log: (), #[case] channel: (T, R),\n) {\n    println!(\"needs_drop {}\", std::mem::needs_drop::<SmallMsg>());\n    _test_drop_msg(channel);\n}\n\n// This test make sure we have correctly use of maybeuninit\n#[logfn]\n#[rstest]\n#[case(spsc::bounded_blocking(1))]\n#[case(spsc::bounded_blocking(10))]\n#[case(mpsc::bounded_blocking(1))]\n#[case(mpsc::bounded_blocking(10))]\n#[case(mpmc::bounded_blocking(1))]\n#[case(mpmc::bounded_blocking(10))]\nfn test_drop_large_msg<T: BlockingTxTrait<LargeMsg>, R: BlockingRxTrait<LargeMsg>>(\n    setup_log: (), #[case] channel: (T, R),\n) {\n    println!(\"needs_drop {}\", std::mem::needs_drop::<LargeMsg>());\n    _test_drop_msg(channel);\n}\n\nfn _test_drop_msg<M: TestDropMsg, T: BlockingTxTrait<M>, R: BlockingRxTrait<M>>(channel: (T, R)) {\n    let (tx, rx) = channel;\n    reset_drop_counter();\n    let cap = tx.capacity().unwrap();\n    let mut ids = cap;\n    for i in 0..ids {\n        let msg = M::new(i);\n        assert!(tx.try_send(msg).is_ok());\n    }\n    assert_eq!(get_drop_counter(), 0);\n    let msg = M::new(ids);\n    if let Err(TrySendError::Full(_msg)) = tx.try_send(msg) {\n        assert_eq!(_msg.get_value(), ids);\n        assert_eq!(get_drop_counter(), 0);\n        drop(_msg);\n        assert_eq!(get_drop_counter(), 1);\n    } else {\n        unreachable!();\n    }\n    let th = spawn_named_thread(\"receiver_3\", move || {\n        let _msg = rx.recv().expect(\"recv\");\n      
  assert_eq!(_msg.get_value(), 0);\n        drop(_msg);\n        rx\n    });\n    let msg = M::new(ids);\n    tx.send(msg).expect(\"send\");\n    ids += 1;\n    let rx = th.join().unwrap();\n    drop(rx);\n    assert_eq!(get_drop_counter(), 2);\n    let msg = M::new(ids);\n    if let Err(TrySendError::Disconnected(_msg)) = tx.try_send(msg) {\n        assert_eq!(_msg.get_value(), ids);\n    } else {\n        unreachable!();\n    }\n    ids += 1;\n    let msg = M::new(ids);\n    if let Err(SendError(_msg)) = tx.send(msg) {\n        assert_eq!(_msg.get_value(), ids);\n    } else {\n        unreachable!();\n    }\n    assert_eq!(get_drop_counter(), 4);\n    ids += 1;\n    drop(tx);\n    // every thing dropped inside the channel\n    assert_eq!(get_drop_counter(), ids + 1); // ids begins at 0\n    assert_eq!(get_drop_counter(), 4 + cap);\n}\n"
  },
  {
    "path": "test-suite/src/test_oneshot.rs",
    "content": "use crate::*;\nuse captains_log::logfn;\nuse crossfire::*;\nuse fastrand;\nuse rstest::*;\nuse std::thread;\nuse std::time::Duration;\n\n#[fixture]\nfn setup_log() {\n    _setup_log();\n}\n\n#[logfn]\n#[rstest]\nfn test_oneshot_blocking_basic(setup_log: ()) {\n    let (tx, mut rx) = oneshot::oneshot();\n    assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty);\n    assert_eq!(rx.is_empty(), true);\n    tx.send(42);\n    assert_eq!(rx.is_empty(), false);\n    assert_eq!(rx.recv(), Ok(42));\n\n    let (tx, mut rx) = oneshot::oneshot();\n    assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty);\n    tx.send(41);\n    assert_eq!(rx.try_recv(), Ok(41));\n    assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Disconnected);\n    assert_eq!(rx.recv().unwrap_err(), RecvError);\n}\n\n#[logfn]\n#[rstest]\nfn test_oneshot_blocking_drop_tx(setup_log: ()) {\n    let (tx, rx) = oneshot::oneshot::<i32>();\n    drop(tx);\n    assert_eq!(rx.recv(), Err(RecvError));\n\n    let (tx, rx) = oneshot::oneshot::<i32>();\n    let th = thread::spawn(move || {\n        // Should be wake up on sender drop\n        assert_eq!(rx.recv(), Err(RecvError));\n    });\n    thread::sleep(Duration::from_millis(fastrand::u64(1..=500)));\n    drop(tx);\n    th.join().expect(\"join\");\n}\n\n#[logfn]\n#[rstest]\nfn test_oneshot_blocking_drop_rx(setup_log: ()) {\n    let (tx, rx) = oneshot::oneshot::<i32>();\n    drop(rx);\n    assert!(tx.is_disconnected());\n    // send consumes tx, returns ()\n    tx.send(42);\n}\n\n#[logfn]\n#[rstest]\nfn test_oneshot_blocking_leak(setup_log: ()) {\n    // Check if OneShot drops the value if not received\n    reset_drop_counter();\n    {\n        let (tx, _rx) = oneshot::oneshot::<SmallMsg>();\n        tx.send(SmallMsg::new(1));\n    } // tx dropped (closed), rx dropped (OneShot dropped). 
msg should be dropped.\n    assert_eq!(get_drop_counter(), 1);\n}\n\n#[logfn]\n#[rstest]\nfn test_oneshot_blocking_drop_after_recv(setup_log: ()) {\n    // Check if OneShot drops the value after recv (it shouldn't, Rx has it)\n    reset_drop_counter();\n    {\n        let (tx, rx) = oneshot::oneshot::<SmallMsg>();\n        tx.send(SmallMsg::new(1));\n        let msg = rx.recv().unwrap();\n        assert_eq!(get_drop_counter(), 0);\n        drop(msg);\n        assert_eq!(get_drop_counter(), 1);\n    }\n    // OneShot dropped. Should NOT drop again.\n    assert_eq!(get_drop_counter(), 1);\n}\n\n#[logfn]\n#[rstest]\nfn test_oneshot_async_basic(setup_log: ()) {\n    runtime_block_on!(async move {\n        let (tx, mut rx) = oneshot::oneshot();\n        assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty);\n        assert_eq!(rx.is_empty(), true);\n        tx.send(42);\n        assert_eq!(rx.is_empty(), false);\n        assert_eq!(rx.await, Ok(42));\n        let (tx, mut rx) = oneshot::oneshot();\n        assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Empty);\n        tx.send(41);\n        assert_eq!(rx.try_recv(), Ok(41));\n        assert_eq!(rx.try_recv().unwrap_err(), TryRecvError::Disconnected);\n        assert_eq!(rx.await.unwrap_err(), RecvError);\n    });\n}\n\n#[cfg(feature = \"time\")]\n#[logfn]\n#[rstest]\nfn test_oneshot_async_drop_tx(setup_log: ()) {\n    runtime_block_on!(async move {\n        let (tx, rx) = oneshot::oneshot::<i32>();\n        drop(tx);\n        assert_eq!(rx.await, Err(RecvError));\n        log::debug!(\"next test\");\n        let (tx, rx) = oneshot::oneshot::<i32>();\n        let th = async_spawn!(async move {\n            // Should be wake up on sender drop\n            assert_eq!(rx.await, Err(RecvError));\n        });\n        sleep(Duration::from_millis(fastrand::u64(1..=500))).await;\n        drop(tx);\n        let _ = async_join_result!(th);\n    });\n}\n\n#[logfn]\n#[rstest]\nfn test_oneshot_async_pressure(setup_log: 
()) {\n    let count = {\n        #[cfg(miri)]\n        {\n            10usize\n        }\n        #[cfg(not(miri))]\n        {\n            100usize\n        }\n    };\n    runtime_block_on!(async move {\n        let mut tasks = Vec::new();\n        for i in 0..count {\n            tasks.push(async_spawn!(async move {\n                let (tx, rx) = oneshot::oneshot();\n                tx.send(i);\n                assert_eq!(rx.await, Ok(i));\n            }));\n        }\n        for t in tasks {\n            let _ = async_join_result!(t);\n        }\n    });\n}\n\n#[logfn]\n#[rstest]\nfn test_oneshot_blocking_batch(setup_log: ()) {\n    let mut txs = Vec::with_capacity(ROUND);\n    let mut rxs = Vec::with_capacity(ROUND);\n    for _i in 0..ROUND {\n        let (tx, rx) = oneshot::oneshot();\n        txs.push(tx);\n        rxs.push(rx);\n    }\n    let th = thread::spawn(move || {\n        for (i, tx) in txs.into_iter().enumerate() {\n            tx.send(i);\n        }\n    });\n    for (i, rx) in rxs.into_iter().enumerate() {\n        assert_eq!(rx.recv(), Ok(i));\n    }\n    th.join().unwrap();\n}\n\n#[logfn]\n#[rstest]\nfn test_oneshot_async_batch(setup_log: ()) {\n    runtime_block_on!(async move {\n        let mut txs = Vec::with_capacity(ROUND);\n        let mut rxs = Vec::with_capacity(ROUND);\n        for _i in 0..ROUND {\n            let (tx, rx) = oneshot::oneshot();\n            txs.push(tx);\n            rxs.push(rx);\n        }\n        let th = async_spawn!(async move {\n            for (i, tx) in txs.into_iter().enumerate() {\n                tx.send(i);\n            }\n        });\n        for (i, rx) in rxs.into_iter().enumerate() {\n            assert_eq!(rx.await, Ok(i));\n        }\n        async_join_result!(th);\n    });\n}\n\n#[logfn]\n#[rstest]\nfn test_oneshot_blocking_concurrent(setup_log: ()) {\n    let count = {\n        #[cfg(miri)]\n        {\n            10usize\n        }\n        #[cfg(not(miri))]\n        {\n            50usize\n  
      }\n    };\n    let mut th_s = Vec::new();\n    for i in 0..count {\n        let (tx, rx) = oneshot::oneshot();\n        th_s.push(thread::spawn(move || {\n            tx.send(i);\n        }));\n        th_s.push(thread::spawn(move || {\n            assert_eq!(rx.recv(), Ok(i));\n        }));\n    }\n    for th in th_s {\n        th.join().unwrap();\n    }\n}\n\n#[logfn]\n#[rstest]\nfn test_oneshot_async_concurrent(setup_log: ()) {\n    let count = {\n        #[cfg(miri)]\n        {\n            10usize\n        }\n        #[cfg(not(miri))]\n        {\n            100usize\n        }\n    };\n    runtime_block_on!(async move {\n        let mut tasks = Vec::new();\n        for i in 0..count {\n            let (tx, rx) = oneshot::oneshot();\n            tasks.push(async_spawn!(async move {\n                tx.send(i);\n            }));\n            tasks.push(async_spawn!(async move {\n                assert_eq!(rx.await, Ok(i));\n            }));\n        }\n        for t in tasks {\n            let _ = async_join_result!(t);\n        }\n    });\n}\n\n#[logfn]\n#[rstest]\nfn test_oneshot_blocking_with_sleep(setup_log: ()) {\n    #[cfg(miri)]\n    {\n        // sleep in miri will be too slow\n        println!(\"skip on miri\");\n        return;\n    }\n    #[cfg(not(miri))]\n    {\n        let count = 50usize;\n        let mut th_s = Vec::new();\n        for i in 0..(count as u64) {\n            th_s.push(thread::spawn(move || {\n                let (tx, rx) = oneshot::oneshot();\n                // Spawn a thread that sends after a short delay\n                thread::spawn(move || {\n                    thread::sleep(Duration::from_millis(i % 10)); // Vary the delay\n                    tx.send(i);\n                });\n                // Wait for the value\n                assert_eq!(rx.recv(), Ok(i));\n            }));\n        }\n        for th in th_s {\n            th.join().unwrap();\n        }\n    }\n}\n\n#[cfg(feature = 
\"time\")]\n#[logfn]\n#[rstest]\nfn test_oneshot_async_with_sleep(setup_log: ()) {\n    #[cfg(miri)]\n    {\n        // sleep in miri will be too slow\n        println!(\"skip on miri\");\n    }\n    #[cfg(not(miri))]\n    {\n        let count = 50usize;\n        runtime_block_on!(async move {\n            let mut tasks = Vec::new();\n            for i in 0..count {\n                tasks.push(async_spawn!(async move {\n                    let (tx, rx) = oneshot::oneshot();\n                    let th = async_spawn!(async move {\n                        sleep(Duration::from_millis((i % 10) as u64)).await;\n                        tx.send(i);\n                    });\n\n                    // Wait for the value\n                    assert_eq!(rx.await, Ok(i));\n                    let _ = async_join_result!(th);\n                }));\n            }\n            for t in tasks {\n                let _ = async_join_result!(t);\n            }\n        });\n    }\n}\n\n#[cfg(feature = \"time\")]\n#[logfn]\n#[rstest]\nfn test_oneshot_async_batch_with_interval(setup_log: ()) {\n    #[cfg(miri)]\n    {\n        // sleep in miri will be too slow\n        println!(\"skip on miri\");\n        return;\n    }\n    #[cfg(not(miri))]\n    {\n        let batch_size = 30;\n        runtime_block_on!(async move {\n            let mut tasks = Vec::new();\n\n            // Create a batch of oneshots\n            for i in 0..batch_size {\n                tasks.push(async_spawn!(async move {\n                    let (tx, rx) = oneshot::oneshot();\n                    let th = async_spawn!(async move {\n                        // Sleep for different durations based on index\n                        sleep(Duration::from_millis((i * 2) as u64)).await;\n                        tx.send(i);\n                    });\n\n                    // Wait for the value\n                    assert_eq!(rx.await, Ok(i));\n                    let _ = async_join_result!(th);\n                }));\n           
 }\n            for t in tasks {\n                let _ = async_join_result!(t);\n            }\n        });\n    }\n}\n\n#[cfg(feature = \"time\")]\n#[logfn]\n#[rstest]\nfn test_oneshot_blocking_timeout_fail(setup_log: ()) {\n    let (_tx, rx) = oneshot::oneshot::<i32>();\n    let start = std::time::Instant::now();\n    let res = rx.recv_timeout(Duration::from_millis(100));\n    assert_eq!(res, Err(RecvTimeoutError::Timeout));\n    assert!(start.elapsed() >= Duration::from_millis(100));\n}\n\n#[cfg(feature = \"time\")]\n#[logfn]\n#[rstest]\nfn test_oneshot_blocking_timeout_success(setup_log: ()) {\n    let (tx, rx) = oneshot::oneshot::<i32>();\n    let th = thread::spawn(move || {\n        thread::sleep(Duration::from_millis(50));\n        tx.send(42);\n    });\n    let _res = rx.recv_timeout(Duration::from_secs(1));\n    #[cfg(not(miri))]\n    assert_eq!(_res, Ok(42));\n    let _ = th.join();\n}\n\n#[cfg(feature = \"time\")]\n#[logfn]\n#[rstest]\nfn test_oneshot_blocking_timeout_disconnected(setup_log: ()) {\n    let (tx, rx) = oneshot::oneshot::<i32>();\n    let th = thread::spawn(move || {\n        thread::sleep(Duration::from_millis(50));\n        drop(tx);\n    });\n    let _res = rx.recv_timeout(Duration::from_millis(200));\n    let _ = th.join();\n    assert!(_res.is_err());\n    // might be timeout or disconnected\n}\n\n#[cfg(feature = \"time\")]\n#[logfn]\n#[rstest]\nfn test_oneshot_async_timeout_fail(setup_log: ()) {\n    runtime_block_on!(async move {\n        let (_tx, rx) = oneshot::oneshot::<i32>();\n        let start = std::time::Instant::now();\n        let sleep_fut = sleep(Duration::from_millis(100));\n        futures_util::pin_mut!(sleep_fut);\n        let res = rx.recv_async_with_timer(sleep_fut).await;\n        assert_eq!(res, Err(RecvTimeoutError::Timeout));\n        assert!(start.elapsed() >= Duration::from_millis(100));\n    });\n}\n\n#[cfg(feature = \"time\")]\n#[logfn]\n#[rstest]\nfn test_oneshot_async_timeout_disconnected(setup_log: ()) 
{\n    runtime_block_on!(async move {\n        let (tx, rx) = oneshot::oneshot::<i32>();\n        let th = std::thread::spawn(move || {\n            std::thread::sleep(Duration::from_millis(50));\n            drop(tx);\n        });\n        let _res = rx.recv_async_with_timer(sleep(Duration::from_secs(1))).await;\n        let _ = th.join();\n        #[cfg(not(miri))]\n        assert_eq!(_res, Err(RecvTimeoutError::Disconnected));\n    });\n}\n\n#[cfg(feature = \"time\")]\n#[logfn]\n#[rstest]\nfn test_oneshot_async_timeout_success(setup_log: ()) {\n    runtime_block_on!(async move {\n        let (tx, rx) = oneshot::oneshot::<i32>();\n        let th = async_spawn!(async move {\n            sleep(Duration::from_millis(50)).await;\n            tx.send(42);\n        });\n        let _res = rx.recv_async_with_timer(sleep(Duration::from_secs(2))).await;\n        #[cfg(not(miri))]\n        assert_eq!(_res, Ok(42));\n        async_join_result!(th);\n    });\n}\n"
  },
  {
    "path": "test-suite/src/test_select_async.rs",
    "content": "use crate::*;\nuse captains_log::logfn;\nuse crossfire::{mpmc, mpsc};\nuse futures_util::{select, FutureExt};\nuse rstest::*;\nuse std::time::Duration;\n\n#[fixture]\nfn setup_log() {\n    _setup_log();\n}\n\n#[cfg(feature = \"time\")]\n#[logfn]\n#[rstest]\nfn test_mpmc_null_async_close(setup_log: ()) {\n    let flavor = mpmc::Null::new();\n    let (tx, rx) = flavor.new_async();\n\n    runtime_block_on!(async move {\n        let th = async_spawn!(async move {\n            sleep(Duration::from_millis(50)).await;\n            drop(tx);\n        });\n\n        let res = rx.recv().await;\n        assert!(res.is_err());\n        async_join_result!(th);\n    });\n}\n\n#[cfg(feature = \"time\")]\n#[logfn]\n#[rstest]\nfn test_mpsc_null_async_close(setup_log: ()) {\n    let flavor = mpsc::Null::new();\n    let (tx, rx) = flavor.new_async();\n\n    runtime_block_on!(async move {\n        let th = async_spawn!(async move {\n            sleep(Duration::from_millis(50)).await;\n            drop(tx);\n        });\n\n        let res = rx.recv().await;\n        assert!(res.is_err());\n        async_join_result!(th);\n    });\n}\n\n#[cfg(feature = \"time\")]\n#[logfn]\n#[rstest]\nfn test_mpmc_null_select(setup_log: ()) {\n    let flavor = mpmc::Null::new();\n    let (tx, rx) = flavor.new_async();\n\n    runtime_block_on!(async move {\n        let th = async_spawn!(async move {\n            sleep(Duration::from_millis(50)).await;\n            drop(tx);\n        });\n\n        let closed = select! 
{\n            res = rx.recv().fuse() => {\n                if res.is_err() {\n                    true\n                } else {\n                    panic!(\"Should not receive message from null\");\n                }\n            }\n        };\n        assert!(closed);\n        async_join_result!(th);\n    });\n}\n\n#[cfg(feature = \"time\")]\n#[logfn]\n#[rstest]\nfn test_mpsc_null_select(setup_log: ()) {\n    let flavor = mpsc::Null::new();\n    let (tx, rx) = flavor.new_async();\n\n    runtime_block_on!(async move {\n        let th = async_spawn!(async move {\n            sleep(Duration::from_millis(50)).await;\n            drop(tx);\n        });\n\n        let closed = select! {\n            res = rx.recv().fuse() => {\n                if res.is_err() {\n                    true\n                } else {\n                    panic!(\"Should not receive message from null\");\n                }\n            }\n        };\n        assert!(closed);\n        async_join_result!(th);\n    });\n}\n\n#[cfg(feature = \"time\")]\n#[logfn]\n#[rstest]\nfn test_null_select_timeout(setup_log: ()) {\n    let flavor = mpmc::Null::new();\n    let (tx, rx) = flavor.new_async();\n\n    runtime_block_on!(async move {\n        // Don't drop tx yet\n        let timed_out = select! 
{\n            res = rx.recv().fuse() => {\n                if res.is_err() {\n                    panic!(\"Should not be closed yet\");\n                }\n                false\n            }\n            _ = sleep(Duration::from_millis(50)).fuse() => {\n                true\n            }\n        };\n        assert!(timed_out);\n        drop(tx);\n    });\n}\n\n#[logfn]\n#[rstest]\nfn test_null_mixed_with_active_channel(setup_log: ()) {\n    let flavor = mpmc::Null::new();\n    let (tx_null, rx_null) = flavor.new_async();\n    let (tx_data, rx_data) = mpmc::bounded_async::<i32>(10);\n\n    runtime_block_on!(async move {\n        tx_data.send(42).await.unwrap();\n\n        // Data ready, null not triggered\n        select! {\n            _ = rx_null.recv().fuse() => {\n                panic!(\"Null triggered unexpectedly\");\n            }\n            res = rx_data.recv().fuse() => {\n                assert_eq!(res.unwrap(), 42);\n            }\n        }\n        drop(tx_null);\n    });\n}\n\n#[cfg(feature = \"time\")]\n#[logfn]\n#[rstest]\nfn test_null_mixed_trigger(setup_log: ()) {\n    let flavor = mpmc::Null::new();\n    let (tx_null, rx_null) = flavor.new_async();\n    let (_tx_data, rx_data) = mpmc::bounded_async::<i32>(10);\n\n    runtime_block_on!(async move {\n        let th = async_spawn!(async move {\n            sleep(Duration::from_millis(50)).await;\n            drop(tx_null);\n        });\n\n        // Data not ready (empty), null triggered via drop\n        let null_triggered = select! {\n            res = rx_null.recv().fuse() => {\n                assert!(res.is_err());\n                true\n            }\n            _ = rx_data.recv().fuse() => {\n                panic!(\"Data triggered unexpectedly\");\n            }\n        };\n        assert!(null_triggered);\n        async_join_result!(th);\n    });\n}\n"
  },
  {
    "path": "test-suite/src/test_select_blocking.rs",
    "content": "use crate::*;\nuse captains_log::logfn;\nuse crossfire::select::{Multiplex, Mux, Select};\nuse crossfire::*;\nuse rstest::*;\nuse std::sync::atomic::{AtomicUsize, Ordering};\nuse std::sync::{Arc, Barrier};\nuse std::thread;\nuse std::time::Duration;\n\n#[fixture]\nfn setup_log() {\n    _setup_log();\n}\n\n#[logfn]\n#[rstest]\nfn test_select_basic(setup_log: ()) {\n    let (tx1, rx1) = mpmc::bounded_blocking::<i32>(10);\n    let (tx2, rx2) = mpsc::bounded_blocking::<i32>(10);\n\n    tx1.send(100).expect(\"send\");\n    tx2.send(200).expect(\"send\");\n    let mut select = Select::new();\n    select.add(&rx1);\n    select.add(&rx2);\n    let mut results = Vec::new();\n    // Select twice\n    for _ in 0..2 {\n        let res = select.select().unwrap();\n        if res == rx1 {\n            results.push(rx1.read_select(res).unwrap());\n        } else if res == rx2 {\n            results.push(rx2.read_select(res).unwrap());\n        } else {\n            panic!(\"Unexpected token\");\n        }\n    }\n    results.sort();\n    assert_eq!(results, vec![100, 200]);\n}\n\n#[logfn]\n#[rstest]\nfn test_select_basic_timeout(setup_log: ()) {\n    let (_tx1, rx1) = mpmc::bounded_blocking::<i32>(10);\n    let (_tx2, rx2) = mpmc::bounded_blocking::<i32>(10);\n    let (_tx3, rx3) = mpmc::bounded_blocking::<i32>(10);\n    let mut select = Select::new();\n    select.add(&rx1);\n    select.add(&rx2);\n    select.add(&rx3);\n    let start = std::time::Instant::now();\n    let res = select.select_timeout(Duration::from_millis(100));\n    let elapsed = start.elapsed();\n    assert!(res.is_err());\n    assert!(elapsed >= Duration::from_millis(100));\n}\n\n#[logfn]\n#[rstest]\nfn test_select_basic_disconnect_before_park(setup_log: ()) {\n    let (_tx1, rx1) = mpmc::bounded_blocking::<i32>(10);\n    let (_tx2, rx2) = mpmc::bounded_blocking::<i32>(10);\n    let (_tx3, rx3) = mpmc::bounded_blocking::<i32>(10);\n    let (_tx4, rx4) = mpmc::bounded_blocking::<i32>(10);\n    
let mut select = Select::new();\n    select.add(&rx1);\n    select.add(&rx2);\n    select.add(&rx3);\n    select.add(&rx4);\n    drop(_tx3);\n    let res = select.select();\n    assert!(res.is_ok());\n    let res = res.unwrap();\n    assert!(res == rx3);\n    // Disconnected and empty\n    assert!(rx3.read_select(res).is_err());\n    select.remove(&rx3);\n    assert_eq!(select.try_select().unwrap_err(), TryRecvError::Empty);\n    _tx2.send(200).expect(\"send\");\n    let res = select.select().unwrap();\n    assert!(res == rx2);\n    println!(\"select_result {:?}, rx2 {:?}\", res, rx2);\n    assert_eq!(rx2.read_select(res).unwrap(), 200);\n}\n\n#[logfn]\n#[rstest]\nfn test_select_basic_disconnect_after_park(setup_log: ()) {\n    let (_tx1, rx1) = mpmc::bounded_blocking::<i32>(10);\n    let (_tx2, rx2) = mpmc::bounded_blocking::<i32>(10);\n    let (_tx3, rx3) = mpmc::bounded_blocking::<i32>(10);\n    let (_tx4, rx4) = mpmc::bounded_blocking::<i32>(10);\n    let mut select = Select::new();\n    select.add(&rx1);\n    select.add(&rx2);\n    select.add(&rx3);\n    select.add(&rx4);\n    let barrier = Arc::new(Barrier::new(2));\n    let _barrier = barrier.clone();\n    let th = thread::spawn(move || {\n        _barrier.wait();\n        thread::sleep(Duration::from_millis(500));\n        drop(_tx3);\n    });\n    barrier.wait();\n    let res = select.select();\n    assert!(res.is_ok());\n    let res = res.unwrap();\n    assert!(res == rx3);\n    // Disconnected and empty\n    assert!(rx3.read_select(res).is_err());\n    let _ = th.join();\n    select.remove(&rx3);\n    assert_eq!(select.try_select().unwrap_err(), TryRecvError::Empty);\n    _tx2.send(200).expect(\"send\");\n    let res = select.select().unwrap();\n    assert!(res == rx2);\n    assert_eq!(rx2.read_select(res).unwrap(), 200);\n}\n\n#[logfn]\n#[rstest]\nfn test_select_basic_loop(setup_log: ()) {\n    let (tx1, rx1) = mpmc::unbounded_blocking::<i32>();\n    let (tx2, rx2) = mpmc::bounded_blocking::<i32>(10);\n 
   let (tx3, rx3): (MTx<mpmc::One<i32>>, MRx<mpmc::One<i32>>) = mpmc::build(mpmc::One::new());\n    let (tx4, rx4) = mpsc::unbounded_blocking::<i32>();\n    let (tx5, rx5) = mpsc::bounded_blocking::<i32>(10);\n    let (tx6, rx6): (MTx<mpsc::One<i32>>, Rx<mpsc::One<i32>>) = mpsc::new();\n\n    let mut select = Select::new();\n    select.add(&rx1);\n    select.add(&rx2);\n    select.add(&rx3);\n    select.add(&rx4);\n    select.add(&rx5);\n    select.add(&rx6);\n\n    let t1 = thread::spawn(move || {\n        for i in 0..10 {\n            tx1.send(i).expect(\"send\");\n            thread::sleep(Duration::from_millis(10));\n        }\n    });\n\n    let t2 = thread::spawn(move || {\n        for i in 0..10 {\n            tx2.send(i + 100).expect(\"send\");\n            thread::sleep(Duration::from_millis(10));\n        }\n    });\n\n    let t3 = thread::spawn(move || {\n        for i in 0..10 {\n            tx3.send(i + 200).expect(\"send\");\n            thread::sleep(Duration::from_millis(10));\n        }\n    });\n\n    let t4 = thread::spawn(move || {\n        for i in 0..10 {\n            tx4.send(i + 300).expect(\"send\");\n            thread::sleep(Duration::from_millis(10));\n        }\n    });\n\n    let t5 = thread::spawn(move || {\n        for i in 0..10 {\n            tx5.send(i + 400).expect(\"send\");\n            thread::sleep(Duration::from_millis(10));\n        }\n    });\n\n    let t6 = thread::spawn(move || {\n        for i in 0..10 {\n            tx6.send(i + 500).expect(\"send\");\n            thread::sleep(Duration::from_millis(10));\n        }\n    });\n\n    let mut sum = 0;\n    loop {\n        let res = match select.select() {\n            Ok(res) => res,\n            Err(RecvError) => {\n                println!(\"All channels disconnected or removed from select. 
Breaking loop.\");\n                break;\n            }\n        };\n\n        if res == rx1 {\n            match rx1.read_select(res) {\n                Ok(val) => {\n                    sum += val;\n                }\n                Err(RecvError) => {\n                    println!(\"rx1 disconnected, removing from select.\");\n                    select.remove(&rx1);\n                }\n            }\n        } else if res == rx2 {\n            match rx2.read_select(res) {\n                Ok(val) => {\n                    sum += val;\n                }\n                Err(RecvError) => {\n                    println!(\"rx2 disconnected, removing from select.\");\n                    select.remove(&rx2);\n                }\n            }\n        } else if res == rx3 {\n            match rx3.read_select(res) {\n                Ok(val) => {\n                    sum += val;\n                }\n                Err(RecvError) => {\n                    println!(\"rx3 disconnected, removing from select.\");\n                    select.remove(&rx3);\n                }\n            }\n        } else if res == rx4 {\n            match rx4.read_select(res) {\n                Ok(val) => {\n                    sum += val;\n                }\n                Err(RecvError) => {\n                    println!(\"rx4 disconnected, removing from select.\");\n                    select.remove(&rx4);\n                }\n            }\n        } else if res == rx5 {\n            match rx5.read_select(res) {\n                Ok(val) => {\n                    sum += val;\n                }\n                Err(RecvError) => {\n                    println!(\"rx5 disconnected, removing from select.\");\n                    select.remove(&rx5);\n                }\n            }\n        } else if res == rx6 {\n            match rx6.read_select(res) {\n                Ok(val) => {\n                    sum += val;\n                }\n                Err(RecvError) => {\n                
    println!(\"rx6 disconnected, removing from select.\");\n                    select.remove(&rx6);\n                }\n            }\n        } else {\n            panic!(\"unknown token\");\n        }\n    }\n\n    t1.join().unwrap();\n    t2.join().unwrap();\n    t3.join().unwrap();\n    t4.join().unwrap();\n    t5.join().unwrap();\n    t6.join().unwrap();\n\n    assert_eq!(sum, 15270);\n}\n\n#[logfn]\n#[rstest]\nfn test_select_remove_mid(setup_log: ()) {\n    // Test removing a receiver from the middle of the list\n    let (tx1, rx1) = mpmc::bounded_blocking::<i32>(10);\n    let (tx2, rx2) = mpsc::bounded_blocking::<i32>(10);\n    let (tx3, rx3) = spsc::bounded_blocking::<i32>(10);\n\n    let mut select = Select::new();\n    select.add(&rx1);\n    select.add(&rx2);\n    select.add(&rx3);\n\n    // Remove rx2 (middle)\n    select.remove(&rx2);\n\n    tx1.send(1).unwrap();\n    tx3.send(3).unwrap();\n    tx2.send(2).unwrap(); // Should be ignored\n\n    let mut results = Vec::new();\n    for _ in 0..2 {\n        let res = select.select().unwrap();\n        if res == rx1 {\n            results.push(rx1.read_select(res).unwrap());\n        } else if res == rx3 {\n            results.push(rx3.read_select(res).unwrap());\n        } else {\n            panic!(\"Unexpected token\");\n        }\n    }\n\n    // Should not receive from rx2\n    assert!(select.select_timeout(Duration::from_millis(50)).is_err());\n\n    results.sort();\n    assert_eq!(results, vec![1, 3]);\n}\n\n#[logfn]\n#[rstest]\nfn test_select_mixed_flavors(setup_log: ()) {\n    // Test mixing List (unbounded), Array (bounded > 1) and One (explicit One)\n    let (tx_list, rx_list) = mpmc::unbounded_blocking::<i32>();\n    let (tx_array, rx_array) = mpmc::bounded_blocking::<i32>(10);\n    let (tx_one, rx_one): (MTx<mpmc::One<i32>>, MRx<mpmc::One<i32>>) =\n        mpmc::build(mpmc::One::new());\n\n    let mut select = Select::new();\n    select.add(&rx_list);\n    select.add(&rx_array);\n    
select.add(&rx_one);\n\n    tx_list.send(1).expect(\"send\");\n    tx_array.send(2).expect(\"send\");\n    tx_one.send(3).expect(\"send\");\n\n    let mut results = Vec::new();\n    for _ in 0..3 {\n        let res = select.select().unwrap();\n        if res == rx_list {\n            results.push(rx_list.read_select(res).unwrap());\n        } else if res == rx_array {\n            results.push(rx_array.read_select(res).unwrap());\n        } else if res == rx_one {\n            results.push(rx_one.read_select(res).unwrap());\n        } else {\n            panic!(\"Unexpected token\");\n        }\n    }\n\n    results.sort();\n    assert_eq!(results, vec![1, 2, 3]);\n}\n\n#[logfn]\n#[rstest]\n#[case(1)]\n#[case(5)]\n#[case(10)]\nfn test_select_pressure(setup_log: (), #[case] producers: usize) {\n    #[cfg(miri)]\n    {\n        if producers > 5 {\n            println!(\"skip\");\n            return;\n        }\n    }\n    let (tx_list, rx_list) = mpmc::unbounded_blocking::<usize>();\n    let (tx_array, rx_array) = mpmc::bounded_blocking::<usize>(100);\n    let (tx_one, rx_one): (MTx<mpmc::One<usize>>, MRx<mpmc::One<usize>>) =\n        mpmc::build(mpmc::One::new());\n    let (tx_mpsc_list, rx_mpsc_list) = mpsc::unbounded_blocking::<usize>();\n    let (tx_mpsc_array, rx_mpsc_array) = mpsc::bounded_blocking::<usize>(100);\n    let (tx_mpsc_one, rx_mpsc_one): (MTx<mpsc::One<usize>>, Rx<mpsc::One<usize>>) = mpsc::new();\n\n    let mut select = Select::new();\n    select.add(&rx_list);\n    select.add(&rx_array);\n    select.add(&rx_one);\n    select.add(&rx_mpsc_list);\n    select.add(&rx_mpsc_array);\n    select.add(&rx_mpsc_one);\n\n    let round = ROUND;\n    let total_messages = round * 6 * producers;\n    let mut handlers = Vec::new();\n\n    for _ in 0..producers {\n        let tx = tx_list.clone();\n        handlers.push(thread::spawn(move || {\n            for i in 0..round {\n                tx.send(i).expect(\"send\");\n            }\n        }));\n\n        let 
tx = tx_array.clone();\n        handlers.push(thread::spawn(move || {\n            for i in 0..round {\n                tx.send(i).expect(\"send\");\n            }\n        }));\n\n        let tx = tx_one.clone();\n        handlers.push(thread::spawn(move || {\n            for i in 0..round {\n                tx.send(i).expect(\"send\");\n            }\n        }));\n\n        let tx = tx_mpsc_list.clone();\n        handlers.push(thread::spawn(move || {\n            for i in 0..round {\n                tx.send(i).expect(\"send\");\n            }\n        }));\n\n        let tx = tx_mpsc_array.clone();\n        handlers.push(thread::spawn(move || {\n            for i in 0..round {\n                tx.send(i).expect(\"send\");\n            }\n        }));\n\n        let tx = tx_mpsc_one.clone();\n        handlers.push(thread::spawn(move || {\n            for i in 0..round {\n                tx.send(i).expect(\"send\");\n            }\n        }));\n    }\n\n    // Drop original senders to ensure we don't hang if we were counting on close\n    drop(tx_list);\n    drop(tx_array);\n    drop(tx_one);\n    drop(tx_mpsc_list);\n    drop(tx_mpsc_array);\n    drop(tx_mpsc_one);\n\n    let mut count = 0;\n    while count < total_messages {\n        let res = select.select();\n        match res {\n            Ok(token) => {\n                if token == rx_list {\n                    if rx_list.read_select(token).is_ok() {\n                        count += 1;\n                    }\n                } else if token == rx_array {\n                    if rx_array.read_select(token).is_ok() {\n                        count += 1;\n                    }\n                } else if token == rx_one {\n                    if rx_one.read_select(token).is_ok() {\n                        count += 1;\n                    }\n                } else if token == rx_mpsc_list {\n                    if rx_mpsc_list.read_select(token).is_ok() {\n                        count += 1;\n                 
   }\n                } else if token == rx_mpsc_array {\n                    if rx_mpsc_array.read_select(token).is_ok() {\n                        count += 1;\n                    }\n                } else if token == rx_mpsc_one {\n                    if rx_mpsc_one.read_select(token).is_ok() {\n                        count += 1;\n                    }\n                } else {\n                    panic!(\"unknown token\");\n                }\n            }\n            Err(_) => {\n                break;\n            }\n        }\n    }\n\n    for h in handlers {\n        h.join().unwrap();\n    }\n    assert_eq!(count, total_messages);\n}\n\n#[logfn]\n#[rstest]\nfn test_select_null(setup_log: ()) {\n    let (tx, rx) = mpmc::bounded_blocking::<i32>(10);\n    let (stop_tx, stop_rx) = mpmc::Null::new().new_blocking();\n\n    let mut select = Select::new();\n    select.add(&rx);\n    select.add(&stop_rx);\n\n    let h = thread::spawn(move || {\n        for i in 0..10 {\n            tx.send(i).unwrap();\n        }\n        thread::sleep(Duration::from_millis(50));\n        drop(stop_tx);\n    });\n\n    let mut count = 0;\n    loop {\n        let res = select.select().unwrap();\n        if res == rx {\n            if let Ok(_item) = rx.read_select(res) {\n                assert_eq!(_item, count);\n                count += 1;\n            }\n        } else if res == stop_rx {\n            if stop_rx.read_select(res).is_err() {\n                while let Ok(_item) = rx.try_recv() {\n                    assert_eq!(_item, count);\n                    count += 1;\n                }\n                break;\n            } else {\n                unreachable!();\n            }\n        }\n    }\n    h.join().unwrap();\n    assert_eq!(count, 10);\n}\n\n#[logfn]\n#[rstest]\nfn test_select_pressure_concurrent(setup_log: ()) {\n    let (tx_list, rx_list) = mpmc::unbounded_blocking::<i32>();\n    let (tx_array, rx_array) = mpmc::bounded_blocking::<i32>(100);\n    let mut 
th_recv = Vec::new();\n    for _ in 0..2 {\n        let rx_list_clone = rx_list.clone();\n        let rx_array_clone = rx_array.clone();\n        th_recv.push(thread::spawn(move || {\n            let mut select = Select::new();\n            select.add(&rx_list_clone);\n            select.add(&rx_array_clone);\n            let mut local_sum: usize = 0;\n            loop {\n                match select.select() {\n                    Ok(res) => {\n                        if res == rx_list_clone {\n                            if rx_list_clone.read_select(res).is_err() {\n                                select.remove(&rx_list_clone);\n                            } else {\n                                local_sum += 1;\n                            }\n                        } else if res == rx_array_clone {\n                            if rx_array_clone.read_select(res).is_err() {\n                                select.remove(&rx_array_clone);\n                            } else {\n                                local_sum += 1;\n                            }\n                        } else {\n                            unreachable!();\n                        }\n                    }\n                    Err(_) => break,\n                }\n            }\n            local_sum\n        }));\n    }\n    let mut th_send = Vec::new();\n    for _ in 0..2 {\n        let tx_list_clone = tx_list.clone();\n        let tx_array_clone = tx_array.clone();\n        th_send.push(thread::spawn(move || {\n            for i in 0..ROUND {\n                tx_list_clone.send(i as i32).expect(\"send\");\n            }\n        }));\n        th_send.push(thread::spawn(move || {\n            for i in 0..ROUND {\n                tx_array_clone.send((i + ROUND) as i32).expect(\"send\");\n            }\n        }));\n    }\n    drop(tx_list);\n    drop(tx_array);\n    for th in th_send {\n        let _ = th.join();\n    }\n    let mut total_sum = 0;\n    for th in th_recv {\n        
total_sum += th.join().unwrap();\n    }\n    assert_eq!(total_sum, 4 * ROUND);\n}\n\n#[logfn]\n#[rstest]\nfn test_multiplex_basic(setup_log: ()) {\n    let mut mp = Multiplex::<mpsc::Array<i32>>::new();\n    let tx1: MTx<_> = mp.bounded_tx(10);\n    let tx2: MTx<_> = mp.bounded_tx(10);\n\n    // Send values from different threads\n    let h1 = thread::spawn(move || {\n        tx1.send(1).unwrap();\n    });\n    let h2 = thread::spawn(move || {\n        tx2.send(2).unwrap();\n    });\n\n    h1.join().unwrap();\n    h2.join().unwrap();\n\n    // Collect received values\n    let mut received = Vec::new();\n    for _ in 0..2 {\n        let val = mp.recv().unwrap();\n        received.push(val);\n    }\n\n    // Verify we received both values (order may vary due to round-robin selection)\n    assert!(received.contains(&1));\n    assert!(received.contains(&2));\n    assert_eq!(received.len(), 2);\n}\n\n#[logfn]\n#[rstest]\nfn test_multiplex_timeout(setup_log: ()) {\n    let mut mp = Multiplex::<mpmc::Array<i32>>::new();\n    let _tx: MTx<_> = mp.bounded_tx(10);\n    let result = mp.recv_timeout(Duration::from_millis(10));\n    assert_eq!(result, Err(RecvTimeoutError::Timeout));\n}\n\n#[logfn]\n#[rstest]\nfn test_multiplex_try_recv(setup_log: ()) {\n    let mut mp = Multiplex::<mpmc::Array<i32>>::new();\n    let tx: MTx<_> = mp.bounded_tx(10);\n    assert_eq!(mp.try_recv(), Err(TryRecvError::Empty));\n    tx.send(42).unwrap();\n    assert_eq!(mp.try_recv(), Ok(42));\n    assert_eq!(mp.try_recv(), Err(TryRecvError::Empty));\n}\n\n#[logfn]\n#[rstest]\nfn test_multiplex_basic_array_blocking(setup_log: ()) {\n    let mut mp = Multiplex::<mpsc::Array<i32>>::new();\n    let tx1: MTx<_> = mp.bounded_tx(10);\n    let tx2: MTx<_> = mp.bounded_tx(10);\n    let tx3: MTx<_> = mp.bounded_tx(10);\n\n    let h1 = thread::spawn(move || {\n        thread::sleep(Duration::from_millis(50));\n        tx1.send(10).expect(\"send\");\n    });\n    let h2 = thread::spawn(move || {\n        
thread::sleep(Duration::from_millis(100));\n        tx2.send(20).expect(\"send\");\n    });\n    let h3 = thread::spawn(move || {\n        thread::sleep(Duration::from_millis(25));\n        tx3.send(30).expect(\"send\");\n    });\n    let mut received_values = Vec::new();\n    for _ in 0..3 {\n        received_values.push(mp.recv().unwrap());\n    }\n    received_values.sort();\n    assert_eq!(received_values, vec![10, 20, 30]);\n    h1.join().unwrap();\n    h2.join().unwrap();\n    h3.join().unwrap();\n}\n\n#[logfn]\n#[rstest]\nfn test_multiplex_basic_list_blocking(setup_log: ()) {\n    let mut mp = Multiplex::<mpsc::List<usize>>::new();\n    let tx1: MTx<Mux<mpsc::List<usize>>> = mp.new_tx();\n    let tx2: MTx<Mux<mpsc::List<usize>>> = mp.new_tx();\n\n    let round = {\n        #[cfg(miri)]\n        {\n            99\n        }\n        #[cfg(not(miri))]\n        {\n            999\n        }\n    };\n\n    let h1 = thread::spawn(move || {\n        for i in 0..round {\n            tx1.send(1000 + i).expect(\"send\");\n        }\n    });\n    let h2 = thread::spawn(move || {\n        for i in 0..round {\n            tx2.send(2000 + i).expect(\"send\");\n        }\n    });\n    let mut received_values = Vec::with_capacity(round * 2);\n    for _ in 0..(2 * round) {\n        if let Ok(item) = mp.recv() {\n            received_values.push(item);\n        } else {\n            panic!(\"Unexpected early close, count={:?}\", received_values.len());\n        }\n    }\n    received_values.sort();\n    for i in 0..received_values.len() {\n        let item = received_values[i];\n        if item < 2000 {\n            assert_eq!(item, 1000 + i);\n        } else {\n            assert_eq!(item, 2000 + i - round);\n        }\n    }\n\n    h1.join().unwrap();\n    h2.join().unwrap();\n}\n\n#[logfn]\n#[rstest]\nfn test_multiplex_sender_close(setup_log: ()) {\n    let mut mp = Multiplex::<mpsc::Array<i32>>::new();\n    let tx1: MTx<_> = mp.bounded_tx(10);\n    let tx2: MTx<_> = 
mp.bounded_tx(10);\n\n    tx1.send(1).expect(\"send\");\n    tx2.send(2).expect(\"send\");\n\n    drop(tx1);\n    drop(tx2);\n\n    let mut received = 0;\n    while let Ok(_) = mp.recv() {\n        received += 1;\n    }\n    assert_eq!(received, 2);\n}\n\n#[logfn]\n#[rstest]\n#[case(1, 1)]\n#[case(5, 1)]\n#[case(5, 5)]\nfn test_multiplex_basic_drop_on_sender_blocked(\n    setup_log: (), #[case] producers: usize, #[case] bound: usize,\n) {\n    macro_rules! run_test {\n        ($flavor: path, $tx_t: tt)=>{{\n            let mut mp = Multiplex::<$flavor>::new();\n            println!(\"run_test {:?}\", mp);\n            let mut senders: Vec<$tx_t<Mux<$flavor>>> = Vec::new();\n            for _ in 0..producers {\n                senders.push(mp.bounded_tx(bound));\n            }\n            let results = Arc::new(AtomicUsize::new(0)); // To count how many senders returned disconnected\n                                                         // Fill the channel initially so the first sender blocks\n            for tx in &senders {\n                for i in 0..bound {\n                    tx.send(i).expect(\"send\"); // Fill up the capacity\n                }\n            }\n            let mut handles = Vec::new();\n            let barrier = Arc::new(Barrier::new(producers + 1)); // +1 for the main thread\n            for tx in senders {\n                let barrier_clone = barrier.clone();\n                let results_clone = results.clone();\n                handles.push(thread::spawn(move || {\n                    barrier_clone.wait(); // Wait for all senders to be ready to block\n                    let res = tx.send(100);\n                    if let Err(SendError(_)) = res {\n                        results_clone.fetch_add(1, Ordering::SeqCst);\n                    }\n                }));\n            }\n            barrier.wait(); // Main thread waits for all sender threads to reach the barrier\n                            // Give a moment for threads to 
potentially block\n            thread::sleep(Duration::from_millis(50));\n            // Drop the multiplexer, which should wake up all blocking senders\n            drop(mp);\n            for handle in handles {\n                handle.join().unwrap();\n            }\n            assert_eq!(results.load(Ordering::SeqCst), producers);\n            println!(\"\");\n        }};\n    }\n    run_test!(spsc::Array<usize>, Tx);\n    run_test!(mpsc::Array<usize>, MTx);\n    run_test!(mpmc::Array<usize>, MTx);\n}\n\n#[logfn]\n#[rstest]\n#[case(1, 1)]\n#[case(1, 10)]\n#[case(20, 1)]\n#[case(10, 10)]\n#[case(5, 100)]\nfn test_pressure_multiplex_array(setup_log: (), #[case] producers: usize, #[case] bound: usize) {\n    #[cfg(miri)]\n    {\n        if producers > 5 {\n            println!(\"skip\");\n            return;\n        }\n    }\n    let mut mp = Multiplex::<spsc::Array<usize>>::new();\n    let round = ROUND;\n    let total_messages = round * producers;\n    let mut handlers = Vec::new();\n\n    for _ in 0..producers {\n        let tx: Tx<_> = mp.bounded_tx(bound);\n        handlers.push(thread::spawn(move || {\n            for i in 0..round {\n                tx.send(i).expect(\"send\");\n            }\n        }));\n    }\n\n    let mut count = 0;\n    while count < total_messages {\n        match mp.recv() {\n            Ok(_) => count += 1,\n            Err(_) => break,\n        }\n    }\n    for h in handlers {\n        h.join().unwrap();\n    }\n    assert_eq!(count, total_messages);\n}\n\n#[logfn]\n#[rstest]\n#[case(1, 1)]\n#[case(1, 10)]\n#[case(20, 1)]\n#[case(10, 10)]\n#[case(5, 20)]\nfn test_pressure_multiplex_array_mp(setup_log: (), #[case] producers: usize, #[case] bound: usize) {\n    #[cfg(miri)]\n    {\n        if producers > 5 {\n            println!(\"skip\");\n            return;\n        }\n    }\n    macro_rules! 
run_test {\n        ($mp: expr) => {\n            println!(\"run_test {:?}\", $mp);\n            let round = ROUND;\n            let total_messages = round * producers * 4;\n            let mut handlers = Vec::new();\n            for _ in 0..producers {\n                let tx: MTx<_> = $mp.bounded_tx(bound);\n                for _ in 0..4 {\n                    let _tx = tx.clone();\n                    handlers.push(thread::spawn(move || {\n                        for i in 0..round {\n                            _tx.send(i).expect(\"send\");\n                        }\n                    }));\n                }\n            }\n            let mut count = 0;\n            while count < total_messages {\n                match $mp.recv() {\n                    Ok(_) => count += 1,\n                    Err(_) => break,\n                }\n            }\n            for h in handlers {\n                h.join().unwrap();\n            }\n            assert_eq!(count, total_messages);\n        };\n    }\n    let mut mp = Multiplex::<mpsc::Array<usize>>::new();\n    run_test!(mp);\n    let mut mp = Multiplex::<mpmc::Array<usize>>::new();\n    run_test!(mp);\n}\n\n#[logfn]\n#[rstest]\n#[case(1)]\n#[case(5)]\n#[case(20)]\nfn test_pressure_multiplex_list(setup_log: (), #[case] producers: usize) {\n    #[cfg(miri)]\n    {\n        if producers > 5 {\n            println!(\"skip\");\n            return;\n        }\n    }\n    macro_rules! 
run_test {\n        ($mp: expr, $tx_c: tt) => {\n            println!(\"run_test {:?}\", $mp);\n            let round = ROUND;\n            let total_messages = round * producers;\n            let mut handlers = Vec::new();\n            for _ in 0..producers {\n                let tx: $tx_c<_> = $mp.new_tx();\n                handlers.push(thread::spawn(move || {\n                    for i in 0..round {\n                        tx.send(i).expect(\"send\");\n                    }\n                }));\n            }\n\n            let mut count = 0;\n            while count < total_messages {\n                match $mp.recv() {\n                    Ok(_) => count += 1,\n                    Err(_) => break,\n                }\n            }\n\n            for h in handlers {\n                h.join().unwrap();\n            }\n            assert_eq!(count, total_messages);\n        };\n    }\n    let mut mp = Multiplex::<spsc::List<usize>>::new();\n    run_test!(mp, Tx);\n    let mut mp = Multiplex::<mpsc::List<usize>>::new();\n    run_test!(mp, MTx);\n    let mut mp = Multiplex::<mpmc::List<usize>>::new();\n    run_test!(mp, MTx);\n}\n\n#[logfn]\n#[rstest]\nfn test_multiplex_weighted_round_robin(setup_log: ()) {\n    let mut mp = Multiplex::<mpsc::Array<i32>>::new();\n    // Channel 1 with weight 2\n    let tx1: MTx<_> = mp.bounded_tx_with_weight(10, 2);\n    // Channel 2 with weight 2\n    let tx2: MTx<_> = mp.bounded_tx_with_weight(10, 2);\n\n    // Send data\n    for i in 0..6 {\n        tx1.send(10 + i).unwrap(); // 10, 11, 12, 13, 14, 15\n        tx2.send(20 + i).unwrap(); // 20, 21, 22, 23, 24, 25\n    }\n\n    let mut received = Vec::new();\n    for _ in 0..12 {\n        received.push(mp.recv().unwrap());\n    }\n\n    // Expected sequence (weight 2 => two messages per channel before switching):\n    // tx1 (10), tx1 (11) -> weight exhausted, switch to tx2\n    // tx2 (20), tx2 (21) -> weight exhausted, switch to tx1\n    // tx1 (12), tx1 (13) -> switch to tx2\n    // tx2 
(22), tx2 (23) -> switch to tx1\n    // tx1 (14), tx1 (15)\n    // tx2 (24), tx2 (25)\n\n    let expected = vec![10, 11, 20, 21, 12, 13, 22, 23, 14, 15, 24, 25];\n\n    println!(\"Received: {:?}\", received);\n    assert_eq!(received, expected);\n}\n\n#[logfn]\n#[rstest]\nfn test_multiplex_weighted_skip_empty(setup_log: ()) {\n    // Test that if weight is not exhausted but the channel is empty, we skip to the next\n    let mut mp = Multiplex::<mpsc::Array<i32>>::new();\n    let tx1: MTx<_> = mp.bounded_tx_with_weight(10, 5); // High weight\n    let tx2: MTx<_> = mp.bounded_tx_with_weight(10, 2);\n\n    tx1.send(1).unwrap();\n    tx2.send(2).unwrap();\n    tx2.send(3).unwrap();\n\n    // 1. Recv from tx1 (left=4). Empty now.\n    assert_eq!(mp.recv().unwrap(), 1);\n\n    // 2. Recv. tx1 is empty (but left=4). Logic should skip tx1 and go to tx2.\n    // tx2 has item (2). Return 2.\n    assert_eq!(mp.recv().unwrap(), 2);\n\n    // 3. Recv. tx2 is current (left=1).\n    assert_eq!(mp.recv().unwrap(), 3);\n}\n"
  },
  {
    "path": "test-suite/src/test_type_switch.rs",
    "content": "use crate::*;\nuse captains_log::{logfn, *};\nuse crossfire::flavor::Flavor;\nuse crossfire::*;\nuse rstest::*;\nuse std::time::Duration;\n\n#[fixture]\nfn setup_log() {\n    let _ = recipe::env_logger(\"LOG_FILE\", \"LOG_LEVEL\").build().expect(\"log setup\");\n}\n\n// Macro to wrap tests with a 5-second timeout\nmacro_rules! runtime_block_on_with_timeout {\n    ($async_block:expr) => {{\n        runtime_block_on!(async move {\n            timeout(Duration::from_secs(5), $async_block)\n                .await\n                .expect(\"Test timed out after 5 seconds\")\n        })\n    }};\n}\n\n// Test async-to-blocking receiver switching for bounded channels with messages in buffer\n#[logfn]\n#[rstest]\n#[case(spsc::bounded_async(5))] // Small buffer to create backpressure\n#[case(mpsc::bounded_async(5))]\nfn test_bounded_async_with_sync_receiver_switch_buffered<\n    F: Flavor<Item = usize> + 'static,\n    T: AsyncTxTrait<usize>,\n>(\n    setup_log: (), #[case] channel: (T, AsyncRx<F>),\n) {\n    let (tx, rx) = channel;\n    let total_messages = 20; // More messages than buffer capacity\n    let async_consumed = 4; // Leave messages in buffer during switch\n\n    runtime_block_on_with_timeout!(async move {\n        // Spawn async sender task - will block when buffer fills\n        let sender_task = async_spawn!(async move {\n            for i in 0..total_messages {\n                trace!(\"Async sender sending message {}\", i);\n                tx.send(i).await.expect(\"Failed to send message\");\n            }\n            trace!(\"Async sender completed all {} messages\", total_messages);\n        });\n\n        // Consume some messages with async receiver (in async task)\n        let receiver_task = async_spawn!(async move {\n            let mut async_received = Vec::new();\n            for _ in 0..async_consumed {\n                match rx.recv().await {\n                    Ok(value) => {\n                        trace!(\"Async receiver got 
message: {}\", value);\n                        async_received.push(value);\n                    }\n                    Err(e) => {\n                        panic!(\"Failed to receive message: {:?}\", e);\n                    }\n                }\n            }\n            trace!(\"Async receiver consumed {} messages\", async_received.len());\n            (rx, async_received)\n        });\n\n        // Get the receiver back after partial consumption\n        let (rx, async_received) = async_join_result!(receiver_task);\n\n        // CRITICAL: Convert to blocking receiver while messages are still in buffer AND sender is waiting\n        let blocking_rx: Rx<F> = rx.into();\n\n        // Continue receiving with blocking receiver in a thread\n        let remaining_messages = total_messages - async_consumed;\n        let sync_th = std::thread::spawn(move || {\n            let mut sync_received = Vec::new();\n\n            while let Ok(value) = blocking_rx.recv() {\n                trace!(\"Sync receiver got message: {}\", value);\n                sync_received.push(value);\n            }\n\n            trace!(\"Sync receiver consumed {} messages from buffer\", sync_received.len());\n            sync_received\n        });\n\n        // Wait for sender to complete\n        let _ = sender_task.await;\n\n        let sync_received = sync_th.join().expect(\"Sync receiver thread panicked\");\n\n        // Verify all messages were received\n        assert_eq!(async_received.len(), async_consumed);\n        assert_eq!(sync_received.len(), remaining_messages);\n\n        let mut all_received = async_received;\n        all_received.extend(sync_received);\n        assert_eq!(all_received.len(), total_messages);\n\n        for i in 0..total_messages {\n            assert!(all_received.contains(&i), \"Missing value: {}\", i);\n        }\n\n        trace!(\n            \"Successfully switched bounded channel from async to sync receiver with backpressure\"\n        );\n    
});\n}\n\n// Test async-to-blocking receiver switching for MPMC bounded channels with messages in buffer\n#[logfn]\n#[rstest]\n#[case(mpmc::bounded_async(5))] // Small buffer to create backpressure\nfn test_mpmc_bounded_async_with_sync_receiver_switch_buffered<F: Flavor<Item = usize> + 'static>(\n    setup_log: (), #[case] channel: (MAsyncTx<F>, MAsyncRx<F>),\n) {\n    let (tx, rx) = channel;\n    let total_messages = 20; // More messages than buffer capacity\n    let async_consumed = 4; // Consume some messages before switching\n\n    runtime_block_on_with_timeout!(async move {\n        // Send all messages first to fill buffer (async sender in task)\n        let sender_task = async_spawn!(async move {\n            for i in 0..total_messages {\n                tx.send(i).await.expect(\"Failed to send message\");\n                trace!(\"Async sender sent message: {}\", i);\n            }\n            trace!(\"Async sender completed all {} messages\", total_messages);\n        });\n\n        // Consume some messages with async receiver (in async task)\n        let receiver_task = async_spawn!(async move {\n            let mut async_received = Vec::new();\n            for _ in 0..async_consumed {\n                match rx.recv().await {\n                    Ok(value) => {\n                        trace!(\"Async receiver got message: {}\", value);\n                        async_received.push(value);\n                    }\n                    Err(e) => {\n                        panic!(\"Failed to receive message with async receiver: {:?}\", e);\n                    }\n                }\n            }\n            trace!(\"Async receiver consumed {} messages\", async_received.len());\n            (rx, async_received)\n        });\n\n        // Get the receiver back after partial consumption\n        let (rx, async_received) = async_join_result!(receiver_task);\n\n        // Convert to blocking receiver while messages are still in buffer\n        let sync_rx: MRx<F> 
= rx.into();\n\n        // Consume remaining messages with blocking receiver in thread\n        let remaining_messages = total_messages - async_consumed;\n        let sync_th = std::thread::spawn(move || {\n            let mut sync_received = Vec::new();\n            while let Ok(value) = sync_rx.recv() {\n                trace!(\"Sync receiver got message: {}\", value);\n                sync_received.push(value);\n            }\n            trace!(\"Sync receiver consumed {} messages from buffer\", sync_received.len());\n            sync_received\n        });\n\n        // Wait for sender to complete\n        let _ = sender_task.await;\n        let sync_received = sync_th.join().expect(\"Sync receiver thread panicked\");\n\n        // Verify all messages were received\n        assert_eq!(async_received.len(), async_consumed);\n        assert_eq!(sync_received.len(), remaining_messages);\n\n        let mut all_received = async_received;\n        all_received.extend(sync_received);\n        assert_eq!(all_received.len(), total_messages);\n\n        for i in 0..total_messages {\n            assert!(all_received.contains(&i), \"Missing value: {}\", i);\n        }\n\n        trace!(\n            \"Successfully switched MPMC bounded channel from async to sync receiver with backpressure\"\n        );\n    });\n}\n\n// Test blocking-to-async sender switching for bounded channels\n#[logfn]\n#[rstest]\n#[case(spsc::bounded_blocking(5))] // Small buffer for backpressure\nfn test_spsc_bounded_blocking_with_async_sender_switch<F: Flavor<Item = usize> + 'static>(\n    setup_log: (), #[case] channel: (Tx<F>, Rx<F>),\n) {\n    let (tx, rx) = channel;\n    let total_messages = 20;\n    let sync_sent = 4; // Fill buffer, then switch while sender would block\n\n    runtime_block_on_with_timeout!(async move {\n        // Start blocking receiver in a thread\n        let receiver_handle = std::thread::spawn(move || {\n            let mut all_received = Vec::new();\n            while 
let Ok(value) = rx.recv() {\n                trace!(\"Blocking receiver got message: {}\", value);\n                all_received.push(value);\n            }\n            trace!(\"Blocking receiver completed\");\n            all_received\n        });\n\n        // Send messages with blocking sender in a thread (will block when buffer fills)\n        let sender_handle = std::thread::spawn(move || {\n            for i in 0..sync_sent {\n                trace!(\"Blocking sender sending message {}\", i);\n                tx.send(i).expect(\"Failed to send message\");\n            }\n            trace!(\"Blocking sender sent {} messages\", sync_sent);\n            tx\n        });\n\n        // Get the sender back and convert to async\n        let tx = sender_handle.join().expect(\"Sender thread panicked\");\n\n        // CRITICAL: Convert to async sender while buffer has backpressure\n        let async_tx: AsyncTx<F> = tx.into();\n\n        // Send remaining messages with async sender in task\n        let remaining_messages = total_messages - sync_sent;\n        let async_sender_task = async_spawn!(async move {\n            for i in sync_sent..total_messages {\n                trace!(\"Async sender sending message {}\", i);\n                async_tx.send(i).await.expect(\"Failed to send message\");\n            }\n            trace!(\"Async sender sent {} more messages\", remaining_messages);\n        });\n        // Wait for async sender to complete\n        let _ = async_sender_task.await;\n        // Get final results\n        let all_received = receiver_handle.join().expect(\"Final receiver thread panicked\");\n\n        // Verify all messages were received\n        assert_eq!(all_received.len(), total_messages);\n        for i in 0..total_messages {\n            assert!(all_received.contains(&i), \"Missing value: {}\", i);\n        }\n\n        trace!(\"Successfully switched from blocking to async sender with backpressure\");\n    });\n}\n\n// Test blocking-to-async 
sender switching for multi-producer bounded channels\n#[logfn]\n#[rstest]\n#[case(mpsc::bounded_blocking(5))] // Buffer < 12 total messages\nfn test_mpsc_bounded_blocking_with_async_sender_switch<F: Flavor<Item = usize> + 'static>(\n    setup_log: (), #[case] channel: (MTx<F>, Rx<F>),\n) {\n    let (tx, rx) = channel;\n    let total_messages = 20;\n    let sync_sent = 4; // Fill buffer before switching\n\n    runtime_block_on_with_timeout!(async move {\n        // Send messages with blocking multi-sender in a thread\n        let sender_handle = std::thread::spawn(move || {\n            for i in 0..sync_sent {\n                tx.send(i).expect(\"Failed to send message\");\n            }\n            trace!(\"Blocking MTx sent {} messages, buffer has messages\", sync_sent);\n            tx\n        });\n\n        // Get the sender back\n        let tx = sender_handle.join().expect(\"Sender thread panicked\");\n\n        // CRITICAL: Convert to async multi-sender while messages are in buffer\n        let async_tx: MAsyncTx<F> = tx.into();\n\n        // Send remaining messages with async multi-sender in a task\n        let async_sender_task = async_spawn!(async move {\n            let remaining_messages = total_messages - sync_sent;\n            for i in sync_sent..total_messages {\n                async_tx.send(i).await.expect(\"Failed to send message\");\n            }\n            trace!(\"Async MAsyncTx sent {} more messages\", remaining_messages);\n        });\n\n        // Receive all messages with blocking receiver in a thread\n        let receiver_handle = std::thread::spawn(move || {\n            let mut all_received = Vec::new();\n            while let Ok(value) = rx.recv() {\n                all_received.push(value);\n            }\n\n            all_received\n        });\n\n        // Wait for async sender to complete\n        let _ = async_sender_task.await;\n\n        let all_received = receiver_handle.join().expect(\"Receiver thread panicked\");\n\n     
   // Verify all messages were received\n        assert_eq!(all_received.len(), total_messages);\n        for i in 0..total_messages {\n            assert!(all_received.contains(&i), \"Missing value: {}\", i);\n        }\n\n        trace!(\"Successfully switched from blocking MTx to async MAsyncTx with buffered messages\");\n    });\n}\n\n// Test blocking-to-async sender switching for MPMC bounded channels\n#[logfn]\n#[rstest]\n#[case(mpmc::bounded_blocking(5))] // Buffer < 12 total messages\nfn test_mpmc_bounded_blocking_with_async_sender_switch<F: Flavor<Item = usize> + 'static>(\n    setup_log: (), #[case] channel: (MTx<F>, MRx<F>),\n) {\n    let (tx, rx) = channel;\n    let total_messages = 20;\n    let sync_sent = 4; // Fill buffer before switching\n\n    runtime_block_on_with_timeout!(async move {\n        // Send messages with blocking multi-sender in a thread\n        let sender_handle = std::thread::spawn(move || {\n            for i in 0..sync_sent {\n                tx.send(i).expect(\"Failed to send message\");\n            }\n            trace!(\"Blocking MTx sent {} messages, buffer has messages\", sync_sent);\n            tx\n        });\n\n        // Get the sender back\n        let tx = sender_handle.join().expect(\"Sender thread panicked\");\n\n        // CRITICAL: Convert to async multi-sender while messages are in buffer\n        let async_tx: MAsyncTx<F> = tx.into();\n\n        // Send remaining messages with async multi-sender in a task\n        let async_sender_task = async_spawn!(async move {\n            let remaining_messages = total_messages - sync_sent;\n            for i in sync_sent..total_messages {\n                async_tx.send(i).await.expect(\"Failed to send message\");\n            }\n            trace!(\"Async MAsyncTx sent {} more messages\", remaining_messages);\n        });\n\n        // Receive all messages with blocking receiver in a thread\n        let receiver_handle = std::thread::spawn(move || {\n            let mut 
all_received = Vec::new();\n            while let Ok(value) = rx.recv() {\n                all_received.push(value);\n            }\n\n            all_received\n        });\n\n        // Wait for async sender to complete\n        let _ = async_sender_task.await;\n        let all_received = receiver_handle.join().expect(\"Receiver thread panicked\");\n\n        // Verify all messages were received\n        assert_eq!(all_received.len(), total_messages);\n        for i in 0..total_messages {\n            assert!(all_received.contains(&i), \"Missing value: {}\", i);\n        }\n\n        trace!(\"Successfully switched from blocking MTx to async MAsyncTx with buffered messages for MPMC\");\n    });\n}\n\n// Test blocking-to-async receiver switching for bounded channels\n#[logfn]\n#[rstest]\n#[case(spsc::bounded_blocking(5))] // Buffer < 12 total messages\nfn test_spsc_bounded_blocking_with_async_receiver_switch<F: Flavor<Item = usize> + 'static>(\n    setup_log: (), #[case] channel: (Tx<F>, Rx<F>),\n) {\n    let (tx, rx) = channel;\n    let total_messages = 20;\n    let sync_consumed = 4; // Leave most messages in buffer\n\n    runtime_block_on_with_timeout!(async move {\n        // Send all messages in a thread (sync sender)\n        let sender_handle = std::thread::spawn(move || {\n            for i in 0..total_messages {\n                tx.send(i).expect(\"Failed to send message\");\n            }\n            trace!(\"Sent {} messages to buffer\", total_messages);\n        });\n\n        // Start receiver in a thread to consume some messages\n        let receiver_handle = std::thread::spawn(move || {\n            let mut sync_received = Vec::new();\n            for _ in 0..sync_consumed {\n                sync_received.push(rx.recv().expect(\"Failed to receive message\"));\n            }\n            trace!(\n                \"Blocking receiver consumed {} messages, {} remain in buffer\",\n                sync_received.len(),\n                total_messages - 
sync_consumed\n            );\n            (rx, sync_received)\n        });\n\n        // Join receiver first, then sender\n        let (rx, sync_received) = receiver_handle.join().expect(\"Receiver thread panicked\");\n\n        // CRITICAL: Convert to async receiver while messages are still in buffer\n        let async_rx: AsyncRx<F> = rx.into();\n\n        // Consume remaining messages with async receiver in a task\n        let async_receiver_task = async_spawn!(async move {\n            let mut async_received = Vec::new();\n\n            while let Ok(value) = async_rx.recv().await {\n                async_received.push(value);\n            }\n\n            trace!(\"Async receiver consumed {} messages from buffer\", async_received.len());\n            async_received\n        });\n\n        let async_received = async_join_result!(async_receiver_task);\n        sender_handle.join().expect(\"Sender thread panicked\");\n\n        // Verify all messages were received\n        let remaining_messages = total_messages - sync_consumed;\n        assert_eq!(sync_received.len(), sync_consumed);\n        assert_eq!(async_received.len(), remaining_messages);\n\n        let mut all_received = sync_received;\n        all_received.extend(async_received);\n        assert_eq!(all_received.len(), total_messages);\n\n        for i in 0..total_messages {\n            assert!(all_received.contains(&i), \"Missing value: {}\", i);\n        }\n\n        trace!(\"Successfully switched from blocking to async receiver with buffered messages\");\n    });\n}\n\n// Test blocking-to-async receiver switching for multi-producer bounded channels\n#[logfn]\n#[rstest]\n#[case(mpsc::bounded_blocking(5))] // Buffer < 12 total messages\nfn test_mpsc_bounded_blocking_with_async_receiver_switch<F: Flavor<Item = usize> + 'static>(\n    setup_log: (), #[case] channel: (MTx<F>, Rx<F>),\n) {\n    let (tx, rx) = channel;\n    let total_messages = 20;\n    let sync_consumed = 4; // Leave most messages in 
buffer\n\n    runtime_block_on_with_timeout!(async move {\n        // Start sender in a thread (sync sender)\n        let sender_handle = std::thread::spawn(move || {\n            for i in 0..total_messages {\n                tx.send(i).expect(\"Failed to send message\");\n            }\n            trace!(\"Sent {} messages to buffer\", total_messages);\n        });\n\n        // Start receiver in a thread to consume some messages\n        let receiver_handle = std::thread::spawn(move || {\n            let mut sync_received = Vec::new();\n            for _ in 0..sync_consumed {\n                sync_received.push(rx.recv().expect(\"Failed to receive message\"));\n            }\n            trace!(\n                \"Blocking receiver consumed {} messages, {} remain in buffer\",\n                sync_received.len(),\n                total_messages - sync_consumed\n            );\n            (rx, sync_received)\n        });\n\n        // Join receiver first, then sender\n        let (rx, sync_received) = receiver_handle.join().expect(\"Receiver thread panicked\");\n\n        // CRITICAL: Convert to async receiver while messages are still in buffer\n        let async_rx: AsyncRx<F> = rx.into();\n\n        // Consume remaining messages with async receiver in a task\n        let async_receiver_task = async_spawn!(async move {\n            let mut async_received = Vec::new();\n\n            while let Ok(value) = async_rx.recv().await {\n                async_received.push(value);\n            }\n\n            trace!(\"Async receiver consumed {} messages from buffer\", async_received.len());\n            async_received\n        });\n\n        let async_received = async_join_result!(async_receiver_task);\n        sender_handle.join().expect(\"Sender thread panicked\");\n\n        // Verify all messages were received\n        let remaining_messages = total_messages - sync_consumed;\n        assert_eq!(sync_received.len(), sync_consumed);\n        
assert_eq!(async_received.len(), remaining_messages);\n\n        let mut all_received = sync_received;\n        all_received.extend(async_received);\n        assert_eq!(all_received.len(), total_messages);\n\n        for i in 0..total_messages {\n            assert!(all_received.contains(&i), \"Missing value: {}\", i);\n        }\n\n        trace!(\"Successfully switched from blocking to async receiver with buffered messages\");\n    });\n}\n\n// Test blocking-to-async receiver switching for MPMC bounded channels\n#[logfn]\n#[rstest]\n#[case(mpmc::bounded_blocking(5))] // Buffer < 12 total messages\nfn test_mpmc_bounded_blocking_with_async_receiver_switch<F: Flavor<Item = usize> + 'static>(\n    setup_log: (), #[case] channel: (MTx<F>, MRx<F>),\n) {\n    let (tx, rx) = channel;\n    let total_messages = 20;\n    let sync_consumed = 4; // Leave most messages in buffer\n\n    runtime_block_on_with_timeout!(async move {\n        // Send all messages in a thread (sync sender)\n        let sender_handle = std::thread::spawn(move || {\n            for i in 0..total_messages {\n                tx.send(i).expect(\"Failed to send message\");\n            }\n            trace!(\"Sent {} messages to buffer\", total_messages);\n        });\n\n        // Start receiver in a thread to consume some messages\n        let receiver_handle = std::thread::spawn(move || {\n            let mut sync_received = Vec::new();\n            for _ in 0..sync_consumed {\n                sync_received.push(rx.recv().expect(\"Failed to receive message\"));\n            }\n            trace!(\n                \"Blocking receiver consumed {} messages, {} remain in buffer\",\n                sync_received.len(),\n                total_messages - sync_consumed\n            );\n            (rx, sync_received)\n        });\n\n        // Join receiver first, then sender\n        let (rx, sync_received) = receiver_handle.join().expect(\"Receiver thread panicked\");\n\n        // CRITICAL: Convert to async 
receiver while messages are still in buffer\n        let async_rx: MAsyncRx<F> = rx.into();\n\n        // Consume remaining messages with async receiver in a task\n        let async_receiver_task = async_spawn!(async move {\n            let mut async_received = Vec::new();\n            while let Ok(value) = async_rx.recv().await {\n                async_received.push(value);\n            }\n            trace!(\"Async receiver consumed {} remaining messages\", async_received.len());\n            async_received\n        });\n\n        let async_received = async_join_result!(async_receiver_task);\n        sender_handle.join().expect(\"Sender thread panicked\");\n\n        // Verify all messages were received\n        let mut all_received = sync_received;\n        all_received.extend(async_received);\n        assert_eq!(all_received.len(), total_messages);\n\n        for i in 0..total_messages {\n            assert!(all_received.contains(&i), \"Missing value: {}\", i);\n        }\n\n        trace!(\"Successfully switched MPMC from blocking to async receiver with buffered messages\");\n    });\n}\n\n// Test multi-producer sender switching (MTx to MAsyncTx)\n#[logfn]\n#[rstest]\n#[case(mpsc::bounded_blocking(5))] // Buffer < 20 total messages\n#[case(mpmc::bounded_blocking(5))]\nfn test_multi_producer_sender_switch<\n    F: Flavor<Item = usize> + 'static,\n    R: BlockingRxTrait<usize>,\n>(\n    setup_log: (), #[case] channel: (MTx<F>, R),\n) {\n    let (tx, rx) = channel;\n    let total_messages = 20;\n    let sync_sent = 4; // Fill most of buffer before switching\n\n    runtime_block_on_with_timeout!(async move {\n        // Start receiver first to consume messages as they arrive\n        let receiver_handle = std::thread::spawn(move || {\n            let mut all_received = Vec::new();\n            while let Ok(value) = rx.recv() {\n                all_received.push(value);\n            }\n            all_received\n        });\n\n        // Send messages with blocking 
multi-sender in a thread\n        let sender_handle = std::thread::spawn(move || {\n            for i in 0..sync_sent {\n                tx.send(i).expect(\"Failed to send message\");\n            }\n            trace!(\"Blocking MTx sent {} messages, buffer has messages\", sync_sent);\n            tx\n        });\n\n        // Get the sender back and convert to async\n        let tx = sender_handle.join().expect(\"Sender thread panicked\");\n        let async_tx: MAsyncTx<F> = tx.into();\n\n        // Send remaining messages with async multi-sender in a task\n        let async_sender_task = async_spawn!(async move {\n            for i in sync_sent..total_messages {\n                async_tx.send(i).await.expect(\"Failed to send message\");\n            }\n            trace!(\"Async MAsyncTx sent {} more messages\", total_messages - sync_sent);\n        });\n\n        // Wait for async sender to complete, then join receiver\n        let _ = async_sender_task.await;\n        let all_received = receiver_handle.join().expect(\"Receiver thread panicked\");\n\n        // Verify all messages were received\n        assert_eq!(all_received.len(), total_messages);\n        for i in 0..total_messages {\n            assert!(all_received.contains(&i), \"Missing value: {}\", i);\n        }\n\n        trace!(\"Successfully switched from MTx to MAsyncTx with buffered messages\");\n    });\n}\n\n// Test async-to-blocking sender switching for SPSC bounded channels\n#[logfn]\n#[rstest]\n#[case(spsc::bounded_async(5))] // Small buffer for backpressure\nfn test_spsc_bounded_async_with_blocking_sender_switch<F: Flavor<Item = usize> + 'static>(\n    setup_log: (), #[case] channel: (AsyncTx<F>, AsyncRx<F>),\n) {\n    let (tx, rx) = channel;\n    let total_messages = 10;\n    let async_sent = 4; // Fill buffer before switching\n\n    runtime_block_on_with_timeout!(async move {\n        // Send messages with async sender in a task\n        let sender_task = async_spawn!(async move {\n      
      for i in 0..async_sent {\n                tx.send(i).await.expect(\"Failed to send message\");\n            }\n            trace!(\"Async sender sent {} messages, buffer has messages\", async_sent);\n            tx\n        });\n\n        // Get the sender back and convert to blocking\n        let tx = async_join_result!(sender_task);\n        let blocking_tx: Tx<F> = tx.into();\n\n        // Send remaining messages with blocking sender in thread\n        let blocking_sender_handle = std::thread::spawn(move || {\n            let remaining_messages = total_messages - async_sent;\n            for i in async_sent..total_messages {\n                blocking_tx.send(i).expect(\"Failed to send message\");\n            }\n            trace!(\"Blocking sender sent {} more messages\", remaining_messages);\n        });\n\n        // Receive all messages with async receiver in a task\n        let receiver_task = async_spawn!(async move {\n            let mut all_received = Vec::new();\n            while let Ok(value) = rx.recv().await {\n                all_received.push(value);\n            }\n            all_received\n        });\n\n        // Wait for both sender and receiver to complete\n        let all_received = async_join_result!(receiver_task);\n        blocking_sender_handle.join().expect(\"Blocking sender thread panicked\");\n\n        // Verify all messages were received\n        assert_eq!(all_received.len(), total_messages);\n        for i in 0..total_messages {\n            assert!(all_received.contains(&i), \"Missing value: {}\", i);\n        }\n\n        trace!(\"Successfully switched SPSC from async to blocking sender with buffered messages\");\n    });\n}\n\n// Test async-to-blocking sender switching for MPSC bounded channels\n#[logfn]\n#[rstest]\n#[case(mpsc::bounded_async(5))] // Small buffer for backpressure\nfn test_mpsc_bounded_async_with_blocking_sender_switch<F: Flavor<Item = usize> + 'static>(\n    setup_log: (), #[case] channel: (MAsyncTx<F>, 
AsyncRx<F>),\n) {\n    let (tx, rx) = channel;\n    let total_messages = 10;\n    let async_sent = 4; // Fill buffer before switching\n\n    runtime_block_on_with_timeout!(async move {\n        // Send messages with async multi-sender in a task\n        let sender_task = async_spawn!(async move {\n            for i in 0..async_sent {\n                tx.send(i).await.expect(\"Failed to send message\");\n            }\n            trace!(\"Async MAsyncTx sent {} messages, buffer has messages\", async_sent);\n            tx\n        });\n\n        // Get the sender back and convert to blocking\n        let tx = async_join_result!(sender_task);\n        let blocking_tx: MTx<F> = tx.into();\n\n        // Send remaining messages with blocking multi-sender in thread\n        let blocking_sender_handle = std::thread::spawn(move || {\n            let remaining_messages = total_messages - async_sent;\n            for i in async_sent..total_messages {\n                blocking_tx.send(i).expect(\"Failed to send message\");\n            }\n            trace!(\"Blocking MTx sent {} more messages\", remaining_messages);\n        });\n\n        // Receive all messages with async receiver in a task\n        let receiver_task = async_spawn!(async move {\n            let mut all_received = Vec::new();\n            while let Ok(value) = rx.recv().await {\n                all_received.push(value);\n            }\n            all_received\n        });\n\n        // Wait for both sender and receiver to complete\n        let all_received = async_join_result!(receiver_task);\n        blocking_sender_handle.join().expect(\"Blocking sender thread panicked\");\n\n        // Verify all messages were received\n        assert_eq!(all_received.len(), total_messages);\n        for i in 0..total_messages {\n            assert!(all_received.contains(&i), \"Missing value: {}\", i);\n        }\n\n        trace!(\"Successfully switched MPSC from async to blocking sender with buffered messages\");\n   
 });\n}\n\n// Test async-to-blocking sender switching for MPMC bounded channels\n#[logfn]\n#[rstest]\n#[case(mpmc::bounded_async(5))] // Small buffer for backpressure\nfn test_mpmc_bounded_async_with_blocking_sender_switch<F: Flavor<Item = usize> + 'static>(\n    setup_log: (), #[case] channel: (MAsyncTx<F>, MAsyncRx<F>),\n) {\n    let (tx, rx) = channel;\n    let total_messages = 10;\n    let async_sent = 4; // Fill buffer before switching\n\n    runtime_block_on_with_timeout!(async move {\n        // Send messages with async multi-sender in a task\n        let sender_task = async_spawn!(async move {\n            for i in 0..async_sent {\n                tx.send(i).await.expect(\"Failed to send message\");\n            }\n            trace!(\"Async MAsyncTx sent {} messages, buffer has messages\", async_sent);\n            tx\n        });\n\n        // Get the sender back and convert to blocking\n        let tx = async_join_result!(sender_task);\n        let blocking_tx: MTx<F> = tx.into();\n\n        // Send remaining messages with blocking multi-sender in thread\n        let blocking_sender_handle = std::thread::spawn(move || {\n            let remaining_messages = total_messages - async_sent;\n            for i in async_sent..total_messages {\n                blocking_tx.send(i).expect(\"Failed to send message\");\n            }\n            trace!(\"Blocking MTx sent {} more messages\", remaining_messages);\n        });\n\n        // Receive all messages with async multi-receiver in a task\n        let receiver_task = async_spawn!(async move {\n            let mut all_received = Vec::new();\n            while let Ok(value) = rx.recv().await {\n                all_received.push(value);\n            }\n            all_received\n        });\n\n        // Wait for both sender and receiver to complete\n        let all_received = async_join_result!(receiver_task);\n        blocking_sender_handle.join().expect(\"Blocking sender thread panicked\");\n\n        // 
Verify all messages were received\n        assert_eq!(all_received.len(), total_messages);\n        for i in 0..total_messages {\n            assert!(all_received.contains(&i), \"Missing value: {}\", i);\n        }\n\n        trace!(\"Successfully switched MPMC from async to blocking sender with buffered messages\");\n    });\n}\n"
  },
  {
    "path": "test-suite/src/test_waitgroup.rs",
    "content": "use crate::*;\nuse crossfire::waitgroup::{WaitGroup, WaitGroupInline};\nuse crossfire::*;\nuse fastrand;\nuse rstest::*;\nuse std::sync::Arc;\nuse std::time::Duration;\n\n#[fixture]\nfn setup_log() {\n    _setup_log();\n    // Seed fastrand for more deterministic testing.\n    fastrand::seed(\n        std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_secs(),\n    );\n}\n\n#[logfn]\n#[rstest]\nfn test_basic_wg_try_wait(setup_log: ()) {\n    let mut wg = WaitGroup::new((), 0);\n    assert_eq!(wg.get_left(), 0);\n    wg.wait(); // should return immediately\n    assert_eq!(wg.try_wait(), Ok(()));\n    // change threshold\n    wg.set_threshold(1);\n    assert_eq!(wg.try_wait(), Ok(()));\n    let guard1 = wg.add_guard();\n    assert_eq!(wg.try_wait(), Ok(()));\n    let guard2 = wg.add_guard();\n    assert_eq!(wg.try_wait(), Err(()));\n    drop(guard2);\n    assert_eq!(wg.try_wait(), Ok(()));\n    // change threshold\n    wg.set_threshold(0);\n    assert_eq!(wg.try_wait(), Err(()));\n    drop(guard1);\n    assert_eq!(wg.try_wait(), Ok(()));\n    assert_eq!(wg.try_wait(), Ok(()));\n}\n\n#[logfn]\n#[rstest]\nfn test_waitgroup_with_state(setup_log: ()) {\n    use std::sync::atomic::{AtomicBool, Ordering};\n    let wg = WaitGroup::new(AtomicBool::new(true), 0);\n    for i in 0..10 {\n        let guard = wg.add_guard();\n        std::thread::spawn(move || {\n            if i == 5 {\n                guard.store(false, Ordering::SeqCst);\n            }\n            drop(guard);\n        });\n    }\n    wg.wait();\n    assert_eq!(wg.load(Ordering::SeqCst), false);\n}\n\n#[logfn]\n#[rstest]\nfn test_basic_wg_timeout_blocking(setup_log: ()) {\n    // Test timeout case\n    let wg = WaitGroup::new((), 0);\n    let _guard = wg.add_guard();\n    assert_eq!(wg.wait_timeout(Duration::from_millis(100)), Err(()));\n    let _wg = WaitGroup::new((), 0);\n    let _guard_parent = _wg.add_guard();\n    // Test drop while guard not finish\n    let th 
= std::thread::spawn(move || {\n        _wg.wait();\n        std::thread::sleep(Duration::from_secs(1));\n        drop(_guard);\n    });\n    assert!(wg.wait_timeout(Duration::from_millis(10)).is_err());\n    drop(_guard_parent);\n    if wg.get_left() > 0 {\n        println!(\"drop early\");\n        drop(wg);\n    }\n    th.join().expect(\"join\");\n}\n\n#[logfn]\n#[rstest]\nfn test_basic_no_wait_async(setup_log: ()) {\n    runtime_block_on!(async move {\n        let wg = WaitGroup::new((), 0);\n        assert_eq!(wg.get_left(), 0);\n        wg.wait_async().await; // should return immediately\n        assert_eq!(wg.try_wait(), Ok(()));\n    });\n}\n\n#[cfg(feature = \"time\")]\n#[logfn]\n#[rstest]\nfn test_basic_wg_one_guard_async(setup_log: ()) {\n    runtime_block_on!(async move {\n        let wg = WaitGroup::new((), 0);\n        let guard = wg.add_guard();\n        assert_eq!(wg.get_left(), 1);\n        assert_eq!(wg.try_wait(), Err(()));\n\n        let _ = async_spawn!(async move {\n            sleep(Duration::from_millis(100)).await;\n            drop(guard);\n        });\n\n        wg.wait_async().await;\n        assert_eq!(wg.get_left_seqcst(), 0);\n    });\n}\n\n#[cfg(feature = \"time\")]\n#[logfn]\n#[rstest]\nfn test_basic_wg_multi_guards_async(setup_log: ()) {\n    const NUM_GUARDS: usize = 10;\n    runtime_block_on!(async move {\n        let mut wg = WaitGroup::new((), 3);\n        let mut guards = Vec::new();\n        for _ in 0..NUM_GUARDS {\n            guards.push(wg.add_guard());\n        }\n        assert_eq!(wg.get_left(), NUM_GUARDS);\n        // test clone of the WaitGroupGuard\n        let guards1 = guards.clone();\n        assert_eq!(wg.get_left(), NUM_GUARDS * 2);\n        let guards2 = guards;\n        let _ = async_spawn!(async move {\n            sleep(Duration::from_millis(10)).await;\n            drop(guards1);\n        });\n        let _ = async_spawn!(async move {\n            sleep(Duration::from_millis(10)).await;\n            
drop(guards2);\n        });\n        wg.wait_async().await;\n        assert!(wg.get_left() <= 3);\n        // change threshold\n        wg.set_threshold(0);\n        wg.wait_async().await;\n        assert_eq!(wg.get_left(), 0);\n    });\n}\n\n#[cfg(feature = \"time\")]\n#[logfn]\n#[rstest]\nfn test_basic_wg_timeout_async(setup_log: ()) {\n    runtime_block_on!(async move {\n        let wg = WaitGroup::new((), 0);\n        let guard = wg.add_guard();\n        let th = async_spawn!(async move {\n            sleep(Duration::from_millis(50)).await;\n            drop(guard);\n        });\n        assert_eq!(wg.wait_async_with_timer(sleep(Duration::from_secs(1))).await, Ok(()));\n        async_join_result!(th);\n\n        #[cfg(feature = \"tokio\")]\n        {\n            let wg_child = WaitGroup::new((), 0);\n            let guard_parent = wg_child.add_guard();\n            let guard = wg.add_guard();\n            let th = async_spawn!(async move {\n                wg_child.wait_async().await;\n                sleep(Duration::from_secs(1)).await;\n                drop(guard);\n                log::info!(\"drop guard\");\n            });\n            assert!(tokio::time::timeout(Duration::from_millis(10), wg.wait_async())\n                .await\n                .is_err());\n            drop(wg);\n            log::info!(\"drop wg\");\n            drop(guard_parent);\n            async_join_result!(th);\n        }\n    });\n}\n\n#[logfn]\n#[rstest]\n#[cfg_attr(miri, ignore)]\nfn test_pressure_wg_blocking_spawn_sleep(setup_log: ()) {\n    let wg = WaitGroup::new((), 0);\n    let mut loop_cnt = 0;\n    for _ in 0..50 {\n        let num_guards = fastrand::u32(1..=10); // Generate between 1 and 10 guards\n        loop_cnt += 1;\n        info!(\"loop_cnt={} threads={}\", loop_cnt, num_guards);\n\n        let mut guards = Vec::new();\n        for _ in 0..num_guards {\n            guards.push(wg.add_guard());\n        }\n        let mut handles = Vec::new();\n        for (i, 
guard) in guards.into_iter().enumerate() {\n            handles.push(spawn_named_thread(&format!(\"worker-{}\", i), move || {\n                let millis = fastrand::u64(0..=10); // Sleep for 0 to 10 milliseconds\n                std::thread::sleep(Duration::from_millis(millis));\n                drop(guard);\n            }));\n        }\n        wg.wait();\n        assert_eq!(wg.get_left_seqcst(), 0);\n\n        for handle in handles {\n            handle.join().unwrap();\n        }\n    }\n}\n\n#[logfn]\n#[rstest]\n#[case(0, 5)]\n#[case(2, 8)]\n#[case(3, 20)]\n#[case(10, 50)]\nfn test_pressure_wg_async_channel(\n    setup_log: (), #[case] threshold: usize, #[case] num_tasks: usize,\n) {\n    #[cfg(miri)]\n    {\n        if num_tasks > 10 {\n            println!(\"skip\");\n            return;\n        }\n    }\n    runtime_block_on!(async move {\n        let (tx, rx) = mpmc::unbounded_async();\n        let mut wg = WaitGroup::new((), threshold);\n        let mut total_received = 0;\n\n        // Spawn consumer tasks\n        let mut th_s = Vec::new();\n        for _ in 0..num_tasks {\n            let _rx = rx.clone();\n            let th = async_spawn!(async move {\n                let mut count = 0;\n                while let Ok(guard) = _rx.recv().await {\n                    count += 1;\n                    drop(guard);\n                }\n                count\n            });\n            th_s.push(th);\n        }\n        drop(rx);\n\n        for i in 0..ROUND {\n            wg.wait_async().await;\n            assert!(wg.get_left() <= threshold);\n            log::trace!(\"send {i}\");\n            // Publish next batch.\n            for _ in 0..num_tasks {\n                let guard = wg.add_guard();\n                tx.send(guard).expect(\"send\");\n            }\n        }\n        drop(tx);\n        log::info!(\"change threshold\");\n        wg.set_threshold(0);\n        wg.wait_async().await;\n        assert_eq!(wg.get_left(), 0);\n        for th in 
th_s {\n            total_received += async_join_result!(th);\n        }\n        assert_eq!(num_tasks * ROUND, total_received);\n    });\n}\n\n#[cfg(feature = \"time\")]\n#[logfn]\n#[rstest]\n#[case(0, 5)]\n#[case(2, 4)]\n#[case(3, 20)]\n#[case(10, 50)]\nfn test_pressure_wg_async_channel_sleep(\n    setup_log: (), #[case] threshold: usize, #[case] num_tasks: usize,\n) {\n    let rounds: usize = {\n        #[cfg(miri)]\n        {\n            if num_tasks > 5 {\n                println!(\"skip\");\n                return;\n            }\n            10\n        }\n        #[cfg(not(miri))]\n        100\n    };\n    runtime_block_on!(async move {\n        let (tx, rx) = mpmc::unbounded_async();\n        let mut wg = WaitGroup::new((), threshold);\n        let mut total_received = 0;\n\n        // Spawn consumer tasks\n        let mut th_s = Vec::new();\n        for _ in 0..num_tasks {\n            let _rx = rx.clone();\n            let th = async_spawn!(async move {\n                let mut count = 0;\n                while let Ok(guard) = _rx.recv().await {\n                    count += 1;\n                    // Simulate work\n                    sleep(Duration::from_millis(fastrand::u64(1..=5))).await;\n                    drop(guard);\n                }\n                count\n            });\n            th_s.push(th);\n        }\n        drop(rx);\n\n        for i in 0..rounds {\n            wg.wait_async().await;\n            assert!(wg.get_left() <= threshold);\n            log::trace!(\"send {i}\");\n            // Publish next batch.\n            for _ in 0..num_tasks {\n                let guard = wg.add_guard();\n                tx.send(guard).expect(\"send\");\n            }\n        }\n        drop(tx);\n        log::info!(\"change threshold\");\n        wg.set_threshold(0);\n        wg.wait_async().await;\n        assert_eq!(wg.get_left(), 0);\n        for th in th_s {\n            total_received += async_join_result!(th);\n        }\n        
assert_eq!(num_tasks * rounds, total_received);\n    });\n}\n\n#[logfn]\n#[rstest]\n#[case(0, 5)]\n#[case(2, 8)]\n#[case(3, 20)]\n#[case(4, 10)]\nfn test_pressure_wg_blocking_channel(\n    setup_log: (), #[case] threshold: usize, #[case] num_threads: usize,\n) {\n    #[cfg(miri)]\n    {\n        if num_threads > 10 {\n            println!(\"skip\");\n            return;\n        }\n    }\n    runtime_block_on!(async move {\n        let (tx, rx) = mpmc::unbounded_blocking();\n        let mut wg = WaitGroup::new((), threshold);\n        let mut total_received = 0;\n\n        // Spawn consumer tasks\n        let mut th_s = Vec::new();\n        for _ in 0..num_threads {\n            let _rx = rx.clone();\n            let th = std::thread::spawn(move || {\n                let mut count = 0;\n                while let Ok(guard) = _rx.recv() {\n                    count += 1;\n                    drop(guard);\n                }\n                count\n            });\n            th_s.push(th);\n        }\n        drop(rx);\n\n        for i in 0..ROUND {\n            wg.wait();\n            assert!(wg.get_left() <= threshold);\n            log::trace!(\"send {i}\");\n            // Publish next batch.\n            for _ in 0..num_threads {\n                let guard = wg.add_guard();\n                tx.send(guard).expect(\"send\");\n            }\n        }\n        drop(tx);\n        log::info!(\"change threshold\");\n        wg.set_threshold(0);\n        wg.wait();\n        assert_eq!(wg.get_left(), 0);\n        for th in th_s {\n            total_received += th.join().unwrap();\n        }\n        assert_eq!(num_threads * ROUND, total_received);\n    });\n}\n\n#[logfn]\n#[rstest]\nfn test_waitgroup_inline(setup_log: ()) {\n    let wg = Arc::new(WaitGroupInline::<0>::new());\n    assert_eq!(wg.get_left_seqcst(), 0);\n    wg.add_many(1);\n    assert!(wg.try_wait().is_err());\n    let _wg = wg.clone();\n    let th = std::thread::spawn(move || {\n        
std::thread::sleep(Duration::from_secs(1));\n        unsafe { _wg.done_many(1) };\n    });\n    unsafe { wg.wait() };\n    th.join().expect(\"join\");\n    assert_eq!(wg.get_left_seqcst(), 0);\n\n    runtime_block_on!(async move {\n        let _wg = wg.clone();\n        wg.add();\n        let th = async_spawn!(async move {\n            #[cfg(feature = \"time\")]\n            {\n                sleep(Duration::from_secs(1)).await;\n            }\n            unsafe { _wg.done() };\n        });\n        unsafe { wg.wait_async().await };\n        async_join_result!(th);\n        assert_eq!(wg.get_left_seqcst(), 0);\n    });\n}\n\n#[test]\n#[should_panic]\nfn test_waitgroup_inline_underflow() {\n    recipe::console_logger(ConsoleTarget::Stdout, Level::Trace).test().build().expect(\"log\");\n    let wg = WaitGroupInline::<0>::new();\n    unsafe { wg.done() };\n}\n"
  }
]