[
  {
    "path": ".github/workflows/ci.yml",
    "content": "name: CI\n\non:\n  push:\n    branches:\n    - main\n  pull_request: {}\n\nenv:\n  RUSTFLAGS: -Dwarnings\n  RUST_BACKTRACE: 1\n  # Change to specific Rust release to pin\n  rust_stable: stable\n  rust_clippy: 1.52.0\n  rust_min: 1.49.0\n\njobs:\n  check:\n    # Run `cargo check` first to ensure that the pushed code at least compiles.\n    runs-on: ubuntu-latest\n    env:\n      RUSTFLAGS: --cfg tokio_unstable -Dwarnings\n    steps:\n    - uses: actions/checkout@master\n    - uses: actions-rs/toolchain@v1\n      with:\n        toolchain: ${{ env.rust_stable }}\n        override: true\n        profile: minimal\n        components: clippy, rustfmt\n    - uses: Swatinem/rust-cache@v1\n    - name: Check\n      uses: actions-rs/cargo@v1\n      with:\n        command: clippy\n        args: --all --all-targets --all-features\n    - name: rustfmt\n      uses: actions-rs/cargo@v1\n      with:\n        command: fmt\n        args: --all -- --check\n\n  check-docs:\n    runs-on: ubuntu-latest\n    env:\n      RUSTDOCFLAGS: -D broken-intra-doc-links --cfg tokio_unstable\n      RUSTFLAGS: --cfg tokio_unstable -Dwarnings\n    steps:\n    - uses: actions/checkout@master\n    - uses: actions-rs/toolchain@v1\n      with:\n        toolchain: ${{ env.rust_stable }}\n        override: true\n        profile: minimal\n    - uses: Swatinem/rust-cache@v1\n    - name: cargo doc\n      run: cargo doc --all-features --no-deps\n\n  cargo-hack:\n    runs-on: ubuntu-latest\n    env:\n      RUSTFLAGS: --cfg tokio_unstable -Dwarnings\n    steps:\n    - uses: actions/checkout@master\n    - uses: actions-rs/toolchain@v1\n      with:\n        toolchain: ${{ env.rust_stable }}\n        override: true\n        profile: minimal\n    - uses: Swatinem/rust-cache@v1\n    - name: Install cargo-hack\n      run: |\n        curl -LsSf https://github.com/taiki-e/cargo-hack/releases/latest/download/cargo-hack-x86_64-unknown-linux-gnu.tar.gz | tar xzf - -C ~/.cargo/bin\n    - name: cargo hack 
check\n      run: cargo hack check --each-feature --no-dev-deps --all\n\n  test-versions:\n    name: test-version (${{ matrix.name }})\n    needs: check\n    runs-on: ubuntu-latest\n    strategy:\n      matrix:\n        include:\n          - rustflags: \"--cfg tokio_unstable -Dwarnings\"\n            name: \"tokio-unstable\"\n          - rustflags: \"-Dwarnings\"\n            name: \"stable\"\n    env:\n      RUSTFLAGS: ${{ matrix.rustflags }}\n    steps:\n    - uses: actions/checkout@master\n    - uses: actions-rs/toolchain@v1\n      with:\n        toolchain: ${{ env.rust_stable }}\n        override: true\n        profile: minimal\n    - uses: Swatinem/rust-cache@v1\n    - name: Run tests\n      uses: actions-rs/cargo@v1\n      with:\n        command: test\n        args: --all --all-features --all-targets\n\n  test-docs:\n    needs: check\n    runs-on: ubuntu-latest\n    steps:\n    - uses: actions/checkout@master\n    - uses: actions-rs/toolchain@v1\n      with:\n        toolchain: ${{ env.rust_stable }}\n        override: true\n        profile: minimal\n    - uses: Swatinem/rust-cache@v1\n    - name: Run doc tests\n      uses: actions-rs/cargo@v1\n      with:\n        command: test\n        args: --all-features --doc\n      env:\n        RUSTDOCFLAGS: --cfg tokio_unstable\n        RUSTFLAGS: --cfg tokio_unstable -Dwarnings\n\n  semver:\n    name: semver\n    needs: check\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v4\n      - name: Check `tokio-metrics` semver with only default features\n        uses: obi1kenobi/cargo-semver-checks-action@v2\n        with:\n          rust-toolchain: ${{ env.rust_stable }}\n          package: tokio-metrics\n          feature-group: default-features\n      - name: Check `tokio-metrics` semver with all features & tokio_unstable RUSTFLAG\n        uses: obi1kenobi/cargo-semver-checks-action@v2\n        with:\n          rust-toolchain: ${{ env.rust_stable }}\n          package: tokio-metrics\n          
feature-group: all-features\n        env:\n          RUSTFLAGS: --cfg tokio_unstable -Dwarnings\n"
  },
  {
    "path": ".github/workflows/release.yml",
    "content": "name: Publish release\n\npermissions:\n  pull-requests: write\n  contents: write\n  id-token: write # Required for OIDC token exchange / trusted publishing\n\non:\n  push:\n    branches:\n      - main\n\njobs:\n  release-plz-release:\n    if: github.repository_owner == 'tokio-rs'\n    name: Release-plz release\n    runs-on: ubuntu-latest\n    environment: release\n\n    steps:\n      - name: Checkout repository\n        uses: actions/checkout@v4\n        with:\n          fetch-depth: 0\n        \n      - name: Install Rust toolchain\n        uses: dtolnay/rust-toolchain@stable\n      - name: Authenticate to crates.io\n        uses: rust-lang/crates-io-auth-action@v1\n        id: auth\n      - name: Run release-plz\n        uses: release-plz/action@v0.5.102\n        with:\n          command: release\n        env:\n          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n          CARGO_REGISTRY_TOKEN: ${{ steps.auth.outputs.token }}\n"
  },
  {
    "path": ".gitignore",
    "content": "/target\nCargo.lock\n.vscode"
  },
  {
    "path": "CHANGELOG.md",
    "content": "# Changelog\n\nAll notable changes to this project will be documented in this file.\n\nThe format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),\nand this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).\n\n## [Unreleased]\n\n## [0.5.0](https://github.com/tokio-rs/tokio-metrics/compare/v0.4.9...v0.5.0) - 2026-04-09\n\n### Breaking Changes\n\n- `RuntimeMetrics::poll_time_histogram` is now a `PollTimeHistogram` instead of `Vec<u64>`. Each bucket carries its duration range alongside the count. ([#121](https://github.com/tokio-rs/tokio-metrics/pull/121))\n\n### Added\n\n- Add `metrique-integration` feature to use `RuntimeMetrics` as a metrique unit of work ([#121](https://github.com/tokio-rs/tokio-metrics/pull/121))\n\n### Other\n\n- Fix doctests failing after Tokio v1.51 ([#122](https://github.com/tokio-rs/tokio-metrics/pull/122))\n\n## [0.4.9](https://github.com/tokio-rs/tokio-metrics/compare/v0.4.8...v0.4.9) - 2026-02-23\n\n### Added\n\n- *(task)* Expose a static-friendly TaskMonitorCore without inner Arc ([#115](https://github.com/tokio-rs/tokio-metrics/pull/115))\n\n### Other\n\n- Fix doctest feature gates and relax rt requirement for task metrics reporter ([#118](https://github.com/tokio-rs/tokio-metrics/pull/118))\n\n## [0.4.8](https://github.com/tokio-rs/tokio-metrics/compare/v0.4.7...v0.4.8) - 2026-02-16\n\n### Added\n\n- publicly export task `TaskIntervals` type ([#112](https://github.com/tokio-rs/tokio-metrics/pull/112))\n\n### Fixed\n\n- use saturating_sub to prevent overflow panics in runtime metrics ([#114](https://github.com/tokio-rs/tokio-metrics/pull/114))\n\n# 0.4.7 (January 15, 2025)\n- docs: fix typos in `TaskMetrics` ([#103])\n- rt: integrate derived metrics with metrics.rs ([#104])\n- fix: indentation in task.rs ([#105])\n- docs: update readme and crate documentation ([#107])\n- rt: make `live_tasks_count` (`num_alive_tasks()`) stable([#108])\n- docs: move `live_tasks_count` to 
stable metrics in README ([#109])\n\n[#103]: https://github.com/tokio-rs/tokio-metrics/pull/103\n[#104]: https://github.com/tokio-rs/tokio-metrics/pull/104\n[#105]: https://github.com/tokio-rs/tokio-metrics/pull/105\n[#107]: https://github.com/tokio-rs/tokio-metrics/pull/107\n[#108]: https://github.com/tokio-rs/tokio-metrics/pull/108\n[#109]: https://github.com/tokio-rs/tokio-metrics/pull/109\n\n# 0.4.6 (December 3rd, 2025)\n- add metrics_rs integration to task metrics ([#100])\n- readme: add max_idle_duration to readme ([#98])\n- readme: keep default features ([#29])\n\n[#29]: https://github.com/tokio-rs/tokio-metrics/pull/29\n[#98]: https://github.com/tokio-rs/tokio-metrics/pull/98\n[#100]: https://github.com/tokio-rs/tokio-metrics/pull/100\n\n# 0.4.5 (September 4th, 2025)\n- Add max_idle_duration ([#95])\n\n[#95]: https://github.com/tokio-rs/tokio-metrics/pull/95\n\n# 0.4.4 (August 5th, 2025)\n\n### Added\n - fix: Add TaskIntervals struct ([#91])\n - chore: update dev-dependencies ([#92])\n\n[#91]: https://github.com/tokio-rs/tokio-metrics/pull/91\n[#92]: https://github.com/tokio-rs/tokio-metrics/pull/92\n\n# 0.4.3 (July 3rd, 2025)\n\n### Added\n - rt: partially stabilize `RuntimeMonitor` and related metrics ([#87])\n\n[#87]: https://github.com/tokio-rs/tokio-metrics/pull/87\n\n# 0.4.2 (April 30th, 2025)\n\n### Fixed\n - docs: specify metrics-rs-integration feature dependency for relevant APIs ([#78])\n - docs: fix links ([#79])\n\n[#78]: https://github.com/tokio-rs/tokio-metrics/pull/78\n[#79]: https://github.com/tokio-rs/tokio-metrics/pull/79\n\n# 0.4.1 (April 20th, 2025)\n\n### Added\n - rt: add support for `blocking_queue_depth`, `live_task_count`, `blocking_threads_count`,\n   `idle_blocking_threads_count` ([#49], [#74])\n - rt: add integration with metrics.rs ([#68])\n\n[#49]: https://github.com/tokio-rs/tokio-metrics/pull/49\n[#68]: https://github.com/tokio-rs/tokio-metrics/pull/68\n[#74]: https://github.com/tokio-rs/tokio-metrics/pull/74\n\n# 0.4.0 
(November 26th, 2024)\n\nThe core Tokio crate has renamed some of the metrics and this breaking release\nuses the new names. The minimum required Tokio is bumped to 1.41, and the MSRV\nis bumped to 1.70 to match.\n\n- runtime: use new names for poll time histogram ([#66])\n- runtime: rename injection queue to global queue ([#66])\n- doc: various doc fixes ([#66], [#65])\n\n[#65]: https://github.com/tokio-rs/tokio-metrics/pull/65\n[#66]: https://github.com/tokio-rs/tokio-metrics/pull/66\n\n# 0.3.1 (October 12th, 2023)\n\n### Fixed\n- task: fix doc error in idle definition ([#54])\n- chore: support tokio 1.33 without stats feature ([#55])\n\n[#54]: https://github.com/tokio-rs/tokio-metrics/pull/54\n[#55]: https://github.com/tokio-rs/tokio-metrics/pull/55\n\n# 0.3.0 (August 14th, 2023)\n\n### Added\n- rt: add support for mean task poll time ([#50])\n- rt: add support for task poll count histogram ([#52])\n\n[#50]: https://github.com/tokio-rs/tokio-metrics/pull/50\n[#52]: https://github.com/tokio-rs/tokio-metrics/pull/52\n\n# 0.2.2 (April 13th, 2023)\n### Added\n- task: add TaskMonitorBuilder ([#46])\n\n### Fixed\n- task: fix default long delay threshold ([#46])\n\n[#46]: https://github.com/tokio-rs/tokio-metrics/pull/46\n\n# 0.2.1 (April 5th, 2023)\n\n### Added\n- task: add short and long delay metrics ([#44])\n\n[#44]: https://github.com/tokio-rs/tokio-metrics/pull/44\n\n# 0.2.0 (March 6th, 2023)\n\n### Added\n- Add `Debug` implementations. 
([#28])\n- rt: add concrete `RuntimeIntervals` iterator type ([#26])\n- rt: add budget_forced_yield_count metric ([#39])\n- rt: add io_driver_ready_count metric ([#40])\n- rt: add steal_operations metric ([#37])\n- task: also instrument streams ([#31])\n\n### Documented\n- doc: fix count in `TaskMonitor` docstring ([#24])\n- doc: the description of steal_count ([#35])\n\n[#24]: https://github.com/tokio-rs/tokio-metrics/pull/24\n[#26]: https://github.com/tokio-rs/tokio-metrics/pull/26\n[#28]: https://github.com/tokio-rs/tokio-metrics/pull/28\n[#31]: https://github.com/tokio-rs/tokio-metrics/pull/31\n[#35]: https://github.com/tokio-rs/tokio-metrics/pull/35\n[#37]: https://github.com/tokio-rs/tokio-metrics/pull/37\n[#39]: https://github.com/tokio-rs/tokio-metrics/pull/39\n[#40]: https://github.com/tokio-rs/tokio-metrics/pull/40\n"
  },
  {
    "path": "CONTRIBUTING.md",
    "content": "## Doing releases\n\nThere is a `.github/workflows/release.yml` workflow that will publish a crates.io release and create a GitHub release every time the version in `Cargo.toml` changes on `main`. The workflow is authorized to publish via [trusted publishing](https://rust-lang.github.io/rfcs/3691-trusted-publishing-cratesio.html), no further authorization is needed.\n\nTo prepare a release, use [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/), and in a clean git repo run:\n\n```\ncargo install release-plz --locked\ngit checkout main && release-plz update\n# review the changes to Cargo.toml and CHANGELOG.md\ngit commit -a\n```\n\nThen open a PR for the release and get it approved. Even if you have bypass permissions on branch protection, always use a PR so CI runs before the release publishes. Once merged, the release workflow will automatically publish to crates.io and create a GitHub release.\n\n## How to test docs.rs changes\n\nSet up your local docs.rs environment as per official README:  \nhttps://github.com/rust-lang/docs.rs?tab=readme-ov-file#getting-started\n\nMake sure you have:\n- Your .env contents exported to your local ENVs\n- docker-compose stack for db and s3 running\n- The web server running via local (or pure docker-compose approach)\n- If on a remote machine, port 3000 (or whatever your webserver is listening on) forwarded\n\nInvoke the cargo build command against your local path to your `tokio-metrics` workspace:\n\n```\n# you could also invoke the built `cratesfyi` binary from outside of your cargo workspace,\n# though you'll still need the right ENVs exported\ncargo run -- build crate --local ../tokio-metrics\n```\n\nThen, you can view the generated documentation for `tokio-metrics` in your browser. If you figure\nout how to get CSS working, update this guide :)\n"
  },
  {
    "path": "Cargo.toml",
    "content": "[package]\nname = \"tokio-metrics\"\nversion = \"0.5.0\"\nedition = \"2021\"\nrust-version = \"1.70.0\"\nauthors = [\"Tokio Contributors <team@tokio.rs>\"]\nlicense = \"MIT\"\nreadme = \"README.md\"\nrepository = \"https://github.com/tokio-rs/tokio-metrics\"\nhomepage = \"https://tokio.rs\"\ndescription = \"\"\"\nRuntime and task level metrics for Tokio applications.\n\"\"\"\ncategories = [\"asynchronous\", \"network-programming\"]\nkeywords = [\"async\", \"futures\", \"metrics\", \"debugging\"]\n\n[lints.rust]\nunexpected_cfgs = { level = \"warn\", check-cfg = ['cfg(tokio_unstable)'] }\n\n[features]\ndefault = [\"rt\"]\nmetrics-rs-integration = [\"dep:metrics\"]\nmetrique-integration = [\"dep:metrique\"]\nrt = [\"tokio\"]\n\n[dependencies]\ntokio-stream = \"0.1.11\"\nfutures-util = \"0.3.19\"\npin-project-lite = \"0.2.7\"\ntokio = { version = \"1.45.1\", features = [\"rt\", \"time\", \"net\"], optional = true }\nmetrics = { version = \"0.24\", optional = true }\nmetrique = { version = \"0.1.23\", default-features = false, optional = true }\n\n[dev-dependencies]\nmetrique = { version = \"0.1.23\", features = [\"test-util\"] }\naxum = \"0.8\"\ncriterion = \"0.7\"\nfutures = \"0.3.21\"\nnum_cpus = \"1.13.1\"\nserde = { version = \"1.0.136\", features = [\"derive\"] }\nserde_json = \"1.0.79\"\ntokio = { version = \"1.45.1\", features = [\"full\", \"rt\", \"time\", \"macros\", \"test-util\"] }\nmetrics-util = { version = \"0.20\", features = [\"debugging\"] }\nmetrics = { version = \"0.24\" }\nmetrics-exporter-prometheus = { version = \"0.17\", features = [\"uds-listener\"] }\n\n[[example]]\nname = \"runtime\"\nrequired-features = [\"rt\"]\n\n[[bench]]\nname = \"poll_overhead\"\nharness = false\n\n[package.metadata.docs.rs]\nall-features = true\n# enable unstable features in the documentation\nrustdoc-args = [\"--cfg\", \"docsrs\", \"--cfg\", \"tokio_unstable\"]\n# it's necessary to _also_ pass `--cfg tokio_unstable` to rustc, or else\n# dependencies 
will not be enabled, and the docs build will fail.\nrustc-args = [\"--cfg\", \"tokio_unstable\"]\n"
  },
  {
    "path": "LICENSE",
    "content": "Copyright (c) 2022 Tokio Contributors\n\nPermission is hereby granted, free of charge, to any\nperson obtaining a copy of this software and associated\ndocumentation files (the \"Software\"), to deal in the\nSoftware without restriction, including without\nlimitation the rights to use, copy, modify, merge,\npublish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software\nis furnished to do so, subject to the following\nconditions:\n\nThe above copyright notice and this permission notice\nshall be included in all copies or substantial portions\nof the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF\nANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED\nTO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\nPARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\nSHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\nCLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\nOF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR\nIN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE."
  },
  {
    "path": "README.md",
    "content": "# Tokio Metrics\n\n[![Crates.io][crates-badge]][crates-url]\n[![Documentation][docs-badge]][docs-url]\n[![MIT licensed][mit-badge]][mit-url]\n[![Build Status][actions-badge]][actions-url]\n[![Discord chat][discord-badge]][discord-url]\n\n[crates-badge]: https://img.shields.io/crates/v/tokio-metrics.svg\n[crates-url]: https://crates.io/crates/tokio-metrics\n[docs-badge]: https://docs.rs/tokio-metrics/badge.svg\n[docs-url]: https://docs.rs/tokio-metrics\n[mit-badge]: https://img.shields.io/badge/license-MIT-blue.svg\n[mit-url]: https://github.com/tokio-rs/tokio-metrics/blob/master/LICENSE\n[actions-badge]: https://github.com/tokio-rs/tokio-metrics/workflows/CI/badge.svg\n[actions-url]: https://github.com/tokio-rs/tokio-metrics/actions?query=workflow%3ACI+branch%3Amain\n[discord-badge]: https://img.shields.io/discord/500028886025895936.svg?logo=discord&style=flat-square\n[discord-url]: https://discord.gg/tokio\n\nProvides utilities for collecting metrics from a Tokio application, including\nruntime and per-task metrics.\n\n```toml\n[dependencies]\ntokio-metrics = \"0.5\"\n```\n\n## Getting Started With Task Metrics\n\nUse `TaskMonitor` to instrument tasks before spawning them, and to observe\nmetrics for those tasks. All tasks instrumented with a given `TaskMonitor`\naggregate their metrics together. 
To split out metrics for different tasks, use\nseparate `TaskMetrics` instances.\n\n```rust\n// construct a TaskMonitor\nlet monitor = tokio_metrics::TaskMonitor::new();\n\n// print task metrics every 500ms\n{\n    let frequency = std::time::Duration::from_millis(500);\n    let monitor = monitor.clone();\n    tokio::spawn(async move {\n        for metrics in monitor.intervals() {\n            println!(\"{:?}\", metrics);\n            tokio::time::sleep(frequency).await;\n        }\n    });\n}\n\n// instrument some tasks and spawn them\nloop {\n    tokio::spawn(monitor.instrument(do_work()));\n}\n```\n\n### Task Metrics\n#### Base Metrics\n- **[`instrumented_count`]**\n  The number of tasks instrumented.\n- **[`dropped_count`]**\n  The number of tasks dropped.\n- **[`first_poll_count`]**\n  The number of tasks polled for the first time.\n- **[`total_first_poll_delay`]**\n  The total duration elapsed between the instant tasks are instrumented, and the instant they are first polled.\n- **[`total_idled_count`]**\n  The total number of times that tasks idled, waiting to be awoken.\n- **[`total_idle_duration`]**\n  The total duration that tasks idled.\n- **[`max_idle_duration`]**\n  The maximum idle duration that a task took.\n- **[`total_scheduled_count`]**\n  The total number of times that tasks were awoken (and then, presumably, scheduled for execution).\n- **[`total_scheduled_duration`]**\n  The total duration that tasks spent waiting to be polled after awakening.\n- **[`total_poll_count`]**\n  The total number of times that tasks were polled.\n- **[`total_poll_duration`]**\n  The total duration elapsed during polls.\n- **[`total_fast_poll_count`]**\n  The total number of times that polling tasks completed swiftly.\n- **[`total_fast_poll_duration`]**\n  The total duration of fast polls.\n- **[`total_slow_poll_count`]**\n  The total number of times that polling tasks completed slowly.\n- **[`total_slow_poll_duration`]**\n  The total duration of slow polls.\n- 
**[`total_short_delay_count`]**\n  The total count of short scheduling delays.\n- **[`total_short_delay_duration`]**\n  The total duration of short scheduling delays.\n- **[`total_long_delay_count`]**\n  The total count of long scheduling delays.\n- **[`total_long_delay_duration`]**\n  The total duration of long scheduling delays.\n\n#### Derived Metrics\n- **[`mean_first_poll_delay`]**\n  The mean duration elapsed between the instant tasks are instrumented, and the instant they are first polled.\n- **[`mean_idle_duration`]**\n  The mean duration of idles.\n- **[`mean_scheduled_duration`]**\n  The mean duration that tasks spent waiting to be executed after awakening.\n- **[`mean_poll_duration`]**\n  The mean duration of polls.\n- **[`slow_poll_ratio`]**\n  The ratio between the number polls categorized as slow and fast.\n- **[`long_delay_ratio`]**\n  The ratio between the number of long scheduling delays and the number of total schedules.\n- **[`mean_fast_poll_duration`]**\n  The mean duration of fast polls.\n- **[`mean_slow_poll_duration`]**\n  The mean duration of slow polls.\n- **[`mean_short_delay_duration`]**\n  The mean duration of short schedules.\n- **[`mean_long_delay_duration`]**\n  The mean duration of long schedules.\n\n[`instrumented_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.TaskMetrics.html#structfield.instrumented_count\n[`dropped_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.TaskMetrics.html#structfield.dropped_count\n[`first_poll_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.TaskMetrics.html#structfield.first_poll_count\n[`total_first_poll_delay`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.TaskMetrics.html#structfield.total_first_poll_delay\n[`total_idled_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.TaskMetrics.html#structfield.total_idled_count\n[`total_idle_duration`]: 
https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.TaskMetrics.html#structfield.total_idle_duration\n[`max_idle_duration`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.TaskMetrics.html#structfield.max_idle_duration\n[`total_scheduled_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.TaskMetrics.html#structfield.total_scheduled_count\n[`total_scheduled_duration`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.TaskMetrics.html#structfield.total_scheduled_duration\n[`total_poll_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.TaskMetrics.html#structfield.total_poll_count\n[`total_poll_duration`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.TaskMetrics.html#structfield.total_poll_duration\n[`total_fast_poll_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.TaskMetrics.html#structfield.total_fast_poll_count\n[`total_fast_poll_duration`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.TaskMetrics.html#structfield.total_fast_poll_duration\n[`total_slow_poll_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.TaskMetrics.html#structfield.total_slow_poll_count\n[`total_slow_poll_duration`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.TaskMetrics.html#structfield.total_slow_poll_duration\n[`total_short_delay_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.TaskMetrics.html#structfield.total_short_delay_count\n[`total_short_delay_duration`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.TaskMetrics.html#structfield.total_short_delay_duration\n[`total_long_delay_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.TaskMetrics.html#structfield.total_long_delay_count\n[`total_long_delay_duration`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.TaskMetrics.html#structfield.total_long_delay_duration\n[`mean_first_poll_delay`]: 
https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.TaskMetrics.html#method.mean_first_poll_delay\n[`mean_idle_duration`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.TaskMetrics.html#method.mean_idle_duration\n[`mean_scheduled_duration`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.TaskMetrics.html#method.mean_scheduled_duration\n[`mean_poll_duration`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.TaskMetrics.html#method.mean_poll_duration\n[`slow_poll_ratio`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.TaskMetrics.html#method.slow_poll_ratio\n[`long_delay_ratio`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.TaskMetrics.html#method.long_delay_ratio\n[`mean_fast_poll_duration`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.TaskMetrics.html#method.mean_fast_poll_duration\n[`mean_slow_poll_duration`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.TaskMetrics.html#method.mean_slow_poll_duration\n[`mean_short_delay_duration`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.TaskMetrics.html#method.mean_short_delay_duration\n[`mean_long_delay_duration`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.TaskMetrics.html#method.mean_long_delay_duration\n\n## Getting Started With Runtime Metrics\n\nNot all runtime metrics are stable. Using unstable metrics requires `tokio_unstable`, and the  `rt` crate\nfeature. To enable `tokio_unstable`, the `--cfg` `tokio_unstable` must be passed\nto `rustc` when compiling. 
You can do this by setting the `RUSTFLAGS`\nenvironment variable before compiling your application; e.g.:\n```sh\nRUSTFLAGS=\"--cfg tokio_unstable\" cargo build\n```\nOr, by creating the file `.cargo/config.toml` in the root directory of your crate.\nIf you're using a workspace, put this file in the root directory of your workspace instead.\n```toml\n[build]\nrustflags = [\"--cfg\", \"tokio_unstable\"]\nrustdocflags = [\"--cfg\", \"tokio_unstable\"] \n```\nPutting `.cargo/config.toml` files below the workspace or crate root directory may lead to tools like\nRust-Analyzer or VSCode not using your `.cargo/config.toml` since they invoke cargo from\nthe workspace or crate root and cargo only looks for the `.cargo` directory in the current & parent directories.\nCargo ignores configurations in child directories.\nMore information about where cargo looks for configuration files can be found\n[here](https://doc.rust-lang.org/cargo/reference/config.html).\n\nMissing this configuration file during compilation will cause tokio-metrics to not work, and alternating\nbetween building with and without this configuration file included will cause full rebuilds of your project.\n\n### Collecting Runtime Metrics directly\n\nThe `rt` feature of `tokio-metrics` is on by default; simply check that you do\nnot set `default-features = false` when declaring it as a dependency; e.g.:\n```toml\n[dependencies]\ntokio-metrics = \"0.5\"\n```\n\nFrom within a Tokio runtime, use `RuntimeMonitor` to monitor key metrics of\nthat runtime.\n```rust\nlet handle = tokio::runtime::Handle::current();\nlet runtime_monitor = tokio_metrics::RuntimeMonitor::new(&handle);\n\n// print runtime metrics every 500ms\nlet frequency = std::time::Duration::from_millis(500);\ntokio::spawn(async move {\n    for metrics in runtime_monitor.intervals() {\n        println!(\"Metrics = {:?}\", metrics);\n        tokio::time::sleep(frequency).await;\n    }\n});\n\n// run some 
tasks\ntokio::spawn(do_work());\ntokio::spawn(do_work());\ntokio::spawn(do_work());\n```\n\n### Runtime Metrics\n#### Stable Base Metrics\n- **[`workers_count`]**\n  The number of worker threads used by the runtime.\n- **[`total_park_count`]**\n  The number of times worker threads parked.\n- **[`max_park_count`]**\n  The maximum number of times any worker thread parked.\n- **[`min_park_count`]**\n  The minimum number of times any worker thread parked.\n- **[`total_busy_duration`]**\n  The amount of time worker threads were busy.\n- **[`max_busy_duration`]**\n  The maximum amount of time a worker thread was busy.\n- **[`min_busy_duration`]**\n  The minimum amount of time a worker thread was busy.\n- **[`global_queue_depth`]**\n  The number of tasks currently scheduled in the runtime's global queue.\n- **[`elapsed`]**\n  Total amount of time elapsed since observing runtime metrics.\n- **[`live_tasks_count`]**\n  The current number of alive tasks in the runtime.\n\n#### Unstable Base Metrics\n- **[`mean_poll_duration`](https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.mean_poll_duration)**\n  The average duration of a single invocation of poll on a task.\n- **[`mean_poll_duration_worker_min`]**\n  The average duration of a single invocation of poll on a task on the worker with the lowest value.\n- **[`mean_poll_duration_worker_max`]**\n  The average duration of a single invocation of poll on a task on the worker with the highest value.\n- **[`poll_time_histogram`]**\n  A histogram of task polls since the previous probe grouped by poll times.\n- **[`total_noop_count`]**\n  The number of times worker threads unparked but performed no work before parking again.\n- **[`max_noop_count`]**\n  The maximum number of times any worker thread unparked but performed no work before parking again.\n- **[`min_noop_count`]**\n  The minimum number of times any worker thread unparked but performed no work before parking again.\n- 
**[`total_steal_count`]**\n  The number of tasks worker threads stole from another worker thread.\n- **[`max_steal_count`]**\n  The maximum number of tasks any worker thread stole from another worker thread.\n- **[`min_steal_count`]**\n  The minimum number of tasks any worker thread stole from another worker thread.\n- **[`total_steal_operations`]**\n  The number of times worker threads stole tasks from another worker thread.\n- **[`max_steal_operations`]**\n  The maximum number of times any worker thread stole tasks from another worker thread.\n- **[`min_steal_operations`]**\n  The minimum number of times any worker thread stole tasks from another worker thread.\n- **[`num_remote_schedules`]**\n  The number of tasks scheduled from outside of the runtime.\n- **[`total_local_schedule_count`]**\n  The number of tasks scheduled from worker threads.\n- **[`max_local_schedule_count`]**\n  The maximum number of tasks scheduled from any one worker thread.\n- **[`min_local_schedule_count`]**\n  The minimum number of tasks scheduled from any one worker thread.\n- **[`total_overflow_count`]**\n  The number of times worker threads saturated their local queues.\n- **[`max_overflow_count`]**\n  The maximum number of times any one worker saturated its local queue.\n- **[`min_overflow_count`]**\n  The minimum number of times any one worker saturated its local queue.\n- **[`total_polls_count`]**\n  The number of tasks that have been polled across all worker threads.\n- **[`max_polls_count`]**\n  The maximum number of tasks that have been polled in any worker thread.\n- **[`min_polls_count`]**\n  The minimum number of tasks that have been polled in any worker thread.\n- **[`total_local_queue_depth`]**\n  The total number of tasks currently scheduled in workers' local queues.\n- **[`max_local_queue_depth`]**\n  The maximum number of tasks currently scheduled any worker's local queue.\n- **[`min_local_queue_depth`]**\n  The minimum number of tasks currently scheduled any worker's 
local queue.\n- **[`blocking_queue_depth`]**\n  The number of tasks currently waiting to be executed in the blocking threadpool.\n- **[`blocking_threads_count`]**\n  The number of additional threads spawned by the runtime.\n- **[`idle_blocking_threads_count`]**\n  The number of idle threads, which have been spawned by the runtime for `spawn_blocking` calls.\n- **[`budget_forced_yield_count`]**\n  The number of times that a task was forced to yield because it exhausted its budget.\n- **[`io_driver_ready_count`]**\n  The number of ready events received from the I/O driver.\n\n#### Stable Derived Metrics\n- **[`busy_ratio`]**\n  The ratio between the amount of time worker threads were busy and the total time elapsed since observing runtime metrics.\n\n#### Unstable Derived Metrics\n- **[`mean_polls_per_park`]**\n  The ratio of the number of tasks that have been polled and the number of times worker threads unparked but performed no work before parking again.\n\n\n[`workers_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.workers_count\n[`total_park_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.total_park_count\n[`max_park_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.max_park_count\n[`min_park_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.min_park_count\n[`total_busy_duration`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.total_busy_duration\n[`max_busy_duration`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.max_busy_duration\n[`min_busy_duration`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.min_busy_duration\n[`global_queue_depth`]: 
https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.global_queue_depth\n[`elapsed`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.elapsed\n[`mean_poll_duration_worker_min`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.mean_poll_duration_worker_min\n[`mean_poll_duration_worker_max`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.mean_poll_duration_worker_max\n[`poll_time_histogram`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.poll_time_histogram\n[`total_noop_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.total_noop_count\n[`max_noop_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.max_noop_count\n[`min_noop_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.min_noop_count\n[`total_steal_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.total_steal_count\n[`max_steal_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.max_steal_count\n[`min_steal_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.min_steal_count\n[`total_steal_operations`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.total_steal_operations\n[`max_steal_operations`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.max_steal_operations\n[`min_steal_operations`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.min_steal_operations\n[`num_remote_schedules`]: 
https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.num_remote_schedules\n[`total_local_schedule_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.total_local_schedule_count\n[`max_local_schedule_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.max_local_schedule_count\n[`min_local_schedule_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.min_local_schedule_count\n[`total_overflow_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.total_overflow_count\n[`max_overflow_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.max_overflow_count\n[`min_overflow_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.min_overflow_count\n[`total_polls_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.total_polls_count\n[`max_polls_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.max_polls_count\n[`min_polls_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.min_polls_count\n[`injection_queue_depth`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.injection_queue_depth\n[`total_local_queue_depth`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.total_local_queue_depth\n[`max_local_queue_depth`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.max_local_queue_depth\n[`min_local_queue_depth`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.min_local_queue_depth\n[`blocking_queue_depth`]: 
https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.blocking_queue_depth\n[`live_tasks_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.live_tasks_count\n[`blocking_threads_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.blocking_threads_count\n[`idle_blocking_threads_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.idle_blocking_threads_count\n[`budget_forced_yield_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.budget_forced_yield_count\n[`io_driver_ready_count`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#structfield.io_driver_ready_count\n[`busy_ratio`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#method.busy_ratio\n[`mean_polls_per_park`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetrics.html#method.mean_polls_per_park\n\n\n## Collecting Metrics via metrics.rs\n\nIf you also enable the `metrics-rs-integration` feature, you can use [metrics.rs] exporters to export metrics\noutside of your process. `metrics.rs` supports a variety of exporters, including [Prometheus].\n\nThe exported metrics by default will be exported with their name, preceded by `tokio_`. For example,\n`tokio_workers_count` for the [`workers_count`] metric and `tokio_instrumented_count` for the\n[`instrumented_count`] metric. 
This can be customized by using the\n[`RuntimeMetricsReporterBuilder::with_metrics_transformer`] and [`TaskMetricsReporterBuilder::new`] functions.\n\nIf you want to use [Prometheus], you could have this `Cargo.toml`:\n\n[Prometheus]: https://prometheus.io\n[`RuntimeMetricsReporterBuilder::with_metrics_transformer`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.RuntimeMetricsReporterBuilder.html#method.with_metrics_transformer\n[`TaskMetricsReporterBuilder::new`]: https://docs.rs/tokio-metrics/latest/tokio_metrics/struct.TaskMetricsReporterBuilder.html#method.new\n\n```toml\n[dependencies]\ntokio-metrics = { version = \"0.5\", features = [\"metrics-rs-integration\"] }\nmetrics = \"0.24\"\n# You don't actually need to use the Prometheus exporter with uds-listener enabled,\n# it's just here as an example.\nmetrics-exporter-prometheus = { version = \"0.16\", features = [\"uds-listener\"] }\n```\n\nThen, you can launch a metrics exporter:\n```rust\n// This makes metrics visible via a local Unix socket with name prometheus.sock\n// You probably want to do it differently.\n//\n// If you use this exporter, you can access the metrics for debugging\n// by running `curl --unix-socket prometheus.sock localhost`.\nmetrics_exporter_prometheus::PrometheusBuilder::new()\n    .with_http_uds_listener(\"prometheus.sock\")\n    .install()\n    .unwrap();\n\n// This line launches the runtime reporter that monitors the Tokio runtime and exports the metrics.\ntokio::task::spawn(\n    tokio_metrics::RuntimeMetricsReporterBuilder::default().describe_and_run(),\n);\n\n// This line creates a task monitor.\nlet task_monitor = tokio_metrics::TaskMonitor::new();\n\n// This line launches the task reporter that exports the task metrics.\ntokio::task::spawn(\n    tokio_metrics::TaskMetricsReporterBuilder::new(|name| {\n        let name = name.replacen(\"tokio_\", \"my_task_\", 1);\n        Key::from_parts(name, &[(\"application\", \"my_app\")])\n    })\n    
.describe_and_run(task_monitor.clone()),\n);\n\n// run some tasks\ntokio::spawn(do_work());\n// This line causes the task monitor to monitor this task.\ntokio::spawn(task_monitor.instrument(do_work()));\ntokio::spawn(do_work());\n```\n\nOf course, it will work with any other [metrics.rs] exporter.\n\n[metrics.rs]: https://docs.rs/metrics\n\n## Relation to Tokio Console\n\nCurrently, Tokio Console is primarily intended for **local** debugging. Tokio\nmetrics is intended to enable reporting of metrics in production to your\npreferred tools. Longer term, it is likely that `tokio-metrics` will merge with\nTokio Console.\n\n## License\n\nThis project is licensed under the [MIT license].\n\n[MIT license]: LICENSE\n\n### Contribution\n\nUnless you explicitly state otherwise, any contribution intentionally submitted\nfor inclusion in tokio-metrics by you, shall be licensed as MIT, without any\nadditional terms or conditions.\n"
  },
  {
    "path": "benches/poll_overhead.rs",
    "content": "use criterion::{criterion_group, criterion_main, Criterion};\nuse futures::task;\nuse std::future::Future;\nuse std::hint::black_box;\nuse std::iter;\nuse std::pin::Pin;\nuse std::sync::{Arc, Barrier};\nuse std::task::{Context, Poll};\nuse std::thread;\nuse std::time::{Duration, Instant};\nuse tokio_metrics::TaskMonitor;\n\npub struct TestFuture;\n\nimpl Future for TestFuture {\n    type Output = ();\n\n    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {\n        cx.waker().wake_by_ref();\n        Poll::Pending\n    }\n}\n\nfn bench_poll(c: &mut Criterion) {\n    c.bench_function(\"poll\", move |b| {\n        b.iter_custom(|iters| {\n            let monitor = TaskMonitor::new();\n            let num_cpus = num_cpus::get();\n            let start = Arc::new(Barrier::new(num_cpus + 1));\n            let stop = Arc::new(Barrier::new(num_cpus + 1));\n\n            let mut workers: Vec<_> = iter::repeat((monitor, start.clone(), stop.clone()))\n                .take(num_cpus)\n                .map(|(monitor, start, stop)| {\n                    thread::spawn(move || {\n                        let waker = task::noop_waker();\n                        let mut cx = Context::from_waker(&waker);\n                        let mut instrumented = Box::pin(monitor.instrument(TestFuture));\n                        start.wait();\n                        let start_time = Instant::now();\n                        for _i in 0..iters {\n                            let _ = black_box(instrumented.as_mut().poll(&mut cx));\n                        }\n                        let stop_time = Instant::now();\n                        stop.wait();\n                        stop_time - start_time\n                    })\n                })\n                .collect();\n\n            start.wait();\n            stop.wait();\n\n            let elapsed: Duration = workers.drain(..).map(|w| w.join().unwrap()).sum();\n\n            elapsed / (num_cpus as u32)\n        
})\n    });\n}\n\ncriterion_group!(benches, bench_poll);\ncriterion_main!(benches);\n"
  },
  {
    "path": "examples/axum.rs",
    "content": "#[tokio::main]\nasync fn main() {\n    // construct a TaskMonitor for each endpoint\n    let monitor_root = tokio_metrics::TaskMonitor::new();\n\n    let monitor_create_user = CreateUserMonitors {\n        // monitor for the entire endpoint\n        route: tokio_metrics::TaskMonitor::new(),\n        // monitor for database insertion subtask\n        insert: tokio_metrics::TaskMonitor::new(),\n    };\n\n    // build our application with two instrumented endpoints\n    let app = axum::Router::new()\n        // `GET /` goes to `root`\n        .route(\n            \"/\",\n            axum::routing::get({\n                let monitor = monitor_root.clone();\n                move || monitor.instrument(async { \"Hello, World!\" })\n            }),\n        )\n        // `POST /users` goes to `create_user`\n        .route(\n            \"/users\",\n            axum::routing::post({\n                let monitors = monitor_create_user.clone();\n                let route = monitors.route.clone();\n                move |payload| route.instrument(create_user(payload, monitors))\n            }),\n        );\n\n    // print task metrics for each endpoint every 1s\n    let metrics_frequency = std::time::Duration::from_secs(1);\n    tokio::spawn(async move {\n        let root_intervals = monitor_root.intervals();\n        let create_user_route_intervals = monitor_create_user.route.intervals();\n        let create_user_insert_intervals = monitor_create_user.insert.intervals();\n        let create_user_intervals = create_user_route_intervals.zip(create_user_insert_intervals);\n\n        let intervals = root_intervals.zip(create_user_intervals);\n        for (root_route, (create_user_route, create_user_insert)) in intervals {\n            println!(\"root_route = {root_route:#?}\");\n            println!(\"create_user_route = {create_user_route:#?}\");\n            println!(\"create_user_insert = {create_user_insert:#?}\");\n            
tokio::time::sleep(metrics_frequency).await;\n        }\n    });\n\n    // run the server\n    let addr = std::net::SocketAddr::from(([127, 0, 0, 1], 3000));\n    let listener = tokio::net::TcpListener::bind(&addr).await.unwrap();\n    axum::serve(listener, app).await.unwrap();\n}\n\nasync fn create_user(\n    axum::Json(payload): axum::Json<CreateUser>,\n    monitors: CreateUserMonitors,\n) -> impl axum::response::IntoResponse {\n    let user = User {\n        id: 1337,\n        username: payload.username,\n    };\n    // instrument inserting the user into the db:\n    monitors.insert.instrument(insert_user(user.clone())).await;\n    (axum::http::StatusCode::CREATED, axum::Json(user))\n}\n\n#[derive(Clone)]\nstruct CreateUserMonitors {\n    // monitor for the entire endpoint\n    route: tokio_metrics::TaskMonitor,\n    // monitor for database insertion subtask\n    insert: tokio_metrics::TaskMonitor,\n}\n\n#[derive(serde::Deserialize)]\nstruct CreateUser {\n    username: String,\n}\n#[derive(Clone, serde::Serialize)]\nstruct User {\n    id: u64,\n    username: String,\n}\n\n// insert the user into the database\nasync fn insert_user(_: User) {\n    /* talk to database */\n    tokio::time::sleep(std::time::Duration::from_secs(1)).await;\n}\n"
  },
  {
    "path": "examples/runtime.rs",
    "content": "use std::time::Duration;\nuse tokio_metrics::RuntimeMonitor;\n\n#[tokio::main]\nasync fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {\n    let handle = tokio::runtime::Handle::current();\n\n    // print runtime metrics every 500ms\n    {\n        let runtime_monitor = RuntimeMonitor::new(&handle);\n        tokio::spawn(async move {\n            for interval in runtime_monitor.intervals() {\n                // pretty-print the metric interval\n                println!(\"{interval:?}\");\n                // wait 500ms\n                tokio::time::sleep(Duration::from_millis(500)).await;\n            }\n        });\n    }\n\n    // await some tasks\n    tokio::join![do_work(), do_work(), do_work(),];\n\n    Ok(())\n}\n\nasync fn do_work() {\n    for _ in 0..25 {\n        tokio::task::yield_now().await;\n        tokio::time::sleep(Duration::from_millis(100)).await;\n    }\n}\n"
  },
  {
    "path": "examples/stream.rs",
    "content": "use std::time::Duration;\n\nuse futures::{stream::FuturesUnordered, StreamExt};\n\n#[tokio::main]\nasync fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {\n    let metrics_monitor = tokio_metrics::TaskMonitor::new();\n\n    // print task metrics every 500ms\n    {\n        let metrics_monitor = metrics_monitor.clone();\n        tokio::spawn(async move {\n            for deltas in metrics_monitor.intervals() {\n                // pretty-print the metric deltas\n                println!(\"{deltas:?}\");\n                // wait 500ms\n                tokio::time::sleep(Duration::from_millis(500)).await;\n            }\n        })\n    };\n\n    // instrument a stream and await it\n    let mut stream =\n        metrics_monitor.instrument((0..3).map(|_| do_work()).collect::<FuturesUnordered<_>>());\n    while stream.next().await.is_some() {}\n\n    println!(\"{:?}\", metrics_monitor.cumulative());\n\n    Ok(())\n}\n\nasync fn do_work() {\n    for _ in 0..25 {\n        tokio::task::yield_now().await;\n        tokio::time::sleep(Duration::from_millis(100)).await;\n    }\n}\n"
  },
  {
    "path": "examples/task.rs",
    "content": "use std::time::Duration;\nuse tokio_metrics::{TaskMonitor, TaskMonitorCore};\n\n/// It's usually the right choice to use a static [`tokio_metrics::TaskMonitorCore`].\n///\n/// If you need to dynamically generate task monitors at runtime,\n/// [`tokio_metrics::TaskMonitor`] will be more ergonomic.\n///\n/// See the [`tokio_metrics::TaskMonitorCore`] documentation for more discussion.\nstatic STATIC_MONITOR: TaskMonitorCore = TaskMonitorCore::new();\n\n#[tokio::main]\nasync fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {\n    // spawn a task that prints out from the static monitor on a loop\n    tokio::spawn(async {\n        for deltas in TaskMonitorCore::intervals(&STATIC_MONITOR) {\n            // pretty print\n            println!(\"{deltas:?}\");\n            tokio::time::sleep(Duration::from_millis(500)).await;\n        }\n    });\n\n    tokio::join![\n        STATIC_MONITOR.instrument(do_work()),\n        STATIC_MONITOR.instrument(do_work()),\n        STATIC_MONITOR.instrument(do_work()),\n    ];\n\n    // imagine we wanted to generate a task monitor to keep track of all tasks\n    // and child tasks spawned by a given request\n    for i in 0..5 {\n        // roughly equivalent to Arc::new(TaskMonitorCore::new())\n        let metrics_monitor = TaskMonitor::new();\n\n        // instrument some tasks and await them\n        tokio::join![\n            // roughly equivalent to TaskMonitorCore::instrument_with(do_work(), metrics_monitor.clone())\n            metrics_monitor.instrument(do_work()),\n            metrics_monitor.instrument(do_work()),\n            metrics_monitor.instrument(do_work())\n        ];\n\n        let cumulative = metrics_monitor.cumulative();\n        println!(\"{i}: {cumulative:?}\");\n    }\n\n    Ok(())\n}\n\nasync fn do_work() {\n    for _ in 0..25 {\n        tokio::task::yield_now().await;\n        tokio::time::sleep(Duration::from_millis(100)).await;\n    }\n}\n"
  },
  {
    "path": "release-plz.toml",
    "content": "[workspace]\ngit_release_enable = false\nchangelog_update = false\n\n[[package]]\nname = \"tokio-metrics\"\nchangelog_path = \"./CHANGELOG.md\"\nchangelog_update = true\ngit_release_enable = true\n"
  },
  {
    "path": "src/derived_metrics.rs",
    "content": "macro_rules! derived_metrics {\n    (\n        [$metrics_name:ty] {\n            stable {\n                $(\n                    $(#[$($attributes:tt)*])*\n                    $vis:vis fn $name:ident($($args:tt)*) -> $ty:ty $body:block\n                )*\n            }\n            unstable {\n                $(\n                    $(#[$($unstable_attributes:tt)*])*\n                    $unstable_vis:vis fn $unstable_name:ident($($unstable_args:tt)*) -> $unstable_ty:ty $unstable_body:block\n                )*\n            }\n        }\n    ) => {\n        impl $metrics_name {\n            $(\n                $(#[$($attributes)*])*\n                $vis fn $name($($args)*) -> $ty $body\n            )*\n            $(\n                $(#[$($unstable_attributes)*])*\n                #[cfg(tokio_unstable)]\n                $unstable_vis fn $unstable_name($($unstable_args)*) -> $unstable_ty $unstable_body\n            )*\n\n            #[cfg(all(test, feature = \"metrics-rs-integration\"))]\n            const DERIVED_METRICS: &[&str] = &[$(stringify!($name),)*];\n            #[cfg(all(test, tokio_unstable, feature = \"metrics-rs-integration\"))]\n            const UNSTABLE_DERIVED_METRICS: &[&str] = &[$(stringify!($unstable_name),)*];\n        }\n    };\n}\n\npub(crate) use derived_metrics;\n"
  },
  {
    "path": "src/lib.rs",
    "content": "#![warn(\n    clippy::arithmetic_side_effects,\n    missing_debug_implementations,\n    missing_docs,\n    rust_2018_idioms,\n    unreachable_pub\n)]\n#![cfg_attr(docsrs, feature(doc_cfg))]\n#![cfg_attr(docsrs, allow(unused_attributes))]\n\n//! Monitor key metrics of tokio tasks and runtimes.\n//!\n//! ### Monitoring task metrics\n//! [Monitor][TaskMonitor] key [metrics][TaskMetrics] of tokio tasks.\n//!\n//! In the below example, a [`TaskMonitor`] is [constructed][TaskMonitor::new] and used to\n//! [instrument][TaskMonitor::instrument] three worker tasks; meanwhile, a fourth task\n//! prints [metrics][TaskMetrics] in 500ms [intervals][TaskMonitor::intervals]:\n//! ```\n//! use std::time::Duration;\n//!\n//! #[tokio::main]\n//! async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {\n//!     // construct a metrics taskmonitor\n//!     let metrics_monitor = tokio_metrics::TaskMonitor::new();\n//!\n//!     // print task metrics every 500ms\n//!     {\n//!         let metrics_monitor = metrics_monitor.clone();\n//!         tokio::spawn(async move {\n//!             for interval in metrics_monitor.intervals() {\n//!                 // pretty-print the metric interval\n//!                 println!(\"{:?}\", interval);\n//!                 // wait 500ms\n//!                 tokio::time::sleep(Duration::from_millis(500)).await;\n//!             }\n//!         });\n//!     }\n//!\n//!     // instrument some tasks and await them\n//!     // note that the same taskmonitor can be used for multiple tasks\n//!     tokio::join![\n//!         metrics_monitor.instrument(do_work()),\n//!         metrics_monitor.instrument(do_work()),\n//!         metrics_monitor.instrument(do_work())\n//!     ];\n//!\n//!     Ok(())\n//! }\n//!\n//! async fn do_work() {\n//!     for _ in 0..25 {\n//!         tokio::task::yield_now().await;\n//!         tokio::time::sleep(Duration::from_millis(100)).await;\n//!     }\n//! }\n//! 
```\n\n#![cfg_attr(\n    feature = \"rt\",\n    doc = r##\"\n### Monitoring runtime metrics\n[Monitor][RuntimeMonitor] key [metrics][RuntimeMetrics] of a tokio runtime.\n**This functionality requires crate feature `rt` and some metrics require `tokio_unstable`.**\n\nIn the below example, a [`RuntimeMonitor`] is [constructed][RuntimeMonitor::new] and\nthree tasks are spawned and awaited; meanwhile, a fourth task prints [metrics][RuntimeMetrics]\nin 500ms [intervals][RuntimeMonitor::intervals]:\n```\nuse std::time::Duration;\n\n#[tokio::main]\nasync fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {\n    let handle = tokio::runtime::Handle::current();\n    // construct the runtime metrics monitor\n    let runtime_monitor = tokio_metrics::RuntimeMonitor::new(&handle);\n\n    // print runtime metrics every 500ms\n    {\n        tokio::spawn(async move {\n            for interval in runtime_monitor.intervals() {\n                // pretty-print the metric interval\n                println!(\"{:?}\", interval);\n                // wait 500ms\n                tokio::time::sleep(Duration::from_millis(500)).await;\n            }\n        });\n    }\n\n    // await some tasks\n    tokio::join![\n        do_work(),\n        do_work(),\n        do_work(),\n    ];\n\n    Ok(())\n}\n\nasync fn do_work() {\n    for _ in 0..25 {\n        tokio::task::yield_now().await;\n        tokio::time::sleep(Duration::from_millis(100)).await;\n    }\n}\n```\n\"##\n)]\n\n//! ### Monitoring and publishing metrics\n//!\n//! If the `metrics-rs-integration` feature is additionally enabled, this crate allows\n//! publishing metrics externally via [metrics-rs](metrics) exporters.\n//!\n//! For example, you can use [metrics_exporter_prometheus] to make metrics visible\n//! to [Prometheus]. You can see the [metrics_exporter_prometheus] and [metrics-rs](metrics)\n//! docs for guidance on configuring exporters.\n//!\n//! 
The published metrics are the same as the fields and methods of\n#![cfg_attr(feature = \"rt\", doc = \"[RuntimeMetrics] and\")]\n//! [TaskMetrics], but with a \"tokio_\" prefix added, for example\n#![cfg_attr(feature = \"rt\", doc = \"`tokio_workers_count` and\")]\n//! `tokio_instrumented_count`.\n//!\n//! [metrics_exporter_prometheus]: https://docs.rs/metrics_exporter_prometheus\n#![cfg_attr(feature = \"rt\", doc = \"[RuntimeMetrics]: crate::RuntimeMetrics\")]\n//! [Prometheus]: https://prometheus.io\n//! [TaskMetrics]: crate::TaskMetrics\n//!\n//! This example exports [Prometheus] metrics by listening on a local Unix socket\n//! called `prometheus.sock`, which you can access for debugging by\n//! `curl --unix-socket prometheus.sock localhost`.\n//!\n//! ```\n//! use std::time::Duration;\n//!\n//! #[tokio::main]\n//! async fn main() {\n//!     metrics_exporter_prometheus::PrometheusBuilder::new()\n//!         .with_http_uds_listener(\"prometheus.sock\")\n//!         .install()\n//!         .unwrap();\n#![cfg_attr(\n    all(feature = \"rt\", feature = \"metrics-rs-integration\"),\n    doc = r##\"\n    // This line launches the runtime reporter that monitors the Tokio runtime and exports the metrics.\n    tokio::task::spawn(\n        tokio_metrics::RuntimeMetricsReporterBuilder::default().describe_and_run(),\n    );\n\"##\n)]\n//!     let monitor = tokio_metrics::TaskMonitor::new();\n#![cfg_attr(\n    all(feature = \"rt\", feature = \"metrics-rs-integration\"),\n    doc = r##\"\n    use metrics::Key;\n    // This line launches the task reporter that monitors Tokio tasks and exports the metrics.\n    tokio::task::spawn(\n        tokio_metrics::TaskMetricsReporterBuilder::new(|name| {\n            let name = name.replacen(\"tokio_\", \"my_task_\", 1);\n            Key::from_parts(name, &[(\"application\", \"my_app\")])\n        })\n        .describe_and_run(monitor.clone()),\n    );\n\"##\n)]\n//!     // Run some code.\n//!     
tokio::task::spawn(monitor.instrument(async move {\n//!         for _ in 0..1000 {\n//!             tokio::time::sleep(Duration::from_millis(10)).await;\n//!         }\n//!     }))\n//!     .await\n//!     .unwrap();\n//! }\n//! ```\n\nmacro_rules! cfg_rt {\n    ($($item:item)*) => {\n        $(\n            #[cfg(feature = \"rt\")]\n            #[cfg_attr(docsrs, doc(cfg(feature = \"rt\")))]\n            $item\n        )*\n    };\n}\n\ncfg_rt! {\n    mod runtime;\n    pub use runtime::{\n        RuntimeIntervals,\n        RuntimeMetrics,\n        RuntimeMonitor,\n    };\n}\n\n#[cfg(all(feature = \"rt\", tokio_unstable))]\npub use runtime::{HistogramBucket, PollTimeHistogram};\n\n#[cfg(all(feature = \"rt\", feature = \"metrics-rs-integration\"))]\n#[cfg_attr(\n    docsrs,\n    doc(cfg(all(feature = \"rt\", feature = \"metrics-rs-integration\")))\n)]\npub use runtime::metrics_rs_integration::{RuntimeMetricsReporter, RuntimeMetricsReporterBuilder};\n\nmod derived_metrics;\n#[cfg(feature = \"metrics-rs-integration\")]\nmod metrics_rs;\nmod task;\n\n#[cfg(feature = \"metrics-rs-integration\")]\n#[cfg_attr(docsrs, doc(cfg(feature = \"metrics-rs-integration\")))]\npub use task::metrics_rs_integration::{TaskMetricsReporter, TaskMetricsReporterBuilder};\npub use task::{\n    Instrumented, TaskIntervals, TaskMetrics, TaskMonitor, TaskMonitorCore, TaskMonitorCoreBuilder,\n};\n"
  },
  {
    "path": "src/metrics_rs.rs",
    "content": "use std::time::Duration;\n\npub(crate) const DEFAULT_METRIC_SAMPLING_INTERVAL: Duration = Duration::from_secs(30);\n\nmacro_rules! kind_to_type {\n    (Counter) => {\n        metrics::Counter\n    };\n    (Gauge) => {\n        metrics::Gauge\n    };\n    (PollTimeHistogram) => {\n        metrics::Histogram\n    };\n}\n\nmacro_rules! metric_key {\n    ($transform_fn:ident, $name:ident) => {\n        $transform_fn(concat!(\"tokio_\", stringify!($name)))\n    };\n}\n\n// calling `trim` since /// inserts spaces into docs\nmacro_rules! describe_metric_ref {\n    ($transform_fn:ident, $doc:expr, $name:ident: Counter<$unit:ident> []) => {\n        metrics::describe_counter!(\n            crate::metrics_rs::metric_key!($transform_fn, $name)\n                .name()\n                .to_owned(),\n            metrics::Unit::$unit,\n            $doc.trim()\n        )\n    };\n    ($transform_fn:ident, $doc:expr, $name:ident: Gauge<$unit:ident> []) => {\n        metrics::describe_gauge!(\n            crate::metrics_rs::metric_key!($transform_fn, $name)\n                .name()\n                .to_owned(),\n            metrics::Unit::$unit,\n            $doc.trim()\n        )\n    };\n    ($transform_fn:ident, $doc:expr, $name:ident: PollTimeHistogram<$unit:ident> []) => {\n        metrics::describe_histogram!(\n            crate::metrics_rs::metric_key!($transform_fn, $name)\n                .name()\n                .to_owned(),\n            metrics::Unit::$unit,\n            $doc.trim()\n        )\n    };\n}\n\nmacro_rules! 
capture_metric_ref {\n    ($transform_fn:ident, $name:ident: Counter []) => {{\n        let (name, labels) = crate::metrics_rs::metric_key!($transform_fn, $name).into_parts();\n        metrics::counter!(name, labels)\n    }};\n    ($transform_fn:ident, $name:ident: Gauge []) => {{\n        let (name, labels) = crate::metrics_rs::metric_key!($transform_fn, $name).into_parts();\n        metrics::gauge!(name, labels)\n    }};\n    ($transform_fn:ident, $name:ident: PollTimeHistogram []) => {{\n        let (name, labels) = crate::metrics_rs::metric_key!($transform_fn, $name).into_parts();\n        metrics::histogram!(name, labels)\n    }};\n}\n\nmacro_rules! metric_refs {\n    (\n        [$struct_name:ident] [$($ignore:ident),* $(,)?] [$metrics_name:ty] [$emit_arg_type:ty] {\n         stable {\n            $(\n                #[doc = $doc:tt]\n                $name:ident: $kind:tt <$unit:ident> $opts:tt\n            ),*\n            $(,)?\n         }\n         stable_derived {\n             $(\n                #[doc = $derived_doc:tt]\n                $derived_name:ident: $derived_kind:tt <$derived_unit:ident> $derived_opts:tt\n            ),*\n            $(,)?\n         }\n         unstable {\n            $(\n                #[doc = $unstable_doc:tt]\n                $unstable_name:ident: $unstable_kind:tt <$unstable_unit:ident> $unstable_opts:tt\n            ),*\n            $(,)?\n         }\n         unstable_derived {\n             $(\n                #[doc = $unstable_derived_doc:tt]\n                $unstable_derived_name:ident: $unstable_derived_kind:tt <$unstable_derived_unit:ident> $unstable_derived_opts:tt\n            ),*\n            $(,)?\n         }\n        }\n  ) => {\n        struct $struct_name {\n            $(\n                $name: crate::metrics_rs::kind_to_type!($kind),\n            )*\n            $(\n                $derived_name: crate::metrics_rs::kind_to_type!($derived_kind),\n            )*\n            $(\n                
#[cfg(tokio_unstable)]\n                $unstable_name: crate::metrics_rs::kind_to_type!($unstable_kind),\n            )*\n            $(\n                #[cfg(tokio_unstable)]\n                $unstable_derived_name: crate::metrics_rs::kind_to_type!($unstable_derived_kind),\n            )*\n        }\n\n        impl $struct_name {\n            fn capture(transform_fn: &mut dyn FnMut(&'static str) -> metrics::Key) -> Self {\n                Self {\n                    $(\n                        $name: crate::metrics_rs::capture_metric_ref!(transform_fn, $name: $kind $opts),\n                    )*\n                    $(\n                        $derived_name: crate::metrics_rs::capture_metric_ref!(transform_fn, $derived_name: $derived_kind $derived_opts),\n                    )*\n                    $(\n                        #[cfg(tokio_unstable)]\n                        $unstable_name: crate::metrics_rs::capture_metric_ref!(transform_fn, $unstable_name: $unstable_kind $unstable_opts),\n                    )*\n                    $(\n                        #[cfg(tokio_unstable)]\n                        $unstable_derived_name: crate::metrics_rs::capture_metric_ref!(transform_fn, $unstable_derived_name: $unstable_derived_kind $unstable_derived_opts),\n                    )*\n                }\n            }\n\n            fn emit(&self, metrics: $metrics_name, emit_arg: $emit_arg_type) {\n                // Emit derived metrics before base metrics because emitting base metrics may move\n                // out of `$metrics`.\n                $(\n                    crate::metrics_rs::MyMetricOp::op((&self.$derived_name, metrics.$derived_name()), emit_arg);\n                )*\n                $(\n                    #[cfg(tokio_unstable)]\n                    crate::metrics_rs::MyMetricOp::op((&self.$unstable_derived_name, metrics.$unstable_derived_name()), emit_arg);\n                )*\n                $(\n                    
crate::metrics_rs::MyMetricOp::op((&self.$name, metrics.$name), emit_arg);\n                )*\n                $(\n                    #[cfg(tokio_unstable)]\n                    crate::metrics_rs::MyMetricOp::op((&self.$unstable_name, metrics.$unstable_name), emit_arg);\n                )*\n            }\n\n            fn describe(transform_fn: &mut dyn FnMut(&'static str) -> metrics::Key) {\n                $(\n                    crate::metrics_rs::describe_metric_ref!(transform_fn, $doc, $name: $kind<$unit> $opts);\n                )*\n                $(\n                    crate::metrics_rs::describe_metric_ref!(transform_fn, $derived_doc, $derived_name: $derived_kind<$derived_unit> $derived_opts);\n                )*\n                $(\n                    #[cfg(tokio_unstable)]\n                    crate::metrics_rs::describe_metric_ref!(transform_fn, $unstable_doc, $unstable_name: $unstable_kind<$unstable_unit> $unstable_opts);\n                )*\n                $(\n                    #[cfg(tokio_unstable)]\n                    crate::metrics_rs::describe_metric_ref!(transform_fn, $unstable_derived_doc, $unstable_derived_name: $unstable_derived_kind<$unstable_derived_unit> $unstable_derived_opts);\n                )*\n            }\n        }\n\n        #[test]\n        fn test_no_fields_missing() {\n            // test that no fields are missing. 
We can't use exhaustive matching here\n            // since the metrics structs are #[non_exhaustive], so use a debug impl\n            let debug = format!(\"{:#?}\", <$metrics_name>::default());\n            for line in debug.lines() {\n                // Only look at top-level field lines: exactly 4 spaces of\n                // indentation and containing a `:` (field name separator).\n                // This skips the struct header/footer and any nested\n                // struct/vec Debug output from complex field types.\n                let is_top_level_field = line.starts_with(\"    \")\n                    && !line.starts_with(\"     \")\n                    && line.contains(':');\n                if !is_top_level_field {\n                    continue\n                }\n                $(\n                    let expected = format!(\"    {}:\", stringify!($ignore));\n                    if line.contains(&expected) {\n                        continue\n                    }\n                );*\n                $(\n                    let expected = format!(\"    {}:\", stringify!($name));\n                    eprintln!(\"{}\", expected);\n                    if line.contains(&expected) {\n                        continue\n                    }\n                );*\n                $(\n                    let expected = format!(\"    {}:\", stringify!($unstable_name));\n                    eprintln!(\"{}\", expected);\n                    if line.contains(&expected) {\n                        continue\n                    }\n                );*\n                panic!(\"missing metric {:?}\", line);\n            }\n        }\n\n        #[test]\n        fn test_no_derived_metrics_missing() {\n            // test that no derived metrics are missing.\n            for derived_metric in <$metrics_name>::DERIVED_METRICS {\n                $(\n                    if *derived_metric == stringify!($derived_name) {\n                        continue\n                    
}\n                );*\n                panic!(\"missing metric {:?}\", derived_metric);\n            }\n            #[cfg(tokio_unstable)]\n            for unstable_derived_metric in <$metrics_name>::UNSTABLE_DERIVED_METRICS {\n                $(\n                    if *unstable_derived_metric == stringify!($unstable_derived_name) {\n                        continue\n                    }\n                );*\n                panic!(\"missing metric {:?}\", unstable_derived_metric);\n            }\n        }\n    }\n}\n\npub(crate) use capture_metric_ref;\npub(crate) use describe_metric_ref;\npub(crate) use kind_to_type;\npub(crate) use metric_key;\npub(crate) use metric_refs;\n\npub(crate) trait MyMetricOp<T> {\n    fn op(self, t: T);\n}\n\nimpl<T> MyMetricOp<T> for (&metrics::Counter, Duration) {\n    fn op(self, _: T) {\n        self.0\n            .increment(self.1.as_micros().try_into().unwrap_or(u64::MAX));\n    }\n}\n\nimpl<T> MyMetricOp<T> for (&metrics::Counter, u64) {\n    fn op(self, _t: T) {\n        self.0.increment(self.1);\n    }\n}\n\nimpl<T> MyMetricOp<T> for (&metrics::Gauge, Duration) {\n    fn op(self, _t: T) {\n        self.0.set(self.1.as_micros() as f64);\n    }\n}\n\nimpl<T> MyMetricOp<T> for (&metrics::Gauge, u64) {\n    fn op(self, _: T) {\n        self.0.set(self.1 as f64);\n    }\n}\n\nimpl<T> MyMetricOp<T> for (&metrics::Gauge, usize) {\n    fn op(self, _t: T) {\n        self.0.set(self.1 as f64);\n    }\n}\n\nimpl<T> MyMetricOp<T> for (&metrics::Gauge, f64) {\n    fn op(self, _t: T) {\n        self.0.set(self.1);\n    }\n}\n\n#[cfg(all(feature = \"rt\", tokio_unstable))]\nimpl<T> MyMetricOp<T> for (&metrics::Histogram, crate::runtime::PollTimeHistogram) {\n    fn op(self, _: T) {\n        for bucket in self.1.buckets() {\n            if bucket.count() > 0 {\n                // Use range.start as the representative value; the metrics-rs\n                // histogram handles its own bucketing from these raw values.\n                
self.0.record_many(\n                    bucket.range_start().as_micros() as f64,\n                    bucket.count() as usize,\n                );\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "src/runtime/metrics_rs_integration.rs",
    "content": "use std::{fmt, time::Duration};\n\nuse tokio::runtime::Handle;\n\nuse super::{RuntimeIntervals, RuntimeMetrics, RuntimeMonitor};\nuse crate::metrics_rs::{metric_refs, DEFAULT_METRIC_SAMPLING_INTERVAL};\n\n/// A builder for the [`RuntimeMetricsReporter`] that wraps the RuntimeMonitor, periodically\n/// reporting RuntimeMetrics to any configured [metrics-rs] recorder.\n///\n/// ### Published Metrics\n///\n/// The published metrics are the fields of [RuntimeMetrics], but with the\n/// `tokio_` prefix added, for example, `tokio_workers_count`. If desired, you\n/// can use the [`with_metrics_transformer`] function to customize the metric names.\n///\n/// ### Usage\n///\n/// To upload metrics via [metrics-rs], you need to set up a reporter, which\n/// is actually what exports the metrics outside of the program. You must set\n/// up the reporter before you call [`describe_and_run`].\n///\n/// You can find exporters within the [metrics-rs] docs. One such reporter\n/// is the [metrics_exporter_prometheus] reporter, which makes metrics visible\n/// through Prometheus.\n///\n/// You can use it for example to export Prometheus metrics by listening on a local Unix socket\n/// called `prometheus.sock`, which you can access for debugging by\n/// `curl --unix-socket prometheus.sock localhost`, as follows:\n///\n/// ```\n/// use std::time::Duration;\n///\n/// #[tokio::main]\n/// async fn main() {\n///     metrics_exporter_prometheus::PrometheusBuilder::new()\n///         .with_http_uds_listener(\"prometheus.sock\")\n///         .install()\n///         .unwrap();\n///     tokio::task::spawn(\n///         tokio_metrics::RuntimeMetricsReporterBuilder::default()\n///             // the default metric sampling interval is 30 seconds, which is\n///             // too long for quick tests, so have it be 1 second.\n///             .with_interval(std::time::Duration::from_secs(1))\n///             .describe_and_run(),\n///     );\n///     // Run some code\n///     
tokio::task::spawn(async move {\n///         for _ in 0..1000 {\n///             tokio::time::sleep(Duration::from_millis(10)).await;\n///         }\n///     })\n///     .await\n///     .unwrap();\n/// }\n/// ```\n///\n/// [`describe_and_run`]: RuntimeMetricsReporterBuilder::describe_and_run\n/// [`with_metrics_transformer`]: RuntimeMetricsReporterBuilder::with_metrics_transformer\n/// [metrics-rs]: metrics\n/// [metrics_exporter_prometheus]: https://docs.rs/metrics_exporter_prometheus\npub struct RuntimeMetricsReporterBuilder {\n    interval: Duration,\n    metrics_transformer: Box<dyn FnMut(&'static str) -> metrics::Key + Send>,\n}\n\nimpl fmt::Debug for RuntimeMetricsReporterBuilder {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        f.debug_struct(\"RuntimeMetricsReporterBuilder\")\n            .field(\"interval\", &self.interval)\n            // skip metrics_transformer field\n            .finish()\n    }\n}\n\nimpl Default for RuntimeMetricsReporterBuilder {\n    fn default() -> Self {\n        RuntimeMetricsReporterBuilder {\n            interval: DEFAULT_METRIC_SAMPLING_INTERVAL,\n            metrics_transformer: Box::new(metrics::Key::from_static_name),\n        }\n    }\n}\n\nimpl RuntimeMetricsReporterBuilder {\n    /// Set the metric sampling interval, default: 30 seconds.\n    ///\n    /// Note that this is the interval on which metrics are *sampled* from\n    /// the Tokio runtime and then set on the [metrics-rs] reporter. 
Uploading the\n    /// metrics upstream is controlled by the reporter set up in the\n    /// application, and is normally controlled by a different period.\n    ///\n    /// For example, if metrics are exported via Prometheus, that\n    /// normally operates at a pull-based fashion, and the actual collection\n    /// period is controlled by the Prometheus server, which periodically polls the\n    /// application's Prometheus exporter to get the latest value of the metrics.\n    ///\n    /// [metrics-rs]: metrics\n    pub fn with_interval(mut self, interval: Duration) -> Self {\n        self.interval = interval;\n        self\n    }\n\n    /// Set a custom \"metrics transformer\", which is used during `build` to transform the metric\n    /// names into metric keys, for example to add dimensions. The string metric names used by this reporter\n    /// all start with `tokio_`. The default transformer is just [`metrics::Key::from_static_name`]\n    ///\n    /// For example, to attach a dimension named \"application\" with value \"my_app\", and to replace\n    /// `tokio_` with `my_app_`\n    /// ```\n    /// # use metrics::Key;\n    ///\n    /// #[tokio::main]\n    /// async fn main() {\n    ///     metrics_exporter_prometheus::PrometheusBuilder::new()\n    ///         .with_http_uds_listener(\"prometheus.sock\")\n    ///         .install()\n    ///         .unwrap();\n    ///     tokio::task::spawn(\n    ///         tokio_metrics::RuntimeMetricsReporterBuilder::default().with_metrics_transformer(|name| {\n    ///             let name = name.replacen(\"tokio_\", \"my_app_\", 1);\n    ///             Key::from_parts(name, &[(\"application\", \"my_app\")])\n    ///         })\n    ///         .describe_and_run()\n    ///     );\n    /// }\n    /// ```\n    pub fn with_metrics_transformer(\n        mut self,\n        transformer: impl FnMut(&'static str) -> metrics::Key + Send + 'static,\n    ) -> Self {\n        self.metrics_transformer = Box::new(transformer);\n        
self\n    }\n\n    /// Build the [`RuntimeMetricsReporter`] for the current Tokio runtime. This function will capture\n    /// the [`Counter`]s, [`Gauge`]s and [`Histogram`]s from the current [metrics-rs] reporter,\n    /// so if you are using [`with_local_recorder`], you should wrap this function and [`describe`] with it.\n    ///\n    /// For example:\n    /// ```\n    /// # use std::sync::Arc;\n    ///\n    /// #[tokio::main]\n    /// async fn main() {\n    ///     let builder = tokio_metrics::RuntimeMetricsReporterBuilder::default();\n    ///     let recorder = Arc::new(metrics_util::debugging::DebuggingRecorder::new());\n    ///     let metrics_reporter = metrics::with_local_recorder(&recorder, || builder.describe().build());\n    ///\n    ///     // no need to wrap `run()`, since the metrics are already captured\n    ///     tokio::task::spawn(metrics_reporter.run());\n    /// }\n    /// ```\n    ///\n    ///\n    /// [`Counter`]: metrics::Counter\n    /// [`Gauge`]: metrics::Gauge\n    /// [`Histogram`]: metrics::Histogram\n    /// [metrics-rs]: metrics\n    /// [`with_local_recorder`]: metrics::with_local_recorder\n    /// [`describe`]: Self::describe\n    #[must_use = \"reporter does nothing unless run\"]\n    pub fn build(self) -> RuntimeMetricsReporter {\n        self.build_with_monitor(RuntimeMonitor::new(&Handle::current()))\n    }\n\n    /// Build the [`RuntimeMetricsReporter`] with a specific [`RuntimeMonitor`]. 
This function will capture\n    /// the [`Counter`]s, [`Gauge`]s and [`Histogram`]s from the current [metrics-rs] reporter,\n    /// so if you are using [`with_local_recorder`], you should wrap this function and [`describe`]\n    /// with it.\n    ///\n    /// [`Counter`]: metrics::Counter\n    /// [`Gauge`]: metrics::Gauge\n    /// [`Histogram`]: metrics::Histogram\n    /// [metrics-rs]: metrics\n    /// [`with_local_recorder`]: metrics::with_local_recorder\n    /// [`describe`]: Self::describe\n    #[must_use = \"reporter does nothing unless run\"]\n    pub fn build_with_monitor(mut self, monitor: RuntimeMonitor) -> RuntimeMetricsReporter {\n        RuntimeMetricsReporter {\n            interval: self.interval,\n            intervals: monitor.intervals(),\n            emitter: RuntimeMetricRefs::capture(&mut self.metrics_transformer),\n        }\n    }\n\n    /// Call [`describe_counter`] etc. to describe the emitted metrics.\n    ///\n    /// Describing metrics makes the reporter attach descriptions and units to them,\n    /// which makes them easier to use. However, some reporters don't support\n    /// describing the same metric name more than once, so it is generally a good\n    /// idea to only call this function once per metric reporter.\n    ///\n    /// [`describe_counter`]: metrics::describe_counter\n    /// [metrics-rs]: metrics\n    pub fn describe(mut self) -> Self {\n        RuntimeMetricRefs::describe(&mut self.metrics_transformer);\n        self\n    }\n\n    /// Runs the reporter (within the returned future), [describing] the metrics beforehand.\n    ///\n    /// Describing metrics makes the reporter attach descriptions and units to them,\n    /// which makes them easier to use. However, some reporters don't support\n    /// describing the same metric name more than once. 
If you are emitting multiple\n    /// metrics via a single reporter, try to call [`describe`] once and [`run`] for each\n    /// runtime metrics reporter.\n    ///\n    /// ### Working with a custom reporter\n    ///\n    /// If you want to set a local metrics reporter, you shouldn't be calling this method,\n    /// but you should instead call `.describe().build()` within [`with_local_recorder`] and then\n    /// call `run` (see the docs on [`build`]).\n    ///\n    /// [describing]: Self::describe\n    /// [`describe`]: Self::describe\n    /// [`build`]: Self::build\n    /// [`run`]: RuntimeMetricsReporter::run\n    /// [`with_local_recorder`]: metrics::with_local_recorder\n    pub async fn describe_and_run(self) {\n        self.describe().build().run().await;\n    }\n\n    /// Runs the reporter (within the returned future), not describing the metrics beforehand.\n    ///\n    /// ### Working with a custom reporter\n    ///\n    /// If you want to set a local metrics reporter, you shouldn't be calling this method,\n    /// but you should instead call `.describe().build()` within [`with_local_recorder`] and then\n    /// call [`run`] (see the docs on [`build`]).\n    ///\n    /// [`build`]: Self::build\n    /// [`run`]: RuntimeMetricsReporter::run\n    /// [`with_local_recorder`]: metrics::with_local_recorder\n    pub async fn run_without_describing(self) {\n        self.build().run().await;\n    }\n}\n\n/// Collects metrics from a Tokio runtime and uploads them to [metrics_rs](metrics).\npub struct RuntimeMetricsReporter {\n    interval: Duration,\n    intervals: RuntimeIntervals,\n    emitter: RuntimeMetricRefs,\n}\n\nmetric_refs! 
{\n    [RuntimeMetricRefs] [elapsed] [RuntimeMetrics] [&tokio::runtime::RuntimeMetrics] {\n        stable {\n            /// The number of worker threads used by the runtime\n            workers_count: Gauge<Count> [],\n            /// The current number of alive tasks in the runtime.\n            live_tasks_count: Gauge<Count> [],\n            /// The maximum number of times any worker thread parked\n            max_park_count: Gauge<Count> [],\n            /// The minimum number of times any worker thread parked\n            min_park_count: Gauge<Count> [],\n            /// The number of times worker threads parked\n            total_park_count: Gauge<Count> [],\n            /// The amount of time worker threads were busy\n            total_busy_duration: Counter<Microseconds> [],\n            /// The maximum amount of time a worker thread was busy\n            max_busy_duration: Counter<Microseconds> [],\n            /// The minimum amount of time a worker thread was busy\n            min_busy_duration: Counter<Microseconds> [],\n            /// The number of tasks currently scheduled in the runtime's global queue\n            global_queue_depth: Gauge<Count> [],\n        }\n        stable_derived {\n            /// The ratio of the [`RuntimeMetrics::total_busy_duration`] to the [`RuntimeMetrics::elapsed`].\n            busy_ratio: Gauge<Percent> [],\n        }\n        unstable {\n            /// The average duration of a single invocation of poll on a task\n            mean_poll_duration: Gauge<Microseconds> [],\n            /// The average duration of a single invocation of poll on a task on the worker with the lowest value\n            mean_poll_duration_worker_min: Gauge<Microseconds> [],\n            /// The average duration of a single invocation of poll on a task on the worker with the highest value\n            mean_poll_duration_worker_max: Gauge<Microseconds> [],\n            /// A histogram of task polls since the previous probe grouped by poll times\n          
  poll_time_histogram: PollTimeHistogram<Microseconds> [],\n            /// The number of times worker threads unparked but performed no work before parking again\n            total_noop_count: Counter<Count> [],\n            /// The maximum number of times any worker thread unparked but performed no work before parking again\n            max_noop_count: Counter<Count> [],\n            /// The minimum number of times any worker thread unparked but performed no work before parking again\n            min_noop_count: Counter<Count> [],\n            /// The number of tasks worker threads stole from another worker thread\n            total_steal_count: Counter<Count> [],\n            /// The maximum number of tasks any worker thread stole from another worker thread.\n            max_steal_count: Counter<Count> [],\n            /// The minimum number of tasks any worker thread stole from another worker thread\n            min_steal_count: Counter<Count> [],\n            /// The number of times worker threads stole tasks from another worker thread\n            total_steal_operations: Counter<Count> [],\n            /// The maximum number of times any worker thread stole tasks from another worker thread\n            max_steal_operations: Counter<Count> [],\n            /// The minimum number of times any worker thread stole tasks from another worker thread\n            min_steal_operations: Counter<Count> [],\n            /// The number of tasks scheduled from **outside** of the runtime\n            num_remote_schedules: Counter<Count> [],\n            /// The number of tasks scheduled from worker threads\n            total_local_schedule_count: Counter<Count> [],\n            /// The maximum number of tasks scheduled from any one worker thread\n            max_local_schedule_count: Counter<Count> [],\n            /// The minimum number of tasks scheduled from any one worker thread\n            min_local_schedule_count: Counter<Count> [],\n            /// The number of 
times worker threads saturated their local queues\n            total_overflow_count: Counter<Count> [],\n            /// The maximum number of times any one worker saturated its local queue\n            max_overflow_count: Counter<Count> [],\n            /// The minimum number of times any one worker saturated its local queue\n            min_overflow_count: Counter<Count> [],\n            /// The number of tasks that have been polled across all worker threads\n            total_polls_count: Counter<Count> [],\n            /// The maximum number of tasks that have been polled in any worker thread\n            max_polls_count: Counter<Count> [],\n            /// The minimum number of tasks that have been polled in any worker thread\n            min_polls_count: Counter<Count> [],\n            /// The total number of tasks currently scheduled in workers' local queues\n            total_local_queue_depth: Gauge<Count> [],\n            /// The maximum number of tasks currently scheduled in any worker's local queue\n            max_local_queue_depth: Gauge<Count> [],\n            /// The minimum number of tasks currently scheduled in any worker's local queue\n            min_local_queue_depth: Gauge<Count> [],\n            /// The number of tasks currently waiting to be executed in the runtime's blocking threadpool.\n            blocking_queue_depth: Gauge<Count> [],\n            /// The number of additional threads spawned by the runtime.\n            blocking_threads_count: Gauge<Count> [],\n            /// The number of idle threads, which have been spawned by the runtime for `spawn_blocking` calls.\n            idle_blocking_threads_count: Gauge<Count> [],\n            /// Returns the number of times that tasks have been forced to yield back to the scheduler after exhausting their task budgets\n            budget_forced_yield_count: Counter<Count> [],\n            /// Returns the number of ready events processed by the runtime’s I/O driver\n            io_driver_ready_count: 
Counter<Count> [],\n        }\n        unstable_derived {\n            /// The ratio of the [`RuntimeMetrics::total_polls_count`] to the [`RuntimeMetrics::total_noop_count`].\n            mean_polls_per_park: Gauge<Percent> [],\n        }\n    }\n}\n\nimpl fmt::Debug for RuntimeMetricsReporter {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        f.debug_struct(\"RuntimeMetricsReporter\")\n            .field(\"interval\", &self.interval)\n            // skip intervals field\n            .finish()\n    }\n}\n\nimpl RuntimeMetricsReporter {\n    /// Collect and publish metrics once to the configured [metrics_rs](metrics) reporter.\n    pub fn run_once(&mut self) {\n        let metrics = self\n            .intervals\n            .next()\n            .expect(\"RuntimeIntervals::next never returns None\");\n        self.emitter.emit(metrics, &self.intervals.runtime);\n    }\n\n    /// Collect and publish metrics periodically to the configured [metrics_rs](metrics) reporter.\n    ///\n    /// You probably want to run this within its own task (using [`tokio::task::spawn`])\n    pub async fn run(mut self) {\n        loop {\n            self.run_once();\n            tokio::time::sleep(self.interval).await;\n        }\n    }\n}\n"
  },
  {
    "path": "src/runtime/poll_time_histogram.rs",
    "content": "use std::time::Duration;\n\n/// A histogram of task poll durations, pairing each bucket's count with its\n/// time range from the runtime configuration.\n///\n/// This type is returned as part of [`RuntimeMetrics`][super::RuntimeMetrics]\n/// when the runtime has poll time histograms enabled via\n/// [`enable_metrics_poll_time_histogram`][tokio::runtime::Builder::enable_metrics_poll_time_histogram].\n///\n/// Each bucket contains the [`Duration`] range configured for that bucket and\n/// the count of task polls that fell into that range during the sampling\n/// interval.\n#[derive(Debug, Clone, Default)]\n#[non_exhaustive]\npub struct PollTimeHistogram {\n    buckets: Vec<HistogramBucket>,\n}\n\nimpl PollTimeHistogram {\n    pub(crate) fn new(buckets: Vec<HistogramBucket>) -> Self {\n        Self { buckets }\n    }\n\n    /// Returns the histogram buckets.\n    pub fn buckets(&self) -> &[HistogramBucket] {\n        &self.buckets\n    }\n\n    pub(crate) fn buckets_mut(&mut self) -> &mut [HistogramBucket] {\n        &mut self.buckets\n    }\n\n    /// Returns just the bucket counts as a `Vec<u64>`.\n    pub fn as_counts(&self) -> Vec<u64> {\n        self.buckets.iter().map(|b| b.count).collect()\n    }\n}\n\n/// A single bucket in a [`PollTimeHistogram`].\n#[derive(Debug, Clone, Copy, Default)]\n#[non_exhaustive]\npub struct HistogramBucket {\n    range_start: Duration,\n    range_end: Duration,\n    count: u64,\n}\n\nimpl HistogramBucket {\n    pub(crate) fn new(range_start: Duration, range_end: Duration, count: u64) -> Self {\n        Self { range_start, range_end, count }\n    }\n\n    /// The start of the time range for this bucket (inclusive).\n    pub fn range_start(&self) -> Duration {\n        self.range_start\n    }\n\n    /// The end of the time range for this bucket (exclusive).\n    pub fn range_end(&self) -> Duration {\n        self.range_end\n    }\n\n    /// Returns the poll count for this bucket during the interval.\n    pub fn 
count(&self) -> u64 {\n        self.count\n    }\n\n    /// Adds to the count of this bucket.\n    pub(crate) fn add_count(&mut self, delta: u64) {\n        self.count = self.count.saturating_add(delta);\n    }\n}\n\n#[cfg(feature = \"metrique-integration\")]\nimpl metrique::writer::Value for PollTimeHistogram {\n    fn write(&self, writer: impl metrique::writer::ValueWriter) {\n        use metrique::writer::unit::NegativeScale;\n        use metrique::writer::{MetricFlags, Observation, Unit};\n\n        // Use the bucket midpoint as the representative value. \n        // Tokio's last bucket has range_end of Duration::from_nanos(u64::MAX),\n        // so use range_start for it since the midpoint wouldn't be representative.\n        const LAST_BUCKET_END: Duration = Duration::from_nanos(u64::MAX);\n        writer.metric(\n            self.buckets.iter().filter(|b| b.count > 0).map(|b| {\n                let value_us = if b.range_end == LAST_BUCKET_END {\n                    b.range_start.as_micros() as f64\n                } else {\n                    #[allow(clippy::incompatible_msrv)] // metrique-integration requires 1.89+\n                    f64::midpoint(\n                        b.range_start.as_micros() as f64,\n                        b.range_end.as_micros() as f64,\n                    )\n                };\n                Observation::Repeated {\n                    total: value_us * b.count as f64,\n                    occurrences: b.count,\n                }\n            }),\n            Unit::Second(NegativeScale::Micro),\n            [],\n            MetricFlags::empty(),\n        );\n    }\n}\n\n#[cfg(feature = \"metrique-integration\")]\nimpl metrique::CloseValue for PollTimeHistogram {\n    type Closed = Self;\n\n    fn close(self) -> Self {\n        self\n    }\n}\n\n#[cfg(all(test, feature = \"metrique-integration\"))]\nmod tests {\n    use super::*;\n    use crate::runtime::RuntimeMetrics;\n    use metrique::CloseValue;\n    use 
metrique::test_util::test_metric;\n\n    #[test]\n    fn poll_time_histogram_close_value() {\n        let hist = PollTimeHistogram::new(vec![\n            HistogramBucket::new(Duration::from_micros(0), Duration::from_micros(100), 5),\n            HistogramBucket::new(Duration::from_micros(100), Duration::from_micros(200), 0),\n            HistogramBucket::new(Duration::from_micros(200), Duration::from_micros(500), 3),\n        ]);\n\n        let closed = hist.close();\n        let buckets = closed.buckets();\n        assert_eq!(buckets.len(), 3);\n        assert_eq!(buckets[0].count(), 5);\n        assert_eq!(buckets[0].range_start(), Duration::from_micros(0));\n        assert_eq!(buckets[0].range_end(), Duration::from_micros(100));\n        assert_eq!(buckets[1].count(), 0);\n        assert_eq!(buckets[2].count(), 3);\n        assert_eq!(buckets[2].range_start(), Duration::from_micros(200));\n        assert_eq!(buckets[2].range_end(), Duration::from_micros(500));\n    }\n\n    #[test]\n    fn poll_time_histogram_last_bucket_uses_range_start() {\n        let last_bucket_start = Duration::from_millis(500);\n        let metrics = RuntimeMetrics {\n            poll_time_histogram: PollTimeHistogram::new(vec![\n                HistogramBucket::new(Duration::from_micros(0), Duration::from_micros(100), 0),\n                HistogramBucket::new(last_bucket_start, Duration::from_nanos(u64::MAX), 2),\n            ]),\n            ..Default::default()\n        };\n\n        let entry = test_metric(metrics);\n        let hist = &entry.metrics[\"poll_time_histogram\"];\n        assert_eq!(hist.distribution.len(), 1);\n\n        match hist.distribution[0] {\n            metrique::writer::Observation::Repeated { total, occurrences } => {\n                assert_eq!(occurrences, 2);\n                let expected = last_bucket_start.as_micros() as f64 * 2.0;\n                assert!((total - expected).abs() < 0.01);\n            }\n            other => panic!(\"expected Repeated, 
got {other:?}\"),\n        }\n    }\n}\n"
  },
  {
    "path": "src/runtime.rs",
    "content": "use crate::derived_metrics::derived_metrics;\n#[cfg(tokio_unstable)]\nuse std::ops::Range;\nuse std::time::{Duration, Instant};\nuse tokio::runtime;\n\n#[cfg(tokio_unstable)]\nmod poll_time_histogram;\n#[cfg(tokio_unstable)]\npub use poll_time_histogram::{HistogramBucket, PollTimeHistogram};\n\n#[cfg(feature = \"metrics-rs-integration\")]\npub(crate) mod metrics_rs_integration;\n\n/// Monitors key metrics of the tokio runtime.\n///\n/// ### Usage\n/// ```\n/// use std::time::Duration;\n/// use tokio_metrics::RuntimeMonitor;\n///\n/// #[tokio::main]\n/// async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {\n///     let handle = tokio::runtime::Handle::current();\n///\n///     // print runtime metrics every 500ms\n///     {\n///         let runtime_monitor = RuntimeMonitor::new(&handle);\n///         tokio::spawn(async move {\n///             for interval in runtime_monitor.intervals() {\n///                 // pretty-print the metric interval\n///                 println!(\"{:?}\", interval);\n///                 // wait 500ms\n///                 tokio::time::sleep(Duration::from_millis(500)).await;\n///             }\n///         });\n///     }\n///\n///     // await some tasks\n///     tokio::join![\n///         do_work(),\n///         do_work(),\n///         do_work(),\n///     ];\n///\n///     Ok(())\n/// }\n///\n/// async fn do_work() {\n///     for _ in 0..25 {\n///         tokio::task::yield_now().await;\n///         tokio::time::sleep(Duration::from_millis(100)).await;\n///     }\n/// }\n/// ```\n#[derive(Debug)]\npub struct RuntimeMonitor {\n    /// Handle to the runtime\n    runtime: runtime::RuntimeMetrics,\n}\n\nmacro_rules! 
define_runtime_metrics {\n    (\n    stable {\n        $(\n            $(#[$($attributes:tt)*])*\n            $vis:vis $name:ident: $ty:ty\n        ),*\n        $(,)?\n    }\n    unstable {\n        $(\n            $(#[$($unstable_attributes:tt)*])*\n            $unstable_vis:vis $unstable_name:ident: $unstable_ty:ty\n        ),*\n        $(,)?\n    }\n    ) => {\n        /// Key runtime metrics.\n        #[non_exhaustive]\n        #[cfg_attr(feature = \"metrique-integration\", metrique::unit_of_work::metrics(subfield_owned))]\n        #[derive(Default, Debug, Clone)]\n        pub struct RuntimeMetrics {\n            $(\n                $(#[$($attributes)*])*\n                #[cfg_attr(docsrs, doc(cfg(feature = \"rt\")))]\n                $vis $name: $ty,\n            )*\n            $(\n                $(#[$($unstable_attributes)*])*\n                #[cfg(tokio_unstable)]\n                #[cfg_attr(docsrs, doc(cfg(all(feature = \"rt\", tokio_unstable))))]\n                $unstable_vis $unstable_name: $unstable_ty,\n            )*\n        }\n    };\n}\n\ndefine_runtime_metrics! 
{\n    stable {\n        /// The number of worker threads used by the runtime.\n        ///\n        /// This metric is static for a runtime.\n        ///\n        /// This metric is always equal to [`tokio::runtime::RuntimeMetrics::num_workers`].\n        /// When using the `current_thread` runtime, the return value is always `1`.\n        ///\n        /// The number of workers is set by configuring\n        /// [`worker_threads`][`tokio::runtime::Builder::worker_threads`] with\n        /// [`tokio::runtime::Builder`], or by parameterizing [`tokio::main`].\n        ///\n        /// ##### Examples\n        /// In the below example, the number of workers is set by parameterizing [`tokio::main`]:\n        /// ```\n        /// use tokio::runtime::Handle;\n        ///\n        /// #[tokio::main(flavor = \"multi_thread\", worker_threads = 10)]\n        /// async fn main() {\n        ///     let handle = tokio::runtime::Handle::current();\n        ///     let monitor = tokio_metrics::RuntimeMonitor::new(&handle);\n        ///     let mut intervals = monitor.intervals();\n        ///     let mut next_interval = || intervals.next().unwrap();\n        ///\n        ///     assert_eq!(next_interval().workers_count, 10);\n        /// }\n        /// ```\n        ///\n        /// [`tokio::main`]: https://docs.rs/tokio/latest/tokio/attr.main.html\n        ///\n        /// When using the `current_thread` runtime, the return value is always `1`; e.g.:\n        /// ```\n        /// use tokio::runtime::Handle;\n        ///\n        /// #[tokio::main(flavor = \"current_thread\")]\n        /// async fn main() {\n        ///     let handle = tokio::runtime::Handle::current();\n        ///     let monitor = tokio_metrics::RuntimeMonitor::new(&handle);\n        ///     let mut intervals = monitor.intervals();\n        ///     let mut next_interval = || intervals.next().unwrap();\n        ///\n        ///     assert_eq!(next_interval().workers_count, 1);\n        /// }\n        /// ```\n   
     ///\n        /// This metric is always equal to [`tokio::runtime::RuntimeMetrics::num_workers`]; e.g.:\n        /// ```\n        /// use tokio::runtime::Handle;\n        ///\n        /// #[tokio::main]\n        /// async fn main() {\n        ///     let handle = Handle::current();\n        ///     let monitor = tokio_metrics::RuntimeMonitor::new(&handle);\n        ///     let mut intervals = monitor.intervals();\n        ///     let mut next_interval = || intervals.next().unwrap();\n        ///\n        ///     assert_eq!(next_interval().workers_count, handle.metrics().num_workers());\n        /// }\n        /// ```\n        pub workers_count: usize,\n\n        /// The current number of alive tasks in the runtime.\n        ///\n        /// ##### Definition\n        /// This metric is derived from [`tokio::runtime::RuntimeMetrics::num_alive_tasks`].\n        pub live_tasks_count: usize,\n\n        /// The number of times worker threads parked.\n        ///\n        /// The worker park count increases by one each time the worker parks the thread waiting for\n        /// new inbound events to process. 
This usually means the worker has processed all pending work\n        /// and is currently idle.\n        ///\n        /// ##### Definition\n        /// This metric is derived from the sum of [`tokio::runtime::RuntimeMetrics::worker_park_count`]\n        /// across all worker threads.\n        ///\n        /// ##### See also\n        /// - [`RuntimeMetrics::max_park_count`]\n        /// - [`RuntimeMetrics::min_park_count`]\n        ///\n        /// ##### Examples\n        /// ```\n        /// #[tokio::main(flavor = \"multi_thread\", worker_threads = 2)]\n        /// async fn main() {\n        ///     let handle = tokio::runtime::Handle::current();\n        ///     let monitor = tokio_metrics::RuntimeMonitor::new(&handle);\n        ///     let mut intervals = monitor.intervals();\n        ///     let mut next_interval = || intervals.next().unwrap();\n        ///\n        ///     let interval = next_interval(); // end of interval 1\n        ///     assert_eq!(interval.total_park_count, 0);\n        ///\n        ///     induce_parks().await;\n        ///\n        ///     let interval = next_interval(); // end of interval 2\n        ///     assert!(interval.total_park_count >= 1); // usually 1 or 2 parks\n        /// }\n        ///\n        /// async fn induce_parks() {\n        ///     let _ = tokio::time::timeout(std::time::Duration::ZERO, async {\n        ///         loop { tokio::task::yield_now().await; }\n        ///     }).await;\n        /// }\n        /// ```\n        pub total_park_count: u64,\n\n        /// The maximum number of times any worker thread parked.\n        ///\n        /// ##### Definition\n        /// This metric is derived from the maximum of\n        /// [`tokio::runtime::RuntimeMetrics::worker_park_count`] across all worker threads.\n        ///\n        /// ##### See also\n        /// - [`RuntimeMetrics::total_park_count`]\n        /// - [`RuntimeMetrics::min_park_count`]\n        pub max_park_count: u64,\n\n        /// The minimum number 
of times any worker thread parked.\n        ///\n        /// ##### Definition\n        /// This metric is derived from the minimum of\n        /// [`tokio::runtime::RuntimeMetrics::worker_park_count`] across all worker threads.\n        ///\n        /// ##### See also\n        /// - [`RuntimeMetrics::total_park_count`]\n        /// - [`RuntimeMetrics::max_park_count`]\n        pub min_park_count: u64,\n\n        /// The amount of time worker threads were busy.\n        ///\n        /// The worker busy duration increases whenever the worker is spending time processing work.\n        /// Using this value can indicate the total load of workers.\n        ///\n        /// ##### Definition\n        /// This metric is derived from the sum of\n        /// [`tokio::runtime::RuntimeMetrics::worker_total_busy_duration`] across all worker threads.\n        ///\n        /// ##### See also\n        /// - [`RuntimeMetrics::min_busy_duration`]\n        /// - [`RuntimeMetrics::max_busy_duration`]\n        ///\n        /// ##### Examples\n        /// In the below example, tasks spend a total of 4s busy:\n        /// ```\n        /// use tokio::time::Duration;\n        ///\n        /// fn main() {\n        ///     let start = tokio::time::Instant::now();\n        ///\n        ///     let rt = tokio::runtime::Builder::new_current_thread()\n        ///         .enable_all()\n        ///         .build()\n        ///         .unwrap();\n        ///\n        ///     let handle = rt.handle();\n        ///     let monitor = tokio_metrics::RuntimeMonitor::new(&handle);\n        ///     let mut intervals = monitor.intervals();\n        ///     let mut next_interval = || intervals.next().unwrap();\n        ///\n        ///     let delay_1s = Duration::from_secs(1);\n        ///     let delay_3s = Duration::from_secs(3);\n        ///\n        ///     rt.block_on(async {\n        ///         // keep the main task busy for 1s\n        ///         spin_for(delay_1s);\n        ///\n        ///     
    // spawn a task and keep it busy for 3s\n        ///         let _ = tokio::spawn(async move {\n        ///             spin_for(delay_3s);\n        ///         }).await;\n        ///     });\n        ///\n        ///     // flush metrics\n        ///     drop(rt);\n        ///\n        ///     let elapsed = start.elapsed();\n        ///\n        ///     let interval =  next_interval(); // end of interval 2\n        ///     assert!(interval.total_busy_duration >= delay_1s + delay_3s);\n        ///     assert!(interval.total_busy_duration <= elapsed);\n        /// }\n        ///\n        /// fn time<F>(task: F) -> Duration\n        /// where\n        ///     F: Fn() -> ()\n        /// {\n        ///     let start = tokio::time::Instant::now();\n        ///     task();\n        ///     start.elapsed()\n        /// }\n        ///\n        /// /// Block the current thread for a given `duration`.\n        /// fn spin_for(duration: Duration) {\n        ///     let start = tokio::time::Instant::now();\n        ///     while start.elapsed() <= duration {}\n        /// }\n        /// ```\n        ///\n        /// Busy times may not accumulate as the above example suggests (FIXME: Why?); e.g., if we\n        /// remove the three second delay, the time spent busy falls to mere microseconds:\n        /// ```should_panic\n        /// use tokio::time::Duration;\n        ///\n        /// fn main() {\n        ///     let rt = tokio::runtime::Builder::new_current_thread()\n        ///         .enable_all()\n        ///         .build()\n        ///         .unwrap();\n        ///\n        ///     let handle = rt.handle();\n        ///     let monitor = tokio_metrics::RuntimeMonitor::new(&handle);\n        ///     let mut intervals = monitor.intervals();\n        ///     let mut next_interval = || intervals.next().unwrap();\n        ///\n        ///     let delay_1s = Duration::from_secs(1);\n        ///\n        ///     let elapsed = time(|| rt.block_on(async {\n        ///     
    // keep the main task busy for 1s\n        ///         spin_for(delay_1s);\n        ///     }));\n        ///\n        ///     // flush metrics\n        ///     drop(rt);\n        ///\n        ///     let interval =  next_interval(); // end of interval 2\n        ///     assert!(interval.total_busy_duration >= delay_1s); // FAIL\n        ///     assert!(interval.total_busy_duration <= elapsed);\n        /// }\n        ///\n        /// fn time<F>(task: F) -> Duration\n        /// where\n        ///     F: Fn() -> ()\n        /// {\n        ///     let start = tokio::time::Instant::now();\n        ///     task();\n        ///     start.elapsed()\n        /// }\n        ///\n        /// /// Block the current thread for a given `duration`.\n        /// fn spin_for(duration: Duration) {\n        ///     let start = tokio::time::Instant::now();\n        ///     while start.elapsed() <= duration {}\n        /// }\n        /// ```\n        pub total_busy_duration: Duration,\n\n        /// The maximum amount of time a worker thread was busy.\n        ///\n        /// ##### Definition\n        /// This metric is derived from the maximum of\n        /// [`tokio::runtime::RuntimeMetrics::worker_total_busy_duration`] across all worker threads.\n        ///\n        /// ##### See also\n        /// - [`RuntimeMetrics::total_busy_duration`]\n        /// - [`RuntimeMetrics::min_busy_duration`]\n        pub max_busy_duration: Duration,\n\n        /// The minimum amount of time a worker thread was busy.\n        ///\n        /// ##### Definition\n        /// This metric is derived from the minimum of\n        /// [`tokio::runtime::RuntimeMetrics::worker_total_busy_duration`] across all worker threads.\n        ///\n        /// ##### See also\n        /// - [`RuntimeMetrics::total_busy_duration`]\n        /// - [`RuntimeMetrics::max_busy_duration`]\n        pub min_busy_duration: Duration,\n\n        /// The number of tasks currently scheduled in the runtime's global queue.\n      
  ///\n        /// Tasks that are spawned or notified from a non-runtime thread are scheduled using the\n        /// runtime's global queue. This metric returns the **current** number of tasks pending in\n        /// the global queue. As such, the returned value may increase or decrease as new tasks are\n        /// scheduled and processed.\n        ///\n        /// ##### Definition\n        /// This metric is derived from [`tokio::runtime::RuntimeMetrics::global_queue_depth`].\n        ///\n        /// ##### Example\n        /// ```\n        /// # let current_thread = tokio::runtime::Builder::new_current_thread()\n        /// #     .enable_all()\n        /// #     .build()\n        /// #     .unwrap();\n        /// #\n        /// # let multi_thread = tokio::runtime::Builder::new_multi_thread()\n        /// #     .worker_threads(2)\n        /// #     .enable_all()\n        /// #     .build()\n        /// #     .unwrap();\n        /// #\n        /// # for runtime in [current_thread, multi_thread] {\n        /// let handle = runtime.handle().clone();\n        /// let monitor = tokio_metrics::RuntimeMonitor::new(&handle);\n        /// let mut intervals = monitor.intervals();\n        /// let mut next_interval = || intervals.next().unwrap();\n        ///\n        /// let interval = next_interval(); // end of interval 1\n        /// # #[cfg(tokio_unstable)]\n        /// assert_eq!(interval.num_remote_schedules, 0);\n        ///\n        /// // spawn a system thread outside of the runtime\n        /// std::thread::spawn(move || {\n        ///     // spawn two tasks from this non-runtime thread\n        ///     handle.spawn(async {});\n        ///     handle.spawn(async {});\n        /// }).join().unwrap();\n        ///\n        /// // flush metrics\n        /// drop(runtime);\n        ///\n        /// let interval = next_interval(); // end of interval 2\n        /// # #[cfg(tokio_unstable)]\n        /// assert_eq!(interval.num_remote_schedules, 2);\n        /// # }\n     
   /// ```\n        pub global_queue_depth: usize,\n\n        /// Total amount of time elapsed since observing runtime metrics.\n        pub elapsed: Duration,\n    }\n    unstable {\n        /// The average duration of a single invocation of poll on a task.\n        ///\n        /// This average is an exponentially-weighted moving average of the duration\n        /// of task polls on all runtime workers.\n        ///\n        /// ##### Examples\n        /// ```\n        /// #[tokio::main(flavor = \"multi_thread\", worker_threads = 2)]\n        /// async fn main() {\n        ///     let handle = tokio::runtime::Handle::current();\n        ///     let monitor = tokio_metrics::RuntimeMonitor::new(&handle);\n        ///     let mut intervals = monitor.intervals();\n        ///     let mut next_interval = || intervals.next().unwrap();\n        ///\n        ///     let interval = next_interval();\n        ///     println!(\"mean task poll duration is {:?}\", interval.mean_poll_duration);\n        /// }\n        /// ```\n        pub mean_poll_duration: Duration,\n\n        /// The average duration of a single invocation of poll on a task on the\n        /// worker with the lowest value.\n        ///\n        /// This average is an exponentially-weighted moving average of the duration\n        /// of task polls on the runtime worker with the lowest value.\n        ///\n        /// ##### Examples\n        /// ```\n        /// #[tokio::main(flavor = \"multi_thread\", worker_threads = 2)]\n        /// async fn main() {\n        ///     let handle = tokio::runtime::Handle::current();\n        ///     let monitor = tokio_metrics::RuntimeMonitor::new(&handle);\n        ///     let mut intervals = monitor.intervals();\n        ///     let mut next_interval = || intervals.next().unwrap();\n        ///\n        ///     let interval = next_interval();\n        ///     println!(\"min mean task poll duration is {:?}\", interval.mean_poll_duration_worker_min);\n        /// }\n        
/// ```\n        pub mean_poll_duration_worker_min: Duration,\n\n        /// The average duration of a single invocation of poll on a task on the\n        /// worker with the highest value.\n        ///\n        /// This average is an exponentially-weighted moving average of the duration\n        /// of task polls on the runtime worker with the highest value.\n        ///\n        /// ##### Examples\n        /// ```\n        /// #[tokio::main(flavor = \"multi_thread\", worker_threads = 2)]\n        /// async fn main() {\n        ///     let handle = tokio::runtime::Handle::current();\n        ///     let monitor = tokio_metrics::RuntimeMonitor::new(&handle);\n        ///     let mut intervals = monitor.intervals();\n        ///     let mut next_interval = || intervals.next().unwrap();\n        ///\n        ///     let interval = next_interval();\n        ///     println!(\"max mean task poll duration is {:?}\", interval.mean_poll_duration_worker_max);\n        /// }\n        /// ```\n        pub mean_poll_duration_worker_max: Duration,\n\n        /// A histogram of task polls since the previous probe grouped by poll\n        /// times.\n        ///\n        /// Each bucket contains the configured [`Duration`] range and the count\n        /// of task polls that fell into that range during the interval. Use\n        /// [`PollTimeHistogram::as_counts`] to get just the raw counts as a\n        /// `Vec<u64>`.\n        ///\n        /// This metric must be explicitly enabled when creating the runtime with\n        /// [`enable_metrics_poll_time_histogram`][tokio::runtime::Builder::enable_metrics_poll_time_histogram].\n        /// Bucket sizes are fixed and configured at the runtime level. 
See\n        /// configuration options on\n        /// [`runtime::Builder`][tokio::runtime::Builder::enable_metrics_poll_time_histogram].\n        ///\n        /// ##### Examples\n        /// ```\n        /// use tokio::runtime::HistogramConfiguration;\n        /// use std::time::Duration;\n        ///\n        /// let config = HistogramConfiguration::linear(Duration::from_micros(50), 12);\n        ///\n        /// let rt = tokio::runtime::Builder::new_multi_thread()\n        ///     .enable_metrics_poll_time_histogram()\n        ///     .metrics_poll_time_histogram_configuration(config)\n        ///     .build()\n        ///     .unwrap();\n        ///\n        /// rt.block_on(async {\n        ///     let handle = tokio::runtime::Handle::current();\n        ///     let monitor = tokio_metrics::RuntimeMonitor::new(&handle);\n        ///     let mut intervals = monitor.intervals();\n        ///     let mut next_interval = || intervals.next().unwrap();\n        ///\n        ///     let interval = next_interval();\n        ///     for bucket in interval.poll_time_histogram.buckets() {\n        ///         println!(\"{:?}..{:?} => {} polls\", bucket.range_start(), bucket.range_end(), bucket.count());\n        ///     }\n        /// });\n        /// ```\n        pub poll_time_histogram: PollTimeHistogram,\n\n        /// The number of times worker threads unparked but performed no work before parking again.\n        ///\n        /// The worker no-op count increases by one each time the worker unparks the thread but finds\n        /// no new work and goes back to sleep. 
This indicates a false-positive wake up.\n        ///\n        /// ##### Definition\n        /// This metric is derived from the sum of [`tokio::runtime::RuntimeMetrics::worker_noop_count`]\n        /// across all worker threads.\n        ///\n        /// ##### Examples\n        /// Unfortunately, there isn't a great way to reliably induce no-op parks, as they occur as\n        /// false-positive events under concurrency.\n        ///\n        /// The below example triggers fewer than two parks in the single-threaded runtime:\n        /// ```\n        /// #[tokio::main(flavor = \"current_thread\")]\n        /// async fn main() {\n        ///     let handle = tokio::runtime::Handle::current();\n        ///     let monitor = tokio_metrics::RuntimeMonitor::new(&handle);\n        ///     let mut intervals = monitor.intervals();\n        ///     let mut next_interval = || intervals.next().unwrap();\n        ///\n        ///     assert_eq!(next_interval().total_park_count, 0);\n        ///\n        ///     async {\n        ///         tokio::time::sleep(std::time::Duration::from_millis(1)).await;\n        ///     }.await;\n        ///\n        ///     assert!(next_interval().total_park_count > 0);\n        /// }\n        /// ```\n        ///\n        /// The below example triggers fewer than two parks in the multi-threaded runtime:\n        /// ```\n        /// #[tokio::main(flavor = \"multi_thread\")]\n        /// async fn main() {\n        ///     let handle = tokio::runtime::Handle::current();\n        ///     let monitor = tokio_metrics::RuntimeMonitor::new(&handle);\n        ///     let mut intervals = monitor.intervals();\n        ///     let mut next_interval = || intervals.next().unwrap();\n        ///\n        ///     async {\n        ///         tokio::time::sleep(std::time::Duration::from_millis(1)).await;\n        ///     }.await;\n        ///\n        ///     assert!(next_interval().total_noop_count > 0);\n        /// }\n        /// ```\n        pub 
total_noop_count: u64,\n\n        /// The maximum number of times any worker thread unparked but performed no work before parking\n        /// again.\n        ///\n        /// ##### Definition\n        /// This metric is derived from the maximum of\n        /// [`tokio::runtime::RuntimeMetrics::worker_noop_count`] across all worker threads.\n        ///\n        /// ##### See also\n        /// - [`RuntimeMetrics::total_noop_count`]\n        /// - [`RuntimeMetrics::min_noop_count`]\n        pub max_noop_count: u64,\n\n        /// The minimum number of times any worker thread unparked but performed no work before parking\n        /// again.\n        ///\n        /// ##### Definition\n        /// This metric is derived from the minimum of\n        /// [`tokio::runtime::RuntimeMetrics::worker_noop_count`] across all worker threads.\n        ///\n        /// ##### See also\n        /// - [`RuntimeMetrics::total_noop_count`]\n        /// - [`RuntimeMetrics::max_noop_count`]\n        pub min_noop_count: u64,\n\n        /// The number of tasks worker threads stole from another worker thread.\n        ///\n        /// The worker steal count increases by the amount of stolen tasks each time the worker\n        /// has processed its scheduled queue and successfully steals more pending tasks from another\n        /// worker.\n        ///\n        /// This metric only applies to the **multi-threaded** runtime and will always return `0` when\n        /// using the current thread runtime.\n        ///\n        /// ##### Definition\n        /// This metric is derived from the sum of [`tokio::runtime::RuntimeMetrics::worker_steal_count`] for\n        /// all worker threads.\n        ///\n        /// ##### See also\n        /// - [`RuntimeMetrics::min_steal_count`]\n        /// - [`RuntimeMetrics::max_steal_count`]\n        ///\n        /// ##### Examples\n        /// In the below example, a blocking channel is used to backup one worker thread:\n        /// ```\n        /// 
#[tokio::main(flavor = \"multi_thread\", worker_threads = 2)]\n        /// async fn main() {\n        ///     let handle = tokio::runtime::Handle::current();\n        ///     let monitor = tokio_metrics::RuntimeMonitor::new(&handle);\n        ///     let mut intervals = monitor.intervals();\n        ///     let mut next_interval = || intervals.next().unwrap();\n        ///\n        ///     let interval = next_interval(); // end of first sampling interval\n        ///     assert_eq!(interval.total_steal_count, 0);\n        ///     assert_eq!(interval.min_steal_count, 0);\n        ///     assert_eq!(interval.max_steal_count, 0);\n        ///\n        ///     // induce a steal\n        ///     async {\n        ///         let (tx, rx) = std::sync::mpsc::channel();\n        ///         // Move to the runtime.\n        ///         tokio::spawn(async move {\n        ///             // Spawn the task that sends to the channel\n        ///             tokio::spawn(async move {\n        ///                 tx.send(()).unwrap();\n        ///             });\n        ///             // Spawn a task that bumps the previous task out of the \"next\n        ///             // scheduled\" slot.\n        ///             tokio::spawn(async {});\n        ///             // Blocking receive on the channel.\n        ///             rx.recv().unwrap();\n        ///             flush_metrics().await;\n        ///         }).await.unwrap();\n        ///         flush_metrics().await;\n        ///     }.await;\n        ///\n        ///     let interval = { flush_metrics().await; next_interval() }; // end of interval 2\n        ///     println!(\"total={}; min={}; max={}\", interval.total_steal_count, interval.min_steal_count, interval.max_steal_count);\n        ///\n        ///     let interval = { flush_metrics().await; next_interval() }; // end of interval 3\n        ///     println!(\"total={}; min={}; max={}\", interval.total_steal_count, interval.min_steal_count, 
interval.max_steal_count);\n        /// }\n        ///\n        /// async fn flush_metrics() {\n        ///     let _ = tokio::time::sleep(std::time::Duration::ZERO).await;\n        /// }\n        /// ```\n        pub total_steal_count: u64,\n\n        /// The maximum number of tasks any worker thread stole from another worker thread.\n        ///\n        /// ##### Definition\n        /// This metric is derived from the maximum of [`tokio::runtime::RuntimeMetrics::worker_steal_count`]\n        /// across all worker threads.\n        ///\n        /// ##### See also\n        /// - [`RuntimeMetrics::total_steal_count`]\n        /// - [`RuntimeMetrics::min_steal_count`]\n        pub max_steal_count: u64,\n\n        /// The minimum number of tasks any worker thread stole from another worker thread.\n        ///\n        /// ##### Definition\n        /// This metric is derived from the minimum of [`tokio::runtime::RuntimeMetrics::worker_steal_count`]\n        /// across all worker threads.\n        ///\n        /// ##### See also\n        /// - [`RuntimeMetrics::total_steal_count`]\n        /// - [`RuntimeMetrics::max_steal_count`]\n        pub min_steal_count: u64,\n\n        /// The number of times worker threads stole tasks from another worker thread.\n        ///\n        /// The worker steal operations increases by one each time the worker has processed its\n        /// scheduled queue and successfully steals more pending tasks from another worker.\n        ///\n        /// This metric only applies to the **multi-threaded** runtime and will always return `0` when\n        /// using the current thread runtime.\n        ///\n        /// ##### Definition\n        /// This metric is derived from the sum of [`tokio::runtime::RuntimeMetrics::worker_steal_operations`]\n        /// for all worker threads.\n        ///\n        /// ##### See also\n        /// - [`RuntimeMetrics::min_steal_operations`]\n        /// - [`RuntimeMetrics::max_steal_operations`]\n        ///\n    
    /// ##### Examples\n        /// In the below example, a blocking channel is used to backup one worker thread:\n        /// ```\n        /// #[tokio::main(flavor = \"multi_thread\", worker_threads = 2)]\n        /// async fn main() {\n        ///     let handle = tokio::runtime::Handle::current();\n        ///     let monitor = tokio_metrics::RuntimeMonitor::new(&handle);\n        ///     let mut intervals = monitor.intervals();\n        ///     let mut next_interval = || intervals.next().unwrap();\n        ///\n        ///     let interval = next_interval(); // end of first sampling interval\n        ///     assert_eq!(interval.total_steal_operations, 0);\n        ///     assert_eq!(interval.min_steal_operations, 0);\n        ///     assert_eq!(interval.max_steal_operations, 0);\n        ///\n        ///     // induce a steal\n        ///     async {\n        ///         let (tx, rx) = std::sync::mpsc::channel();\n        ///         // Move to the runtime.\n        ///         tokio::spawn(async move {\n        ///             // Spawn the task that sends to the channel\n        ///             tokio::spawn(async move {\n        ///                 tx.send(()).unwrap();\n        ///             });\n        ///             // Spawn a task that bumps the previous task out of the \"next\n        ///             // scheduled\" slot.\n        ///             tokio::spawn(async {});\n        ///             // Blocking receive on the channel.\n        ///             rx.recv().unwrap();\n        ///             flush_metrics().await;\n        ///         }).await.unwrap();\n        ///         flush_metrics().await;\n        ///     }.await;\n        ///\n        ///     let interval = { flush_metrics().await; next_interval() }; // end of interval 2\n        ///     println!(\"total={}; min={}; max={}\", interval.total_steal_operations, interval.min_steal_operations, interval.max_steal_operations);\n        ///\n        ///     let interval = { 
flush_metrics().await; next_interval() }; // end of interval 3\n        ///     println!(\"total={}; min={}; max={}\", interval.total_steal_operations, interval.min_steal_operations, interval.max_steal_operations);\n        /// }\n        ///\n        /// async fn flush_metrics() {\n        ///     let _ = tokio::time::sleep(std::time::Duration::ZERO).await;\n        /// }\n        /// ```\n        pub total_steal_operations: u64,\n\n        /// The maximum number of times any worker thread stole tasks from another worker thread.\n        ///\n        /// ##### Definition\n        /// This metric is derived from the maximum of [`tokio::runtime::RuntimeMetrics::worker_steal_operations`]\n        /// across all worker threads.\n        ///\n        /// ##### See also\n        /// - [`RuntimeMetrics::total_steal_operations`]\n        /// - [`RuntimeMetrics::min_steal_operations`]\n        pub max_steal_operations: u64,\n\n        /// The minimum number of times any worker thread stole tasks from another worker thread.\n        ///\n        /// ##### Definition\n        /// This metric is derived from the minimum of [`tokio::runtime::RuntimeMetrics::worker_steal_operations`]\n        /// across all worker threads.\n        ///\n        /// ##### See also\n        /// - [`RuntimeMetrics::total_steal_operations`]\n        /// - [`RuntimeMetrics::max_steal_operations`]\n        pub min_steal_operations: u64,\n\n        /// The number of tasks scheduled from **outside** of the runtime.\n        ///\n        /// The remote schedule count increases by one each time a task is woken from **outside** of\n        /// the runtime. 
This usually means that a task is spawned or notified from a non-runtime\n        /// thread and must be queued using the Runtime's global queue, which tends to be slower.\n        ///\n        /// ##### Definition\n        /// This metric is derived from [`tokio::runtime::RuntimeMetrics::remote_schedule_count`].\n        ///\n        /// ##### Examples\n        /// In the below example, a remote schedule is induced by spawning a system thread, then\n        /// spawning a tokio task from that system thread:\n        /// ```\n        /// #[tokio::main(flavor = \"multi_thread\", worker_threads = 2)]\n        /// async fn main() {\n        ///     let handle = tokio::runtime::Handle::current();\n        ///     let monitor = tokio_metrics::RuntimeMonitor::new(&handle);\n        ///     let mut intervals = monitor.intervals();\n        ///     let mut next_interval = || intervals.next().unwrap();\n        ///\n        ///     let interval = next_interval(); // end of first sampling interval\n        ///     assert_eq!(interval.num_remote_schedules, 0);\n        ///\n        ///     // spawn a non-runtime thread\n        ///     std::thread::spawn(move || {\n        ///         // spawn two tasks from this non-runtime thread\n        ///         async move {\n        ///             handle.spawn(async {}).await;\n        ///             handle.spawn(async {}).await;\n        ///         }\n        ///     }).join().unwrap().await;\n        ///\n        ///     let interval = next_interval(); // end of second sampling interval\n        ///     assert_eq!(interval.num_remote_schedules, 2);\n        ///\n        ///     let interval = next_interval(); // end of third sampling interval\n        ///     assert_eq!(interval.num_remote_schedules, 0);\n        /// }\n        /// ```\n        pub num_remote_schedules: u64,\n\n        /// The number of tasks scheduled from worker threads.\n        ///\n        /// The local schedule count increases by one each time a task is 
woken from **inside** of the\n        /// runtime. This usually means that a task is spawned or notified from within a runtime thread\n        /// and will be queued on the worker-local queue.\n        ///\n        /// ##### Definition\n        /// This metric is derived from the sum of\n        /// [`tokio::runtime::RuntimeMetrics::worker_local_schedule_count`] across all worker threads.\n        ///\n        /// ##### See also\n        /// - [`RuntimeMetrics::min_local_schedule_count`]\n        /// - [`RuntimeMetrics::max_local_schedule_count`]\n        ///\n        /// ##### Examples\n        /// ###### With `current_thread` runtime\n        /// In the below example, two tasks are spawned from the context of a third tokio task:\n        /// ```\n        /// #[tokio::main(flavor = \"current_thread\")]\n        /// async fn main() {\n        ///     let handle = tokio::runtime::Handle::current();\n        ///     let monitor = tokio_metrics::RuntimeMonitor::new(&handle);\n        ///     let mut intervals = monitor.intervals();\n        ///     let mut next_interval = || intervals.next().unwrap();\n        ///\n        ///     let interval = { flush_metrics().await; next_interval() }; // end interval 2\n        ///     assert_eq!(interval.total_local_schedule_count, 0);\n        ///\n        ///     let task = async {\n        ///         tokio::spawn(async {}); // local schedule 1\n        ///         tokio::spawn(async {}); // local schedule 2\n        ///     };\n        ///\n        ///     let handle = tokio::spawn(task); // local schedule 3\n        ///\n        ///     let interval = { flush_metrics().await; next_interval() }; // end interval 2\n        ///     assert_eq!(interval.total_local_schedule_count, 3);\n        ///\n        ///     let _ = handle.await;\n        ///\n        ///     let interval = { flush_metrics().await; next_interval() }; // end interval 3\n        ///     assert_eq!(interval.total_local_schedule_count, 0);\n        /// }\n      
  ///\n        /// async fn flush_metrics() {\n        ///     tokio::task::yield_now().await;\n        /// }\n        /// ```\n        ///\n        /// ###### With `multi_thread` runtime\n        /// In the below example, 100 tasks are spawned:\n        /// ```\n        /// #[tokio::main(flavor = \"multi_thread\", worker_threads = 2)]\n        /// async fn main() {\n        ///     let handle = tokio::runtime::Handle::current();\n        ///     let monitor = tokio_metrics::RuntimeMonitor::new(&handle);\n        ///     let mut intervals = monitor.intervals();\n        ///     let mut next_interval = || intervals.next().unwrap();\n        ///\n        ///     let interval = next_interval(); // end of interval 1\n        ///     assert_eq!(interval.total_local_schedule_count, 0);\n        ///\n        ///     use std::sync::atomic::{AtomicBool, Ordering};\n        ///     static SPINLOCK: AtomicBool = AtomicBool::new(true);\n        ///\n        ///     // block the other worker thread\n        ///     tokio::spawn(async {\n        ///         while SPINLOCK.load(Ordering::SeqCst) {}\n        ///     });\n        ///\n        ///     // FIXME: why does this need to be in a `spawn`?\n        ///     let _ = tokio::spawn(async {\n        ///         // spawn 100 tasks\n        ///         for _ in 0..100 {\n        ///             tokio::spawn(async {});\n        ///         }\n        ///         // this spawns 1 more task\n        ///         flush_metrics().await;\n        ///     }).await;\n        ///\n        ///     // unblock the other worker thread\n        ///     SPINLOCK.store(false, Ordering::SeqCst);\n        ///\n        ///     let interval = { flush_metrics().await; next_interval() }; // end of interval 2\n        ///     assert_eq!(interval.total_local_schedule_count, 100 + 1);\n        /// }\n        ///\n        /// async fn flush_metrics() {\n        ///     let _ = tokio::time::sleep(std::time::Duration::ZERO).await;\n        /// }\n        /// 
```\n        pub total_local_schedule_count: u64,\n\n        /// The maximum number of tasks scheduled from any one worker thread.\n        ///\n        /// ##### Definition\n        /// This metric is derived from the maximum of\n        /// [`tokio::runtime::RuntimeMetrics::worker_local_schedule_count`] for all worker threads.\n        ///\n        /// ##### See also\n        /// - [`RuntimeMetrics::total_local_schedule_count`]\n        /// - [`RuntimeMetrics::min_local_schedule_count`]\n        pub max_local_schedule_count: u64,\n\n        /// The minimum number of tasks scheduled from any one worker thread.\n        ///\n        /// ##### Definition\n        /// This metric is derived from the minimum of\n        /// [`tokio::runtime::RuntimeMetrics::worker_local_schedule_count`] for all worker threads.\n        ///\n        /// ##### See also\n        /// - [`RuntimeMetrics::total_local_schedule_count`]\n        /// - [`RuntimeMetrics::max_local_schedule_count`]\n        pub min_local_schedule_count: u64,\n\n        /// The number of times worker threads saturated their local queues.\n        ///\n        /// The worker steal count increases by one each time the worker attempts to schedule a task\n        /// locally, but its local queue is full. 
When this happens, half of the\n        /// local queue is moved to the global queue.\n        ///\n        /// This metric only applies to the **multi-threaded** scheduler.\n        ///\n        /// ##### Definition\n        /// This metric is derived from the sum of\n        /// [`tokio::runtime::RuntimeMetrics::worker_overflow_count`] across all worker threads.\n        ///\n        /// ##### See also\n        /// - [`RuntimeMetrics::min_overflow_count`]\n        /// - [`RuntimeMetrics::max_overflow_count`]\n        ///\n        /// ##### Examples\n        /// ```\n        /// #[tokio::main(flavor = \"multi_thread\", worker_threads = 1)]\n        /// async fn main() {\n        ///     let handle = tokio::runtime::Handle::current();\n        ///     let monitor = tokio_metrics::RuntimeMonitor::new(&handle);\n        ///     let mut intervals = monitor.intervals();\n        ///     let mut next_interval = || intervals.next().unwrap();\n        ///\n        ///     let interval = next_interval(); // end of interval 1\n        ///     assert_eq!(interval.total_overflow_count, 0);\n        ///\n        ///     use std::sync::atomic::{AtomicBool, Ordering};\n        ///\n        ///     // spawn a ton of tasks\n        ///     let _ = tokio::spawn(async {\n        ///         // we do this in a `tokio::spawn` because it is impossible to\n        ///         // overflow the main task\n        ///         for _ in 0..300 {\n        ///             tokio::spawn(async {});\n        ///         }\n        ///     }).await;\n        ///\n        ///     let interval = { flush_metrics().await; next_interval() }; // end of interval 2\n        ///     assert_eq!(interval.total_overflow_count, 1);\n        /// }\n        ///\n        /// async fn flush_metrics() {\n        ///     let _ = tokio::time::sleep(std::time::Duration::from_millis(1)).await;\n        /// }\n        /// ```\n        pub total_overflow_count: u64,\n\n        /// The maximum number of times any one worker 
saturated its local queue.\n        ///\n        /// ##### Definition\n        /// This metric is derived from the maximum of\n        /// [`tokio::runtime::RuntimeMetrics::worker_overflow_count`] across all worker threads.\n        ///\n        /// ##### See also\n        /// - [`RuntimeMetrics::total_overflow_count`]\n        /// - [`RuntimeMetrics::min_overflow_count`]\n        pub max_overflow_count: u64,\n\n        /// The minimum number of times any one worker saturated its local queue.\n        ///\n        /// ##### Definition\n        /// This metric is derived from the maximum of\n        /// [`tokio::runtime::RuntimeMetrics::worker_overflow_count`] across all worker threads.\n        ///\n        /// ##### See also\n        /// - [`RuntimeMetrics::total_overflow_count`]\n        /// - [`RuntimeMetrics::max_overflow_count`]\n        pub min_overflow_count: u64,\n\n        /// The number of tasks that have been polled across all worker threads.\n        ///\n        /// The worker poll count increases by one each time a worker polls a scheduled task.\n        ///\n        /// ##### Definition\n        /// This metric is derived from the sum of\n        /// [`tokio::runtime::RuntimeMetrics::worker_poll_count`] across all worker threads.\n        ///\n        /// ##### See also\n        /// - [`RuntimeMetrics::min_polls_count`]\n        /// - [`RuntimeMetrics::max_polls_count`]\n        ///\n        /// ##### Examples\n        /// In the below example, 42 tasks are spawned and polled:\n        /// ```\n        /// #[tokio::main(flavor = \"current_thread\")]\n        /// async fn main() {\n        ///     let handle = tokio::runtime::Handle::current();\n        ///     let monitor = tokio_metrics::RuntimeMonitor::new(&handle);\n        ///     let mut intervals = monitor.intervals();\n        ///     let mut next_interval = || intervals.next().unwrap();\n        ///\n        ///     let interval = { flush_metrics().await; next_interval() }; // end of interval 
1\n        ///     assert_eq!(interval.total_polls_count, 0);\n        ///     assert_eq!(interval.min_polls_count, 0);\n        ///     assert_eq!(interval.max_polls_count, 0);\n        ///\n        ///     const N: u64 = 42;\n        ///\n        ///     for _ in 0..N {\n        ///         let _ = tokio::spawn(async {}).await;\n        ///     }\n        ///\n        ///     let interval = { flush_metrics().await; next_interval() }; // end of interval 2\n        ///     assert_eq!(interval.total_polls_count, N);\n        ///     assert_eq!(interval.min_polls_count, N);\n        ///     assert_eq!(interval.max_polls_count, N);\n        /// }\n        ///\n        /// async fn flush_metrics() {\n        ///     let _ = tokio::task::yield_now().await;\n        /// }\n        /// ```\n        pub total_polls_count: u64,\n\n        /// The maximum number of tasks that have been polled in any worker thread.\n        ///\n        /// ##### Definition\n        /// This metric is derived from the maximum of\n        /// [`tokio::runtime::RuntimeMetrics::worker_poll_count`] across all worker threads.\n        ///\n        /// ##### See also\n        /// - [`RuntimeMetrics::total_polls_count`]\n        /// - [`RuntimeMetrics::min_polls_count`]\n        pub max_polls_count: u64,\n\n        /// The minimum number of tasks that have been polled in any worker thread.\n        ///\n        /// ##### Definition\n        /// This metric is derived from the minimum of\n        /// [`tokio::runtime::RuntimeMetrics::worker_poll_count`] across all worker threads.\n        ///\n        /// ##### See also\n        /// - [`RuntimeMetrics::total_polls_count`]\n        /// - [`RuntimeMetrics::max_polls_count`]\n        pub min_polls_count: u64,\n\n        /// The total number of tasks currently scheduled in workers' local queues.\n        ///\n        /// Tasks that are spawned or notified from within a runtime thread are scheduled using that\n        /// worker's local queue. 
This metric returns the **current** number of tasks pending in all\n        /// workers' local queues. As such, the returned value may increase or decrease as new tasks\n        /// are scheduled and processed.\n        ///\n        /// ##### Definition\n        /// This metric is derived from [`tokio::runtime::RuntimeMetrics::worker_local_queue_depth`].\n        ///\n        /// ##### See also\n        /// - [`RuntimeMetrics::min_local_queue_depth`]\n        /// - [`RuntimeMetrics::max_local_queue_depth`]\n        ///\n        /// ##### Example\n        ///\n        /// ###### With `current_thread` runtime\n        /// The below example spawns 100 tasks:\n        /// ```\n        /// #[tokio::main(flavor = \"current_thread\")]\n        /// async fn main() {\n        ///     const N: usize = 100;\n        ///\n        ///     let handle = tokio::runtime::Handle::current();\n        ///     let monitor = tokio_metrics::RuntimeMonitor::new(&handle);\n        ///     let mut intervals = monitor.intervals();\n        ///     let mut next_interval = || intervals.next().unwrap();\n        ///\n        ///     let interval =  next_interval(); // end of interval 1\n        ///     assert_eq!(interval.total_local_queue_depth, 0);\n        ///\n        ///\n        ///     for _ in 0..N {\n        ///         tokio::spawn(async {});\n        ///     }\n        ///     let interval =  next_interval(); // end of interval 2\n        ///     assert_eq!(interval.total_local_queue_depth, N);\n        /// }\n        /// ```\n        ///\n        /// ###### With `multi_thread` runtime\n        /// The below example spawns 100 tasks and observes them in the\n        /// local queue:\n        /// ```\n        /// #[tokio::main(flavor = \"multi_thread\", worker_threads = 2)]\n        /// async fn main() {\n        ///     use std::sync::mpsc;\n        ///     use tokio::sync::oneshot;\n        ///\n        ///     const N: usize = 100;\n        /// \n        ///     let handle = 
tokio::runtime::Handle::current();\n        ///\n        ///     // block one worker so the other is the only one running\n        ///     let (block_tx, block_rx) = mpsc::channel::<()>();\n        ///     let (started_tx, started_rx) = oneshot::channel();\n        ///     tokio::spawn(async move {\n        ///         let _ = started_tx.send(());\n        ///         let _ = block_rx.recv();\n        ///     });\n        ///     let _ = started_rx.await;\n        ///\n        ///     // spawn + sample from the free worker thread\n        ///     let (depth_tx, depth_rx) = oneshot::channel();\n        ///     tokio::spawn(async move {\n        ///         let monitor = tokio_metrics::RuntimeMonitor::new(&handle);\n        ///         let mut intervals = monitor.intervals();\n        ///         let _ = intervals.next().unwrap(); // baseline\n        ///\n        ///         for _ in 0..N {\n        ///             tokio::spawn(async {});\n        ///         }\n        ///\n        ///         let depth = intervals.next().unwrap().total_local_queue_depth;\n        ///         let _ = depth_tx.send(depth);\n        ///     });\n        ///\n        ///     let depth = depth_rx.await.unwrap();\n        ///\n        ///     // Tokio may place one spawned task in a LIFO slot rather than the\n        ///     // local queue, which may not be reflected in `worker_local_queue_depth`,\n        ///     // so accept N or N - 1.\n        ///     assert!(depth == N || depth == N - 1, \"depth = {depth}\");\n        ///\n        ///     let _ = block_tx.send(());\n        /// }\n        /// ```\n        pub total_local_queue_depth: usize,\n\n        /// The maximum number of tasks currently scheduled any worker's local queue.\n        ///\n        /// ##### Definition\n        /// This metric is derived from the maximum of\n        /// [`tokio::runtime::RuntimeMetrics::worker_local_queue_depth`] across all worker threads.\n        ///\n        /// ##### See also\n        /// - 
[`RuntimeMetrics::total_local_queue_depth`]\n        /// - [`RuntimeMetrics::min_local_queue_depth`]\n        pub max_local_queue_depth: usize,\n\n        /// The minimum number of tasks currently scheduled any worker's local queue.\n        ///\n        /// ##### Definition\n        /// This metric is derived from the minimum of\n        /// [`tokio::runtime::RuntimeMetrics::worker_local_queue_depth`] across all worker threads.\n        ///\n        /// ##### See also\n        /// - [`RuntimeMetrics::total_local_queue_depth`]\n        /// - [`RuntimeMetrics::max_local_queue_depth`]\n        pub min_local_queue_depth: usize,\n\n        /// The number of tasks currently waiting to be executed in the runtime's blocking threadpool.\n        ///\n        /// ##### Definition\n        /// This metric is derived from [`tokio::runtime::RuntimeMetrics::blocking_queue_depth`].\n        pub blocking_queue_depth: usize,\n\n        /// The number of additional threads spawned by the runtime.\n        ///\n        /// ##### Definition\n        /// This metric is derived from [`tokio::runtime::RuntimeMetrics::num_blocking_threads`].\n        pub blocking_threads_count: usize,\n\n        /// The number of idle threads, which have spawned by the runtime for `spawn_blocking` calls.\n        ///\n        /// ##### Definition\n        /// This metric is derived from [`tokio::runtime::RuntimeMetrics::num_idle_blocking_threads`].\n        pub idle_blocking_threads_count: usize,\n\n        /// Returns the number of times that tasks have been forced to yield back to the scheduler after exhausting their task budgets.\n        ///\n        /// This count starts at zero when the runtime is created and increases by one each time a task yields due to exhausting its budget.\n        ///\n        /// The counter is monotonically increasing. 
It is never decremented or reset to zero.\n        ///\n        /// ##### Definition\n        /// This metric is derived from [`tokio::runtime::RuntimeMetrics::budget_forced_yield_count`].\n        pub budget_forced_yield_count: u64,\n\n        /// Returns the number of ready events processed by the runtime’s I/O driver.\n        ///\n        /// ##### Definition\n        /// This metric is derived from [`tokio::runtime::RuntimeMetrics::io_driver_ready_count`].\n        pub io_driver_ready_count: u64,\n    }\n}\n\nmacro_rules! define_semi_stable {\n    (\n    $(#[$($attributes:tt)*])*\n    $vis:vis struct $name:ident {\n        stable {\n            $($stable_name:ident: $stable_ty:ty),*\n            $(,)?\n        }\n        $(,)?\n        unstable {\n            $($unstable_name:ident: $unstable_ty:ty),*\n            $(,)?\n        }\n    }\n    ) => {\n        $(#[$($attributes)*])*\n        $vis struct $name {\n            $(\n                $stable_name: $stable_ty,\n            )*\n            $(\n                #[cfg(tokio_unstable)]\n                #[cfg_attr(docsrs, doc(cfg(all(feature = \"rt\", tokio_unstable))))]\n                $unstable_name: $unstable_ty,\n            )*\n        }\n    };\n}\n\ndefine_semi_stable! {\n    /// Snapshot of per-worker metrics\n    #[derive(Debug, Default)]\n    struct Worker {\n        stable {\n            worker: usize,\n            total_park_count: u64,\n            total_busy_duration: Duration,\n        }\n        unstable {\n            total_noop_count: u64,\n            total_steal_count: u64,\n            total_steal_operations: u64,\n            total_local_schedule_count: u64,\n            total_overflow_count: u64,\n            total_polls_count: u64,\n            poll_time_histogram: Vec<u64>,\n        }\n    }\n}\n\ndefine_semi_stable! 
{\n    /// Iterator returned by [`RuntimeMonitor::intervals`].\n    ///\n    /// See that method's documentation for more details.\n    #[derive(Debug)]\n    pub struct RuntimeIntervals {\n        stable {\n            runtime: runtime::RuntimeMetrics,\n            started_at: Instant,\n            workers: Vec<Worker>,\n        }\n        unstable {\n            // Number of tasks scheduled from *outside* of the runtime\n            num_remote_schedules: u64,\n            budget_forced_yield_count: u64,\n            io_driver_ready_count: u64,\n            // Cached bucket ranges, static config that doesn't change after runtime creation.\n            bucket_ranges: Vec<Range<Duration>>,\n        }\n    }\n}\n\nimpl RuntimeIntervals {\n    fn probe(&mut self) -> RuntimeMetrics {\n        let now = Instant::now();\n\n        let mut metrics = RuntimeMetrics {\n            workers_count: self.runtime.num_workers(),\n            live_tasks_count: self.runtime.num_alive_tasks(),\n            elapsed: now.saturating_duration_since(self.started_at),\n            global_queue_depth: self.runtime.global_queue_depth(),\n            min_park_count: u64::MAX,\n            min_busy_duration: Duration::from_secs(1000000000),\n            ..Default::default()\n        };\n\n        #[cfg(tokio_unstable)]\n        {\n            let num_remote_schedules = self.runtime.remote_schedule_count();\n            let budget_forced_yields = self.runtime.budget_forced_yield_count();\n            let io_driver_ready_events = self.runtime.io_driver_ready_count();\n\n            metrics.num_remote_schedules = num_remote_schedules.saturating_sub(self.num_remote_schedules);\n            metrics.min_noop_count = u64::MAX;\n            metrics.min_steal_count = u64::MAX;\n            metrics.min_local_schedule_count = u64::MAX;\n            metrics.min_overflow_count = u64::MAX;\n            metrics.min_polls_count = u64::MAX;\n            metrics.min_local_queue_depth = usize::MAX;\n            
metrics.mean_poll_duration_worker_min = Duration::MAX;\n            metrics.poll_time_histogram = PollTimeHistogram::new(\n                self.bucket_ranges\n                    .iter()\n                    .map(|range| HistogramBucket::new(range.start, range.end, 0))\n                    .collect(),\n            );\n            metrics.budget_forced_yield_count =\n                budget_forced_yields.saturating_sub(self.budget_forced_yield_count);\n            metrics.io_driver_ready_count = io_driver_ready_events.saturating_sub(self.io_driver_ready_count);\n\n            self.num_remote_schedules = num_remote_schedules;\n            self.budget_forced_yield_count = budget_forced_yields;\n            self.io_driver_ready_count = io_driver_ready_events;\n        }\n        self.started_at = now;\n\n        for worker in &mut self.workers {\n            worker.probe(&self.runtime, &mut metrics);\n        }\n\n        #[cfg(tokio_unstable)]\n        {\n            if metrics.total_polls_count == 0 {\n                debug_assert_eq!(metrics.mean_poll_duration, Duration::default());\n\n                metrics.mean_poll_duration_worker_max = Duration::default();\n                metrics.mean_poll_duration_worker_min = Duration::default();\n            }\n        }\n\n        metrics\n    }\n}\n\nimpl Iterator for RuntimeIntervals {\n    type Item = RuntimeMetrics;\n\n    fn next(&mut self) -> Option<RuntimeMetrics> {\n        Some(self.probe())\n    }\n}\n\nimpl RuntimeMonitor {\n    /// Creates a new [`RuntimeMonitor`].\n    pub fn new(runtime: &runtime::Handle) -> RuntimeMonitor {\n        let runtime = runtime.metrics();\n\n        RuntimeMonitor { runtime }\n    }\n\n    /// Produces an unending iterator of [`RuntimeMetrics`].\n    ///\n    /// Each sampling interval is defined by the time elapsed between advancements of the iterator\n    /// produced by [`RuntimeMonitor::intervals`]. 
The item type of this iterator is [`RuntimeMetrics`],\n    /// which is a bundle of runtime metrics that describe *only* changes occurring within that sampling\n    /// interval.\n    ///\n    /// # Example\n    ///\n    /// ```\n    /// use std::time::Duration;\n    ///\n    /// #[tokio::main]\n    /// async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {\n    ///     let handle = tokio::runtime::Handle::current();\n    ///     // construct the runtime metrics monitor\n    ///     let runtime_monitor = tokio_metrics::RuntimeMonitor::new(&handle);\n    ///\n    ///     // print runtime metrics every 500ms\n    ///     {\n    ///         tokio::spawn(async move {\n    ///             for interval in runtime_monitor.intervals() {\n    ///                 // pretty-print the metric interval\n    ///                 println!(\"{:?}\", interval);\n    ///                 // wait 500ms\n    ///                 tokio::time::sleep(Duration::from_millis(500)).await;\n    ///             }\n    ///         });\n    ///     }\n    ///\n    ///     // await some tasks\n    ///     tokio::join![\n    ///         do_work(),\n    ///         do_work(),\n    ///         do_work(),\n    ///     ];\n    ///\n    ///     Ok(())\n    /// }\n    ///\n    /// async fn do_work() {\n    ///     for _ in 0..25 {\n    ///         tokio::task::yield_now().await;\n    ///         tokio::time::sleep(Duration::from_millis(100)).await;\n    ///     }\n    /// }\n    /// ```\n    pub fn intervals(&self) -> RuntimeIntervals {\n        let started_at = Instant::now();\n\n        let workers = (0..self.runtime.num_workers())\n            .map(|worker| Worker::new(worker, &self.runtime))\n            .collect();\n\n        RuntimeIntervals {\n            runtime: self.runtime.clone(),\n            started_at,\n            workers,\n\n            #[cfg(tokio_unstable)]\n            num_remote_schedules: self.runtime.remote_schedule_count(),\n            #[cfg(tokio_unstable)]\n     
       budget_forced_yield_count: self.runtime.budget_forced_yield_count(),\n            #[cfg(tokio_unstable)]\n            io_driver_ready_count: self.runtime.io_driver_ready_count(),\n            #[cfg(tokio_unstable)]\n            bucket_ranges: (0..self.runtime.poll_time_histogram_num_buckets())\n                .map(|i| self.runtime.poll_time_histogram_bucket_range(i))\n                .collect(),\n        }\n    }\n}\n\nimpl Worker {\n    fn new(worker: usize, rt: &runtime::RuntimeMetrics) -> Worker {\n        #[allow(unused_mut, clippy::needless_update)]\n        let mut wrk = Worker {\n            worker,\n            total_park_count: rt.worker_park_count(worker),\n            total_busy_duration: rt.worker_total_busy_duration(worker),\n            ..Default::default()\n        };\n\n        #[cfg(tokio_unstable)]\n        {\n            let poll_time_histogram = if rt.poll_time_histogram_enabled() {\n                vec![0; rt.poll_time_histogram_num_buckets()]\n            } else {\n                vec![]\n            };\n            wrk.total_noop_count = rt.worker_noop_count(worker);\n            wrk.total_steal_count = rt.worker_steal_count(worker);\n            wrk.total_steal_operations = rt.worker_steal_operations(worker);\n            wrk.total_local_schedule_count = rt.worker_local_schedule_count(worker);\n            wrk.total_overflow_count = rt.worker_overflow_count(worker);\n            wrk.total_polls_count = rt.worker_poll_count(worker);\n            wrk.poll_time_histogram = poll_time_histogram;\n        };\n        wrk\n    }\n\n    fn probe(&mut self, rt: &runtime::RuntimeMetrics, metrics: &mut RuntimeMetrics) {\n        macro_rules! 
metric {\n            ( $sum:ident, $max:ident, $min:ident, $probe:ident ) => {{\n                let val = rt.$probe(self.worker);\n                let delta = val - self.$sum;\n                self.$sum = val;\n\n                metrics.$sum += delta;\n\n                if delta > metrics.$max {\n                    metrics.$max = delta;\n                }\n\n                if delta < metrics.$min {\n                    metrics.$min = delta;\n                }\n            }};\n        }\n\n        metric!(\n            total_park_count,\n            max_park_count,\n            min_park_count,\n            worker_park_count\n        );\n        metric!(\n            total_busy_duration,\n            max_busy_duration,\n            min_busy_duration,\n            worker_total_busy_duration\n        );\n\n        #[cfg(tokio_unstable)]\n        {\n            let mut worker_polls_count = self.total_polls_count;\n            let total_polls_count = metrics.total_polls_count;\n\n            metric!(\n                total_noop_count,\n                max_noop_count,\n                min_noop_count,\n                worker_noop_count\n            );\n            metric!(\n                total_steal_count,\n                max_steal_count,\n                min_steal_count,\n                worker_steal_count\n            );\n            metric!(\n                total_steal_operations,\n                max_steal_operations,\n                min_steal_operations,\n                worker_steal_operations\n            );\n            metric!(\n                total_local_schedule_count,\n                max_local_schedule_count,\n                min_local_schedule_count,\n                worker_local_schedule_count\n            );\n            metric!(\n                total_overflow_count,\n                max_overflow_count,\n                min_overflow_count,\n                worker_overflow_count\n            );\n            metric!(\n                
total_polls_count,\n                max_polls_count,\n                min_polls_count,\n                worker_poll_count\n            );\n\n            // Get the number of polls since last probe\n            worker_polls_count = self.total_polls_count.saturating_sub(worker_polls_count);\n\n            // Update the mean task poll duration if there were polls\n            if worker_polls_count > 0 {\n                let val = rt.worker_mean_poll_time(self.worker);\n\n                if val > metrics.mean_poll_duration_worker_max {\n                    metrics.mean_poll_duration_worker_max = val;\n                }\n\n                if val < metrics.mean_poll_duration_worker_min {\n                    metrics.mean_poll_duration_worker_min = val;\n                }\n\n                // First, scale the current value down\n                let ratio = total_polls_count as f64 / metrics.total_polls_count as f64;\n                let mut mean = metrics.mean_poll_duration.as_nanos() as f64 * ratio;\n\n                // Add the scaled current worker's mean poll duration\n                let ratio = worker_polls_count as f64 / metrics.total_polls_count as f64;\n                mean += val.as_nanos() as f64 * ratio;\n\n                metrics.mean_poll_duration = Duration::from_nanos(mean as u64);\n            }\n\n            // Update the histogram counts if there were polls since last count\n            if worker_polls_count > 0 {\n                for (bucket, entry) in metrics.poll_time_histogram.buckets_mut().iter_mut().enumerate() {\n                    let new = rt.poll_time_histogram_bucket_count(self.worker, bucket);\n                    let delta = new.saturating_sub(self.poll_time_histogram[bucket]);\n                    self.poll_time_histogram[bucket] = new;\n\n                    entry.add_count(delta);\n                }\n            }\n\n            // Local scheduled tasks is an absolute value\n            let local_scheduled_tasks = 
rt.worker_local_queue_depth(self.worker);\n            metrics.total_local_queue_depth = metrics.total_local_queue_depth.saturating_add(local_scheduled_tasks);\n\n            if local_scheduled_tasks > metrics.max_local_queue_depth {\n                metrics.max_local_queue_depth = local_scheduled_tasks;\n            }\n\n            if local_scheduled_tasks < metrics.min_local_queue_depth {\n                metrics.min_local_queue_depth = local_scheduled_tasks;\n            }\n\n            // Blocking queue depth is an absolute value too\n            metrics.blocking_queue_depth = rt.blocking_queue_depth();\n\n            metrics.blocking_threads_count = rt.num_blocking_threads();\n            metrics.idle_blocking_threads_count = rt.num_idle_blocking_threads();\n        }\n    }\n}\n\nderived_metrics!(\n    [RuntimeMetrics] {\n        stable {\n            /// Returns the ratio of the [`RuntimeMetrics::total_busy_duration`] to the [`RuntimeMetrics::elapsed`].\n            pub fn busy_ratio(&self) -> f64 {\n                self.total_busy_duration.as_nanos() as f64 / self.elapsed.as_nanos() as f64\n            }\n        }\n        unstable {\n            /// Returns the ratio of the [`RuntimeMetrics::total_polls_count`] to the [`RuntimeMetrics::total_noop_count`].\n            pub fn mean_polls_per_park(&self) -> f64 {\n                let total_park_count = self.total_park_count.saturating_sub(self.total_noop_count);\n                if total_park_count == 0 {\n                    0.0\n                } else {\n                    self.total_polls_count as f64 / total_park_count as f64\n                }\n            }\n        }\n    }\n);\n\n#[cfg(all(test, tokio_unstable, feature = \"metrique-integration\"))]\nmod metrique_integration_tests {\n    use super::*;\n    use metrique::test_util::test_metric;\n\n    /// Compile-time regression: if a field is added whose type doesn't\n    /// implement `CloseValue`, this will fail to compile.\n    #[test]\n    fn 
metrique_integration_produces_expected_fields() {\n        let metrics = RuntimeMetrics {\n            workers_count: 4,\n            total_park_count: 100,\n            poll_time_histogram: PollTimeHistogram::new(vec![\n                HistogramBucket::new(Duration::from_micros(0), Duration::from_micros(100), 10),\n                HistogramBucket::new(Duration::from_micros(100), Duration::from_micros(200), 0),\n                HistogramBucket::new(Duration::from_micros(200), Duration::from_micros(500), 3),\n            ]),\n            ..Default::default()\n        };\n\n        let entry = test_metric(metrics);\n\n        // Stable fields\n        assert_eq!(entry.metrics[\"workers_count\"], 4);\n        assert_eq!(entry.metrics[\"total_park_count\"], 100);\n        assert_eq!(entry.metrics[\"elapsed\"].as_f64(), 0.0);\n        assert_eq!(entry.metrics[\"total_busy_duration\"].as_f64(), 0.0);\n        assert_eq!(entry.metrics[\"global_queue_depth\"].as_u64(), 0);\n\n        // Unstable fields\n        assert_eq!(entry.metrics[\"mean_poll_duration\"].as_f64(), 0.0);\n        assert_eq!(entry.metrics[\"total_steal_count\"].as_u64(), 0);\n        assert_eq!(entry.metrics[\"total_polls_count\"].as_u64(), 0);\n\n        // 2 non-zero buckets (count 10 and 3) should produce 2 observations\n        let hist = &entry.metrics[\"poll_time_histogram\"];\n        assert_eq!(hist.distribution.len(), 2, \"expected 2 non-zero buckets\");\n\n        // midpoint of 0..100µs = 50µs, count = 10\n        match hist.distribution[0] {\n            metrique::writer::Observation::Repeated { total, occurrences } => {\n                assert_eq!(occurrences, 10);\n                assert!((total - 500.0).abs() < 0.01, \"expected 50 * 10 = 500, got {total}\");\n            }\n            other => panic!(\"expected Repeated, got {other:?}\"),\n        }\n\n        // midpoint of 200..500µs = 350µs, count = 3\n        match hist.distribution[1] {\n            
metrique::writer::Observation::Repeated { total, occurrences } => {\n                assert_eq!(occurrences, 3);\n                assert!((total - 1050.0).abs() < 0.01, \"expected 350 * 3 = 1050, got {total}\");\n            }\n            other => panic!(\"expected Repeated, got {other:?}\"),\n        }\n    }\n\n    /// Collect `RuntimeMetrics` from a live Tokio runtime and verify the pipeline produces valid output.\n    #[cfg(feature = \"rt\")]\n    #[test]\n    fn metrique_end_to_end() {\n        let rt = tokio::runtime::Builder::new_current_thread()\n            .enable_all()\n            .enable_metrics_poll_time_histogram()\n            .build()\n            .unwrap();\n\n        rt.block_on(async {\n            let handle = tokio::runtime::Handle::current();\n            let monitor = RuntimeMonitor::new(&handle);\n            let mut intervals = monitor.intervals();\n\n            let _ = intervals.next().unwrap();\n\n            // Spawn tasks to create some work for the runtime to poll.\n            let mut metrics_with_polls = None;\n            for _ in 0..4 {\n                for _ in 0..25 {\n                    tokio::spawn(async {\n                        tokio::task::yield_now().await;\n                    })\n                    .await\n                    .unwrap();\n                }\n                // Slow poll (>900µs) to land in the last histogram bucket.\n                tokio::spawn(async {\n                    std::thread::sleep(Duration::from_millis(1));\n                })\n                .await\n                .unwrap();\n\n                let metrics = intervals.next().unwrap();\n                let total_polls: u64 = metrics.poll_time_histogram.buckets().iter().map(|b| b.count()).sum();\n                if total_polls > 0 {\n                    metrics_with_polls = Some(metrics);\n                    break;\n                }\n            }\n            let metrics = metrics_with_polls.expect(\"expected polls to be recorded within 
4 sampled intervals\");\n\n            let expected_workers_count = metrics.workers_count;\n            let expected_non_zero_buckets = metrics\n                .poll_time_histogram\n                .buckets()\n                .iter()\n                .filter(|b| b.count() > 0)\n                .count();\n\n            let expected_total_polls: u64 = metrics.poll_time_histogram.buckets().iter().map(|b| b.count()).sum();\n            assert!(expected_workers_count > 0);\n            assert!(expected_total_polls > 0);\n\n            let last_bucket = metrics.poll_time_histogram.buckets().last().unwrap();\n\n            // Sanity check: Tokio's last histogram bucket ends at Duration::from_nanos(u64::MAX)\n            assert_eq!(last_bucket.range_end(), Duration::from_nanos(u64::MAX));\n            assert!(last_bucket.count() > 0, \"expected slow poll to land in last bucket\");\n            let last_bucket_start_us = last_bucket.range_start().as_micros() as f64;\n            let last_bucket_count = last_bucket.count();\n\n            let entry = test_metric(metrics);\n\n            assert_eq!(entry.metrics[\"workers_count\"], expected_workers_count as u64);\n            assert!(entry.metrics[\"elapsed\"].as_f64() >= 0.0);\n            assert!(entry.metrics[\"total_busy_duration\"].as_f64() >= 0.0);\n\n            let hist = &entry.metrics[\"poll_time_histogram\"];\n            assert_eq!(hist.distribution.len(), expected_non_zero_buckets);\n            let observed_total_occurrences: u64 = hist\n                .distribution\n                .iter()\n                .map(|obs| match obs {\n                    metrique::writer::Observation::Repeated { occurrences, .. 
} => *occurrences,\n                    other => panic!(\"expected Repeated, got {other:?}\"),\n                })\n                .sum();\n            assert_eq!(observed_total_occurrences, expected_total_polls);\n\n            // The last observation corresponds to the last histogram bucket.\n            // Verify it uses range_start as the representative value instead of a midpoint,\n            // since the last bucket range_end is Duration::from_nanos(u64::MAX).\n            let last_obs = hist.distribution.last().unwrap();\n            match last_obs {\n                metrique::writer::Observation::Repeated { total, occurrences } => {\n                    assert_eq!(*occurrences, last_bucket_count);\n                    let expected_total = last_bucket_start_us * last_bucket_count as f64;\n                    assert!(\n                        (total - expected_total).abs() < 0.01,\n                        \"last bucket should use range_start ({last_bucket_start_us}µs) as representative value, \\\n                         expected total={expected_total}, got {total}\"\n                    );\n                }\n                other => panic!(\"expected Repeated, got {other:?}\"),\n            }\n        });\n    }\n}\n"
  },
  {
    "path": "src/task/metrics_rs_integration.rs",
    "content": "use std::{fmt, time::Duration};\n\nuse super::{TaskIntervals, TaskMetrics, TaskMonitor};\nuse crate::metrics_rs::{metric_refs, DEFAULT_METRIC_SAMPLING_INTERVAL};\n\n/// A builder for the [`TaskMetricsReporter`] that wraps the [`TaskMonitor`], periodically\n/// reporting [`TaskMetrics`] to any configured [metrics-rs] recorder.\n///\n/// ### Published Metrics\n///\n/// The published metrics are the fields of [`TaskMetrics`], but with the\n/// `tokio_` prefix added, for example, `tokio_instrumented_count`. If you have multiple\n/// [`TaskMonitor`]s then it is strongly recommended to give each [`TaskMonitor`] a unique metric\n/// name or dimension value.\n///\n/// ### Usage\n///\n/// To upload metrics via [metrics-rs], you need to set up a reporter, which\n/// is actually what exports the metrics outside of the program. You must set\n/// up the reporter before you call [`describe_and_run`].\n///\n/// You can find exporters within the [metrics-rs] docs. One such reporter\n/// is the [metrics_exporter_prometheus] reporter, which makes metrics visible\n/// through Prometheus.\n///\n/// You can use it for example to export Prometheus metrics by listening on a local Unix socket\n/// called `prometheus.sock`, which you can access for debugging by\n/// `curl --unix-socket prometheus.sock localhost`, as follows:\n///\n/// ```\n/// use std::time::Duration;\n///\n/// use metrics::Key;\n///\n/// #[tokio::main]\n/// async fn main() {\n///     metrics_exporter_prometheus::PrometheusBuilder::new()\n///         .with_http_uds_listener(\"prometheus.sock\")\n///         .install()\n///         .unwrap();\n///     let monitor = tokio_metrics::TaskMonitor::new();\n///     tokio::task::spawn(\n///         tokio_metrics::TaskMetricsReporterBuilder::new(|name| {\n///             let name = name.replacen(\"tokio_\", \"my_task_\", 1);\n///             Key::from_parts(name, &[(\"application\", \"my_app\")])\n///         })\n///         // the default metric sampling interval is 
30 seconds, which is\n///         // too long for quick tests, so have it be 1 second.\n///         .with_interval(std::time::Duration::from_secs(1))\n///         .describe_and_run(monitor.clone()),\n///     );\n///     // Run some code\n///     tokio::task::spawn(monitor.instrument(async move {\n///         for _ in 0..1000 {\n///             tokio::time::sleep(Duration::from_millis(10)).await;\n///         }\n///     }))\n///     .await\n///     .unwrap();\n/// }\n/// ```\n///\n/// [`describe_and_run`]: TaskMetricsReporterBuilder::describe_and_run\n/// [metrics-rs]: metrics\n/// [metrics_exporter_prometheus]: https://docs.rs/metrics_exporter_prometheus\npub struct TaskMetricsReporterBuilder {\n    interval: Duration,\n    metrics_transformer: Box<dyn FnMut(&'static str) -> metrics::Key + Send>,\n}\n\nimpl fmt::Debug for TaskMetricsReporterBuilder {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        f.debug_struct(\"TaskMetricsReporterBuilder\")\n            .field(\"interval\", &self.interval)\n            // skip metrics_transformer field\n            .finish()\n    }\n}\n\nimpl TaskMetricsReporterBuilder {\n    /// Creates a new [`TaskMetricsReporterBuilder`] with a custom \"metrics transformer\". The custom\n    /// transformer is used during `build` to transform the metric names into metric keys, for\n    /// example to add dimensions. The string metric names used by this reporter all start with\n    /// `tokio_`. 
The default transformer is just [`metrics::Key::from_static_name`]\n    ///\n    /// For example, to attach a dimension named \"application\" with value \"my_app\", and to replace\n    /// `tokio_` with `my_task_`\n    /// ```\n    /// # use metrics::Key;\n    ///\n    /// #[tokio::main]\n    /// async fn main() {\n    ///     metrics_exporter_prometheus::PrometheusBuilder::new()\n    ///         .with_http_uds_listener(\"prometheus.sock\")\n    ///         .install()\n    ///         .unwrap();\n    ///     let monitor = tokio_metrics::TaskMonitor::new();\n    ///     tokio::task::spawn(\n    ///         tokio_metrics::TaskMetricsReporterBuilder::new(|name| {\n    ///             let name = name.replacen(\"tokio_\", \"my_task_\", 1);\n    ///             Key::from_parts(name, &[(\"application\", \"my_app\")])\n    ///         })\n    ///         .describe_and_run(monitor)\n    ///     );\n    /// }\n    /// ```\n    pub fn new(transformer: impl FnMut(&'static str) -> metrics::Key + Send + 'static) -> Self {\n        TaskMetricsReporterBuilder {\n            interval: DEFAULT_METRIC_SAMPLING_INTERVAL,\n            metrics_transformer: Box::new(transformer),\n        }\n    }\n\n    /// Set the metric sampling interval, default: 30 seconds.\n    ///\n    /// Note that this is the interval on which metrics are *sampled* from\n    /// the Tokio task and then set on the [metrics-rs] reporter. 
Uploading the\n    /// metrics upstream is controlled by the reporter set up in the\n    /// application, and is normally controlled by a different period.\n    ///\n    /// For example, if metrics are exported via Prometheus, that\n    /// normally operates in a pull-based fashion, and the actual collection\n    /// period is controlled by the Prometheus server, which periodically polls the\n    /// application's Prometheus exporter to get the latest value of the metrics.\n    ///\n    /// [metrics-rs]: metrics\n    pub fn with_interval(mut self, interval: Duration) -> Self {\n        self.interval = interval;\n        self\n    }\n\n    /// Build the [`TaskMetricsReporter`] with a specific [`TaskMonitor`]. This function will capture\n    /// the [`Counter`]s and [`Gauge`]s from the current [metrics-rs] reporter,\n    /// so if you are using [`with_local_recorder`], you should wrap this function and [`describe`]\n    /// with it.\n    ///\n    /// [`Counter`]: metrics::Counter\n    /// [`Gauge`]: metrics::Gauge\n    /// [`Histogram`]: metrics::Histogram\n    /// [metrics-rs]: metrics\n    /// [`with_local_recorder`]: metrics::with_local_recorder\n    /// [`describe`]: Self::describe\n    #[must_use = \"reporter does nothing unless run\"]\n    pub fn build_with_monitor(mut self, monitor: TaskMonitor) -> TaskMetricsReporter {\n        TaskMetricsReporter {\n            interval: self.interval,\n            intervals: monitor.intervals(),\n            emitter: TaskMetricRefs::capture(&mut self.metrics_transformer),\n        }\n    }\n\n    /// Call [`describe_counter`] etc. to describe the emitted metrics.\n    ///\n    /// Describing metrics makes the reporter attach descriptions and units to them,\n    /// which makes them easier to use. 
However, some reporters don't support\n    /// describing the same metric name more than once, so it is generally a good\n    /// idea to only call this function once per metric reporter.\n    ///\n    /// [`describe_counter`]: metrics::describe_counter\n    /// [metrics-rs]: metrics\n    pub fn describe(mut self) -> Self {\n        TaskMetricRefs::describe(&mut self.metrics_transformer);\n        self\n    }\n\n    /// Runs the reporter (within the returned future), [describing] the metrics beforehand.\n    ///\n    /// Describing metrics makes the reporter attach descriptions and units to them,\n    /// which makes them easier to use. However, some reporters don't support\n    /// describing the same metric name more than once. If you are emitting multiple\n    /// metrics via a single reporter, try to call [`describe`] once and [`run`] for each\n    /// task metrics reporter.\n    ///\n    /// ### Working with a custom reporter\n    ///\n    /// If you want to set a local metrics reporter, you shouldn't be calling this method,\n    /// but you should instead call `.describe().build()` within [`with_local_recorder`] and then\n    /// call `run` (see the docs on [`build_with_monitor`]).\n    ///\n    /// [describing]: Self::describe\n    /// [`describe`]: Self::describe\n    /// [`build_with_monitor`]: Self::build_with_monitor\n    /// [`run`]: TaskMetricsReporter::run\n    /// [`with_local_recorder`]: metrics::with_local_recorder\n    #[cfg(feature = \"rt\")]\n    pub async fn describe_and_run(self, monitor: TaskMonitor) {\n        self.describe().build_with_monitor(monitor).run().await;\n    }\n\n    /// Runs the reporter (within the returned future), not describing the metrics beforehand.\n    ///\n    /// ### Working with a custom reporter\n    ///\n    /// If you want to set a local metrics reporter, you shouldn't be calling this method,\n    /// but you should instead call `.describe().build()` within [`with_local_recorder`] and then\n    /// call [`run`] 
(see the docs on [`build_with_monitor`]).\n    ///\n    /// [`build_with_monitor`]: Self::build_with_monitor\n    /// [`run`]: TaskMetricsReporter::run\n    /// [`with_local_recorder`]: metrics::with_local_recorder\n    #[cfg(feature = \"rt\")]\n    pub async fn run_without_describing(self, monitor: TaskMonitor) {\n        self.build_with_monitor(monitor).run().await;\n    }\n}\n\n/// Collects metrics from a Tokio task and uploads them to [metrics_rs](metrics).\npub struct TaskMetricsReporter {\n    interval: Duration,\n    intervals: TaskIntervals,\n    emitter: TaskMetricRefs,\n}\n\nmetric_refs! {\n    [TaskMetricRefs] [elapsed] [TaskMetrics] [()] {\n        stable {\n            /// The number of tasks instrumented.\n            instrumented_count: Gauge<Count> [],\n            /// The number of tasks dropped.\n            dropped_count: Gauge<Count> [],\n            /// The number of tasks polled for the first time.\n            first_poll_count: Gauge<Count> [],\n            /// The total duration elapsed between the instant tasks are instrumented, and the instant they are first polled.\n            total_first_poll_delay: Counter<Microseconds> [],\n            /// The total number of times that tasks idled, waiting to be awoken.\n            total_idled_count: Gauge<Count> [],\n            /// The total duration that tasks idled.\n            total_idle_duration: Counter<Microseconds> [],\n            /// The maximum idle duration that a task took.\n            max_idle_duration: Counter<Microseconds> [],\n            /// The total number of times that tasks were awoken (and then, presumably, scheduled for execution).\n            total_scheduled_count: Gauge<Count> [],\n            /// The total duration that tasks spent waiting to be polled after awakening.\n            total_scheduled_duration: Counter<Microseconds> [],\n            /// The total number of times that tasks were polled.\n            total_poll_count: Gauge<Count> [],\n            /// The 
total duration elapsed during polls.\n            total_poll_duration: Counter<Microseconds> [],\n            /// The total number of times that polling tasks completed swiftly.\n            total_fast_poll_count: Gauge<Count> [],\n            /// The total duration of fast polls.\n            total_fast_poll_duration: Counter<Microseconds> [],\n            /// The total number of times that polling tasks completed slowly.\n            total_slow_poll_count: Gauge<Count> [],\n            /// The total duration of slow polls.\n            total_slow_poll_duration: Counter<Microseconds> [],\n            /// The total count of tasks with short scheduling delays.\n            total_short_delay_count: Gauge<Count> [],\n            /// The total count of tasks with long scheduling delays.\n            total_long_delay_count: Gauge<Count> [],\n            /// The total duration of tasks with short scheduling delays.\n            total_short_delay_duration: Counter<Microseconds> [],\n            /// The total number of times that a task had a long scheduling duration.\n            total_long_delay_duration: Counter<Microseconds> [],\n        }\n        stable_derived {\n            /// The mean duration elapsed between the instant tasks are instrumented, and the instant they are first polled.\n            mean_first_poll_delay: Counter<Microseconds> [],\n            /// The mean duration of idles.\n            mean_idle_duration: Counter<Microseconds> [],\n            /// The mean duration that tasks spent waiting to be executed after awakening.\n            mean_scheduled_duration: Counter<Microseconds> [],\n            /// The mean duration of polls.\n            mean_poll_duration: Counter<Microseconds> [],\n            /// The ratio between the number polls categorized as slow and fast.\n            slow_poll_ratio: Gauge<Percent> [],\n            /// The ratio of tasks exceeding [`long_delay_threshold`][TaskMonitor::long_delay_threshold].\n            
long_delay_ratio: Gauge<Percent> [],\n            /// The mean duration of fast polls.\n            mean_fast_poll_duration: Counter<Microseconds> [],\n            /// The average time taken for a task with a short scheduling delay to be executed after being scheduled.\n            mean_short_delay_duration: Counter<Microseconds> [],\n            /// The mean duration of slow polls.\n            mean_slow_poll_duration: Counter<Microseconds> [],\n            /// The average scheduling delay for a task which takes a long time to start executing after being scheduled.\n            mean_long_delay_duration: Counter<Microseconds> [],\n        }\n        unstable {}\n        unstable_derived {}\n    }\n}\n\nimpl fmt::Debug for TaskMetricsReporter {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        f.debug_struct(\"TaskMetricsReporter\")\n            .field(\"interval\", &self.interval)\n            // skip intervals field\n            .finish()\n    }\n}\n\nimpl TaskMetricsReporter {\n    /// Collect and publish metrics once to the configured [metrics_rs](metrics) reporter.\n    pub fn run_once(&mut self) {\n        let metrics = self\n            .intervals\n            .next()\n            .expect(\"TaskIntervals::next never returns None\");\n        self.emitter.emit(metrics, ());\n    }\n\n    /// Collect and publish metrics periodically to the configured [metrics_rs](metrics) reporter.\n    ///\n    /// You probably want to run this within its own task (using [`tokio::task::spawn`])\n    #[cfg(feature = \"rt\")]\n    pub async fn run(mut self) {\n        loop {\n            self.run_once();\n            tokio::time::sleep(self.interval).await;\n        }\n    }\n}\n"
  },
  {
    "path": "src/task.rs",
    "content": "use futures_util::task::{ArcWake, AtomicWaker};\nuse pin_project_lite::pin_project;\nuse std::future::Future;\nuse std::ops::Deref;\nuse std::pin::Pin;\nuse std::sync::atomic::{AtomicU64, Ordering::SeqCst};\nuse std::sync::Arc;\nuse std::task::{Context, Poll};\nuse tokio_stream::Stream;\n\n#[cfg(feature = \"rt\")]\nuse tokio::time::{Duration, Instant};\n\nuse crate::derived_metrics::derived_metrics;\n#[cfg(not(feature = \"rt\"))]\nuse std::time::{Duration, Instant};\n\n#[cfg(feature = \"metrics-rs-integration\")]\npub(crate) mod metrics_rs_integration;\n\n/// Monitors key metrics of instrumented tasks.\n///\n/// This struct is preferred for generating a variable number of monitors at runtime.\n/// If you can construct a fixed count of `static` monitors instead, see [`TaskMonitorCore`].\n///\n/// ### Basic Usage\n/// A [`TaskMonitor`] tracks key [metrics][TaskMetrics] of async tasks that have been\n/// [instrumented][`TaskMonitor::instrument`] with the monitor.\n///\n/// In the below example, a [`TaskMonitor`] is [constructed][TaskMonitor::new] and used to\n/// [instrument][TaskMonitor::instrument] three worker tasks; meanwhile, a fourth task\n/// prints [metrics][TaskMetrics] in 500ms [intervals][TaskMonitor::intervals].\n/// ```\n/// use std::time::Duration;\n///\n/// #[tokio::main]\n/// async fn main() {\n///     // construct a metrics monitor\n///     let metrics_monitor = tokio_metrics::TaskMonitor::new();\n///\n///     // print task metrics every 500ms\n///     {\n///         let metrics_monitor = metrics_monitor.clone();\n///         tokio::spawn(async move {\n///             for interval in metrics_monitor.intervals() {\n///                 // pretty-print the metric interval\n///                 println!(\"{:?}\", interval);\n///                 // wait 500ms\n///                 tokio::time::sleep(Duration::from_millis(500)).await;\n///             }\n///         });\n///     }\n///\n///     // instrument some tasks and await them\n///     
// note that the same TaskMonitor can be used for multiple tasks\n///     tokio::join![\n///         metrics_monitor.instrument(do_work()),\n///         metrics_monitor.instrument(do_work()),\n///         metrics_monitor.instrument(do_work())\n///     ];\n/// }\n///\n/// async fn do_work() {\n///     for _ in 0..25 {\n///         tokio::task::yield_now().await;\n///         tokio::time::sleep(Duration::from_millis(100)).await;\n///     }\n/// }\n/// ```\n///\n/// ### What should I instrument?\n/// In most cases, you should construct a *distinct* [`TaskMonitor`] for each kind of key task.\n///\n/// #### Instrumenting a web application\n/// For instance, a web service should have a distinct [`TaskMonitor`] for each endpoint. Within\n/// each endpoint, it's prudent to additionally instrument major sub-tasks, each with their own\n/// distinct [`TaskMonitor`]s. [*Why are my tasks slow?*](#why-are-my-tasks-slow) explores a\n/// debugging scenario for a web service that takes this approach to instrumentation. 
This\n/// approach is exemplified in the below example:\n/// ```no_run\n/// // The unabridged version of this snippet is in the examples directory of this crate.\n///\n/// #[tokio::main]\n/// async fn main() {\n///     // construct a TaskMonitor for root endpoint\n///     let monitor_root = tokio_metrics::TaskMonitor::new();\n///\n///     // construct TaskMonitors for create_users endpoint\n///     let monitor_create_user = CreateUserMonitors {\n///         // monitor for the entire endpoint\n///         route: tokio_metrics::TaskMonitor::new(),\n///         // monitor for database insertion subtask\n///         insert: tokio_metrics::TaskMonitor::new(),\n///     };\n///\n///     // build our application with two instrumented endpoints\n///     let app = axum::Router::new()\n///         // `GET /` goes to `root`\n///         .route(\"/\", axum::routing::get({\n///             let monitor = monitor_root.clone();\n///             move || monitor.instrument(async { \"Hello, World!\" })\n///         }))\n///         // `POST /users` goes to `create_user`\n///         .route(\"/users\", axum::routing::post({\n///             let monitors = monitor_create_user.clone();\n///             let route = monitors.route.clone();\n///             move |payload| {\n///                 route.instrument(create_user(payload, monitors))\n///             }\n///         }));\n///\n///     // print task metrics for each endpoint every 1s\n///     let metrics_frequency = std::time::Duration::from_secs(1);\n///     tokio::spawn(async move {\n///         let root_intervals = monitor_root.intervals();\n///         let create_user_route_intervals =\n///             monitor_create_user.route.intervals();\n///         let create_user_insert_intervals =\n///             monitor_create_user.insert.intervals();\n///         let create_user_intervals =\n///             create_user_route_intervals.zip(create_user_insert_intervals);\n///\n///         let intervals = 
root_intervals.zip(create_user_intervals);\n///         for (root_route, (create_user_route, create_user_insert)) in intervals {\n///             println!(\"root_route = {:#?}\", root_route);\n///             println!(\"create_user_route = {:#?}\", create_user_route);\n///             println!(\"create_user_insert = {:#?}\", create_user_insert);\n///             tokio::time::sleep(metrics_frequency).await;\n///         }\n///     });\n///\n///     // run the server\n///     let addr = std::net::SocketAddr::from(([127, 0, 0, 1], 3000));\n///     let listener = tokio::net::TcpListener::bind(addr).await.unwrap();\n///     axum::serve(listener, app)\n///         .await\n///         .unwrap();\n/// }\n///\n/// async fn create_user(\n///     axum::Json(payload): axum::Json<CreateUser>,\n///     monitors: CreateUserMonitors,\n/// ) -> impl axum::response::IntoResponse {\n///     let user = User { id: 1337, username: payload.username, };\n///     // instrument inserting the user into the db:\n///     let _ = monitors.insert.instrument(insert_user(user.clone())).await;\n///     (axum::http::StatusCode::CREATED, axum::Json(user))\n/// }\n///\n/// /* definitions of CreateUserMonitors, CreateUser and User omitted for brevity */\n///\n/// #\n/// # #[derive(Clone)]\n/// # struct CreateUserMonitors {\n/// #     // monitor for the entire endpoint\n/// #     route: tokio_metrics::TaskMonitor,\n/// #     // monitor for database insertion subtask\n/// #     insert: tokio_metrics::TaskMonitor,\n/// # }\n/// #\n/// # #[derive(serde::Deserialize)] struct CreateUser { username: String, }\n/// # #[derive(Clone, serde::Serialize)] struct User { id: u64, username: String, }\n/// #\n/// // insert the user into the database\n/// async fn insert_user(_: User) {\n///     /* implementation details elided */\n///     tokio::time::sleep(std::time::Duration::from_secs(1)).await;\n/// }\n/// ```\n///\n/// ### Why are my tasks slow?\n/// **Scenario:** You track key, high-level metrics about the 
customer response time. An alarm warns\n/// you that P90 latency for an endpoint exceeds your targets. What is causing the increase?\n///\n/// #### Identifying the high-level culprits\n/// A set of tasks will appear to execute more slowly if:\n/// - they are taking longer to poll (i.e., they consume too much CPU time)\n/// - they are waiting longer to be polled (e.g., they're waiting longer in tokio's scheduling\n///   queues)\n/// - they are waiting longer on external events to complete (e.g., asynchronous network requests)\n///\n/// The culprits, at a high level, may be some combination of these sources of latency. Fortunately,\n/// you have instrumented the key tasks of each of your endpoints with distinct [`TaskMonitor`]s.\n/// Using the monitors on the endpoint experiencing elevated latency, you begin by answering:\n/// - [*Are my tasks taking longer to poll?*](#are-my-tasks-taking-longer-to-poll)\n/// - [*Are my tasks spending more time waiting to be polled?*](#are-my-tasks-spending-more-time-waiting-to-be-polled)\n/// - [*Are my tasks spending more time waiting on external events to complete?*](#are-my-tasks-spending-more-time-waiting-on-external-events-to-complete)\n///\n/// ##### Are my tasks taking longer to poll?\n/// - **Did [`mean_poll_duration`][TaskMetrics::mean_poll_duration] increase?**\n///   This metric reflects the mean poll duration. If it increased, it means that, on average,\n///   individual polls tended to take longer. However, this does not necessarily imply increased\n///   task latency: An increase in poll durations could be offset by fewer polls.\n/// - **Did [`slow_poll_ratio`][TaskMetrics::slow_poll_ratio] increase?**\n///   This metric reflects the proportion of polls that were 'slow'. If it increased, it means that\n///   a greater proportion of polls performed excessive computation before yielding. 
This does not\n///   necessarily imply increased task latency: An increase in the proportion of slow polls could be\n///   offset by fewer or faster polls.\n/// - **Did [`mean_slow_poll_duration`][TaskMetrics::mean_slow_poll_duration] increase?**\n///   This metric reflects the mean duration of slow polls. If it increased, it means that, on\n///   average, slow polls got slower. This does not necessarily imply increased task latency: An\n///   increase in average slow poll duration could be offset by fewer or faster polls.\n///\n/// If so, [*why are my tasks taking longer to poll?*](#why-are-my-tasks-taking-longer-to-poll)\n///\n/// ##### Are my tasks spending more time waiting to be polled?\n/// - **Did [`mean_first_poll_delay`][TaskMetrics::mean_first_poll_delay] increase?**\n///   This metric reflects the mean delay between the instant a task is first instrumented and the\n///   instant it is first polled. If it increases, it means that, on average, tasks spent longer\n///   waiting to be initially run.\n/// - **Did [`mean_scheduled_duration`][TaskMetrics::mean_scheduled_duration] increase?**\n///   This metric reflects the mean duration that tasks spent in the scheduled state. The\n///   'scheduled' state of a task is the duration between the instant a task is awoken and the\n///   instant it is subsequently polled. If this metric increases, it means that, on average, tasks\n///   spent longer in tokio's queues before being polled.\n/// - **Did [`long_delay_ratio`][TaskMetrics::long_delay_ratio] increase?**\n///   This metric reflects the proportion of scheduling delays which were 'long'. If it increased,\n///   it means that a greater proportion of tasks experienced excessive delays before they could\n///   execute after being woken. 
This does not necessarily indicate an increase in latency, as this\n///   could be offset by fewer or faster task polls.\n/// - **Did [`mean_long_delay_duration`][TaskMetrics::mean_long_delay_duration] increase?**\n///   This metric reflects the mean duration of long delays. If it increased, it means that, on\n///   average, long delays got even longer. This does not necessarily imply increased task latency:\n///   an increase in average long delay duration could be offset by fewer or faster polls or more\n///   short schedules.\n///\n/// If so, [*why are my tasks spending more time waiting to be polled?*](#why-are-my-tasks-spending-more-time-waiting-to-be-polled)\n///\n/// ##### Are my tasks spending more time waiting on external events to complete?\n/// - **Did [`mean_idle_duration`][TaskMetrics::mean_idle_duration] increase?**\n///   This metric reflects the mean duration that tasks spent in the idle state. The idle state is\n///   the duration spanning the instant a task completes a poll, and the instant that it is next\n///   awoken. Tasks inhabit this state when they are waiting for task-external events to complete\n///   (e.g., an asynchronous sleep, a network request, file I/O, etc.). If this metric increases,\n///   tasks, in aggregate, spent more time waiting for task-external events to complete.\n///\n/// If so, [*why are my tasks spending more time waiting on external events to complete?*](#why-are-my-tasks-spending-more-time-waiting-on-external-events-to-complete)\n///\n/// #### Digging deeper\n/// Having [established the high-level culprits](#identifying-the-high-level-culprits), you now\n/// search for further explanation...\n///\n/// ##### Why are my tasks taking longer to poll?\n/// You observed that [your tasks are taking longer to poll](#are-my-tasks-taking-longer-to-poll).\n/// The culprit is likely some combination of:\n/// - **Your tasks are accidentally blocking.** Common culprits include:\n///     1. 
Using the Rust standard library's [filesystem](https://doc.rust-lang.org/std/fs/) or\n///        [networking](https://doc.rust-lang.org/std/net/) APIs.\n///        These APIs are synchronous; use tokio's [filesystem](https://docs.rs/tokio/latest/tokio/fs/)\n///        and [networking](https://docs.rs/tokio/latest/tokio/net/) APIs, instead.\n///     3. Calling [`block_on`](https://docs.rs/tokio/latest/tokio/runtime/struct.Handle.html#method.block_on).\n///     4. Invoking `println!` or other synchronous logging routines.\n///        Invocations of `println!` involve acquiring an exclusive lock on stdout, followed by a\n///        synchronous write to stdout.\n/// 2. **Your tasks are computationally expensive.** Common culprits include:\n///     1. TLS/cryptographic routines\n///     2. doing a lot of processing on bytes\n///     3. calling non-Tokio resources\n///\n/// ##### Why are my tasks spending more time waiting to be polled?\n/// You observed that [your tasks are spending more time waiting to be polled](#are-my-tasks-spending-more-time-waiting-to-be-polled)\n/// suggesting some combination of:\n/// - Your application is inflating the time elapsed between instrumentation and first poll.\n/// - Your tasks are being scheduled into tokio's global queue.\n/// - Other tasks are spending too long without yielding, thus backing up tokio's queues.\n///\n/// Start by asking: [*Is time-to-first-poll unusually high?*](#is-time-to-first-poll-unusually-high)\n///\n/// ##### Why are my tasks spending more time waiting on external events to complete?\n/// You observed that [your tasks are spending more time waiting waiting on external events to\n/// complete](#are-my-tasks-spending-more-time-waiting-on-external-events-to-complete). But what\n/// event? Fortunately, within the task experiencing increased idle times, you monitored several\n/// sub-tasks with distinct [`TaskMonitor`]s. 
For each of these sub-tasks, you [*you try to identify\n/// the performance culprits...*](#identifying-the-high-level-culprits)\n///\n/// #### Digging even deeper\n///\n/// ##### Is time-to-first-poll unusually high?\n/// Contrast these two metrics:\n/// - **[`mean_first_poll_delay`][TaskMetrics::mean_first_poll_delay]**\n///   This metric reflects the mean delay between the instant a task is first instrumented and the\n///   instant it is *first* polled.\n/// - **[`mean_scheduled_duration`][TaskMetrics::mean_scheduled_duration]**\n///   This metric reflects the mean delay between the instant when tasks were awoken and the\n///   instant they were subsequently polled.\n///\n/// If the former metric exceeds the latter (or increased unexpectedly more than the latter), then\n/// start by investigating [*if your application is artificially delaying the time-to-first-poll*](#is-my-application-delaying-the-time-to-first-poll).\n///\n/// Otherwise, investigate [*if other tasks are polling too long without yielding*](#are-other-tasks-polling-too-long-without-yielding).\n///\n/// ##### Is my application delaying the time-to-first-poll?\n/// You observed that [`mean_first_poll_delay`][TaskMetrics::mean_first_poll_delay] increased, more\n/// than [`mean_scheduled_duration`][TaskMetrics::mean_scheduled_duration]. Your application may be\n/// needlessly inflating the time elapsed between instrumentation and first poll. 
Are you\n/// constructing (and instrumenting) tasks separately from awaiting or spawning them?\n///\n/// For instance, in the below example, the application induces 1 second delay between when `task`\n/// is instrumented and when it is awaited:\n/// ```rust\n/// #[tokio::main]\n/// async fn main() {\n///     use tokio::time::Duration;\n///     let monitor = tokio_metrics::TaskMonitor::new();\n///\n///     let task = monitor.instrument(async move {});\n///\n///     let one_sec = Duration::from_secs(1);\n///     tokio::time::sleep(one_sec).await;\n///\n///     let _ = tokio::spawn(task).await;\n///\n///     assert!(monitor.cumulative().total_first_poll_delay >= one_sec);\n/// }\n/// ```\n///\n/// Otherwise, [`mean_first_poll_delay`][TaskMetrics::mean_first_poll_delay] might be unusually high\n/// because [*your application is spawning key tasks into tokio's global queue...*](#is-my-application-spawning-more-tasks-into-tokio’s-global-queue)\n///\n/// ##### Is my application spawning more tasks into tokio's global queue?\n/// Tasks awoken from threads *not* managed by the tokio runtime are scheduled with a slower,\n/// global \"injection\" queue.\n///\n/// You may be notifying runtime tasks from off-runtime. 
For instance, Given the following:\n/// ```ignore\n/// #[tokio::main]\n/// async fn main() {\n///     for _ in 0..100 {\n///         let (tx, rx) = oneshot::channel();\n///         tokio::spawn(async move {\n///             tx.send(());\n///         })\n///\n///         rx.await;\n///     }\n/// }\n/// ```\n/// One would expect this to run efficiently, however, the main task is run *off* the main runtime\n/// and the spawned tasks are *on* runtime, which means the snippet will run much slower than:\n/// ```ignore\n/// #[tokio::main]\n/// async fn main() {\n///     tokio::spawn(async {\n///         for _ in 0..100 {\n///             let (tx, rx) = oneshot::channel();\n///             tokio::spawn(async move {\n///                 tx.send(());\n///             })\n///\n///             rx.await;\n///         }\n///     }).await;\n/// }\n/// ```\n/// The slowdown is caused by a higher time between the `rx` task being notified (in `tx.send()`)\n/// and the task being polled.\n///\n/// ##### Are other tasks polling too long without yielding?\n/// You suspect that your tasks are slow because they're backed up in tokio's scheduling queues. For\n/// *each* of your application's [`TaskMonitor`]s you check to see [*if their associated tasks are\n/// taking longer to poll...*](#are-my-tasks-taking-longer-to-poll)\n///\n/// ### Limitations\n/// The [`TaskMetrics`] type uses [`u64`] to represent both event counters and durations (measured\n/// in nanoseconds). Consequently, event counters are accurate for ≤ [`u64::MAX`] events, and\n/// durations are accurate for ≤ [`u64::MAX`] nanoseconds.\n///\n/// The counters and durations of [`TaskMetrics`] produced by [`TaskMonitor::cumulative`] increase\n/// monotonically with each successive invocation of [`TaskMonitor::cumulative`]. 
Upon overflow,\n/// counters and durations wrap.\n///\n/// The counters and durations of [`TaskMetrics`] produced by [`TaskMonitor::intervals`] are\n/// calculated by computing the difference of metrics in successive invocations of\n/// [`TaskMonitor::cumulative`]. If, within a monitoring interval, an event occurs more than\n/// [`u64::MAX`] times, or a monitored duration exceeds [`u64::MAX`] nanoseconds, the metrics for\n/// that interval will overflow and not be accurate.\n///\n/// ##### Examples at the limits\n/// Consider the [`TaskMetrics::total_first_poll_delay`] metric. This metric accurately reflects\n/// delays between instrumentation and first-poll ≤ [`u64::MAX`] nanoseconds:\n/// ```\n/// use tokio::time::Duration;\n///\n/// #[tokio::main(flavor = \"current_thread\", start_paused = true)]\n/// async fn main() {\n///     let monitor = tokio_metrics::TaskMonitor::new();\n///     let mut interval = monitor.intervals();\n///     let mut next_interval = || interval.next().unwrap();\n///\n///     // construct and instrument a task, but do not `await` it\n///     let task = monitor.instrument(async {});\n///\n///     // this is the maximum duration representable by tokio_metrics\n///     let max_duration = Duration::from_nanos(u64::MAX);\n///\n///     // let's advance the clock by this amount and poll `task`\n///     let _ = tokio::time::advance(max_duration).await;\n///     task.await;\n///\n///     // durations ≤ `max_duration` are accurately reflected in this metric\n///     assert_eq!(next_interval().total_first_poll_delay, max_duration);\n///     assert_eq!(monitor.cumulative().total_first_poll_delay, max_duration);\n/// }\n/// ```\n/// If the total delay between instrumentation and first poll exceeds [`u64::MAX`] nanoseconds,\n/// [`total_first_poll_delay`][TaskMetrics::total_first_poll_delay] will overflow:\n/// ```\n/// # use tokio::time::Duration;\n/// #\n/// # #[tokio::main(flavor = \"current_thread\", start_paused = true)]\n/// # async fn main() 
{\n/// #    let monitor = tokio_metrics::TaskMonitor::new();\n/// #\n///  // construct and instrument a task, but do not `await` it\n///  let task_a = monitor.instrument(async {});\n///  let task_b = monitor.instrument(async {});\n///\n///  // this is the maximum duration representable by tokio_metrics\n///  let max_duration = Duration::from_nanos(u64::MAX);\n///\n///  // let's advance the clock by 1.5x this amount and await `task`\n///  let _ = tokio::time::advance(3 * (max_duration / 2)).await;\n///  task_a.await;\n///  task_b.await;\n///\n///  // the `total_first_poll_delay` has overflowed\n///  assert!(monitor.cumulative().total_first_poll_delay < max_duration);\n/// # }\n/// ```\n/// If *many* tasks are spawned, it will take far less than a [`u64::MAX`]-nanosecond delay to bring\n/// this metric to the precipice of overflow:\n/// ```\n/// # use tokio::time::Duration;\n/// #\n/// # #[tokio::main(flavor = \"current_thread\", start_paused = true)]\n/// # async fn main() {\n/// #     let monitor = tokio_metrics::TaskMonitor::new();\n/// #     let mut interval = monitor.intervals();\n/// #     let mut next_interval = || interval.next().unwrap();\n/// #\n/// // construct and instrument u16::MAX tasks, but do not `await` them\n/// let first_poll_count = u16::MAX as u64;\n/// let mut tasks = Vec::with_capacity(first_poll_count as usize);\n/// for _ in 0..first_poll_count { tasks.push(monitor.instrument(async {})); }\n///\n/// // this is the maximum duration representable by tokio_metrics\n/// let max_duration = u64::MAX;\n///\n/// // let's advance the clock justenough such that all of the time-to-first-poll\n/// // delays summed nearly equals `max_duration_nanos`, less some remainder...\n/// let iffy_delay = max_duration / (first_poll_count as u64);\n/// let small_remainder = max_duration % first_poll_count;\n/// let _ = tokio::time::advance(Duration::from_nanos(iffy_delay)).await;\n///\n/// // ...then poll all of the instrumented tasks:\n/// for task in tasks { 
task.await; }\n///\n/// // `total_first_poll_delay` is at the precipice of overflowing!\n/// assert_eq!(\n///     next_interval().total_first_poll_delay.as_nanos(),\n///     (max_duration - small_remainder) as u128\n/// );\n/// assert_eq!(\n///     monitor.cumulative().total_first_poll_delay.as_nanos(),\n///     (max_duration - small_remainder) as u128\n/// );\n/// # }\n/// ```\n/// Frequent, interval-sampled metrics will retain their accuracy, even if the cumulative\n/// metrics counter overflows at most once in the midst of an interval:\n/// ```\n/// # use tokio::time::Duration;\n/// # use tokio_metrics::TaskMonitor;\n/// #\n/// # #[tokio::main(flavor = \"current_thread\", start_paused = true)]\n/// # async fn main() {\n/// #     let monitor = TaskMonitor::new();\n/// #     let mut interval = monitor.intervals();\n/// #     let mut next_interval = || interval.next().unwrap();\n/// #\n///  let first_poll_count = u16::MAX as u64;\n///  let batch_size = first_poll_count / 3;\n///\n///  let max_duration_ns = u64::MAX;\n///  let iffy_delay_ns = max_duration_ns / first_poll_count;\n///\n///  // Instrument `batch_size` number of tasks, wait for `delay` nanoseconds,\n///  // then await the instrumented tasks.\n///  async fn run_batch(monitor: &TaskMonitor, batch_size: usize, delay: u64) {\n///      let mut tasks = Vec::with_capacity(batch_size);\n///      for _ in 0..batch_size { tasks.push(monitor.instrument(async {})); }\n///      let _ = tokio::time::advance(Duration::from_nanos(delay)).await;\n///      for task in tasks { task.await; }\n///  }\n///\n///  // this is how much `total_time_to_first_poll_ns` will\n///  // increase with each batch we run\n///  let batch_delay = iffy_delay_ns * batch_size;\n///\n///  // run batches 1, 2, and 3\n///  for i in 1..=3 {\n///      run_batch(&monitor, batch_size as usize, iffy_delay_ns).await;\n///      assert_eq!(1 * batch_delay as u128, next_interval().total_first_poll_delay.as_nanos());\n///      assert_eq!(i * batch_delay as 
u128, monitor.cumulative().total_first_poll_delay.as_nanos());\n///  }\n///\n///  /* now, the `total_time_to_first_poll_ns` counter is at the precipice of overflow */\n///  assert_eq!(monitor.cumulative().total_first_poll_delay.as_nanos(), max_duration_ns as u128);\n///\n///  // run batch 4\n///  run_batch(&monitor, batch_size as usize, iffy_delay_ns).await;\n///  // the interval counter remains accurate\n///  assert_eq!(1 * batch_delay as u128, next_interval().total_first_poll_delay.as_nanos());\n///  // but the cumulative counter has overflowed\n///  assert_eq!(batch_delay as u128 - 1, monitor.cumulative().total_first_poll_delay.as_nanos());\n/// # }\n/// ```\n/// If a cumulative metric overflows *more than once* in the midst of an interval,\n/// its interval-sampled counterpart will also overflow.\n#[derive(Clone, Debug)]\npub struct TaskMonitor {\n    base: Arc<TaskMonitorCore>,\n}\n\nimpl Deref for TaskMonitor {\n    type Target = TaskMonitorCore;\n\n    fn deref(&self) -> &Self::Target {\n        &self.base\n    }\n}\n\nimpl AsRef<TaskMonitorCore> for TaskMonitor {\n    fn as_ref(&self) -> &TaskMonitorCore {\n        &self.base\n    }\n}\n\n/// A non-`Clone`, non-allocated, static-friendly version of [`TaskMonitor`].\n/// See full docs on the [`TaskMonitor`] struct.\n///\n/// You should use [`TaskMonitorCore`] if you have a known count of monitors\n/// that you want to initialize as compile-time `static` structs.\n///\n/// You can also use [`TaskMonitorCore`] if you are already passing around an `Arc`-wrapped\n/// struct that you want to store your monitor in. 
This way, you can avoid double-`Arc`'ing it.\n///\n/// For other most other non-static usage, [`TaskMonitor`] will be more ergonomic.\n///\n/// ##### Examples\n///\n/// Static usage:\n/// ```\n/// use tokio_metrics::TaskMonitorCore;\n///\n/// static MONITOR: TaskMonitorCore = TaskMonitorCore::new();\n///\n/// #[tokio::main]\n/// async fn main() {\n///     assert_eq!(MONITOR.cumulative().first_poll_count, 0);\n///\n///     MONITOR.instrument(async {}).await;\n///     assert_eq!(MONITOR.cumulative().first_poll_count, 1);\n/// }\n/// ```\n///\n/// Usage with wrapper struct and [`TaskMonitorCore::instrument_with`]:\n/// ```\n/// use std::sync::Arc;\n/// use tokio_metrics::TaskMonitorCore;\n///\n/// #[derive(Clone)]\n/// struct SharedState(Arc<SharedStateInner>);\n/// struct SharedStateInner {\n///     monitor: TaskMonitorCore,\n///     other_state: SomeOtherSharedState,\n/// }\n/// /// Imagine: a type that wasn't `Clone` that you want to pass around\n/// /// in a similar way as the monitor\n/// struct SomeOtherSharedState;\n///\n/// impl AsRef<TaskMonitorCore> for SharedState {\n///     fn as_ref(&self) -> &TaskMonitorCore {\n///         &self.0.monitor\n///     }\n/// }\n///\n/// #[tokio::main]\n/// async fn main() {\n///     let state = SharedState(Arc::new(SharedStateInner {\n///         monitor: TaskMonitorCore::new(),\n///         other_state: SomeOtherSharedState,\n///     }));\n///\n///     assert_eq!(state.0.monitor.cumulative().first_poll_count, 0);\n///\n///     TaskMonitorCore::instrument_with(async {}, state.clone()).await;\n///     assert_eq!(state.0.monitor.cumulative().first_poll_count, 1);\n/// }\n/// ```\n#[derive(Debug)]\npub struct TaskMonitorCore {\n    metrics: RawMetrics,\n}\n\n/// Provides an interface for constructing a [`TaskMonitor`] with specialized configuration\n/// parameters.\n#[derive(Clone, Debug, Default)]\npub struct TaskMonitorBuilder(TaskMonitorCoreBuilder);\n\nimpl TaskMonitorBuilder {\n    /// Creates a new 
[`TaskMonitorBuilder`].\n    pub fn new() -> Self {\n        Self(TaskMonitorCoreBuilder::new())\n    }\n\n    /// Specifies the threshold at which polls are considered 'slow'.\n    pub fn with_slow_poll_threshold(&mut self, threshold: Duration) -> &mut Self {\n        self.0.slow_poll_threshold = Some(threshold);\n        self\n    }\n\n    /// Specifies the threshold at which schedules are considered 'long'.\n    pub fn with_long_delay_threshold(&mut self, threshold: Duration) -> &mut Self {\n        self.0.long_delay_threshold = Some(threshold);\n        self\n    }\n\n    /// Consume the builder, producing a [`TaskMonitor`].\n    pub fn build(self) -> TaskMonitor {\n        TaskMonitor {\n            base: Arc::new(self.0.build()),\n        }\n    }\n}\n\n/// Provides an interface for constructing a [`TaskMonitorCore`] with specialized configuration\n/// parameters.\n///\n/// ```\n/// use std::time::Duration;\n/// use tokio_metrics::TaskMonitorCoreBuilder;\n///\n/// static MONITOR: tokio_metrics::TaskMonitorCore = TaskMonitorCoreBuilder::new()\n///     .with_slow_poll_threshold(Duration::from_micros(100))\n///     .build();\n/// ```\n#[derive(Clone, Debug, Default)]\npub struct TaskMonitorCoreBuilder {\n    slow_poll_threshold: Option<Duration>,\n    long_delay_threshold: Option<Duration>,\n}\n\nimpl TaskMonitorCoreBuilder {\n    /// Creates a new [`TaskMonitorCoreBuilder`].\n    pub const fn new() -> Self {\n        Self {\n            slow_poll_threshold: None,\n            long_delay_threshold: None,\n        }\n    }\n\n    /// Specifies the threshold at which polls are considered 'slow'.\n    pub const fn with_slow_poll_threshold(self, threshold: Duration) -> Self {\n        Self {\n            slow_poll_threshold: Some(threshold),\n            ..self\n        }\n    }\n\n    /// Specifies the threshold at which schedules are considered 'long'.\n    pub const fn with_long_delay_threshold(self, threshold: Duration) -> Self {\n        Self {\n            
long_delay_threshold: Some(threshold),\n            ..self\n        }\n    }\n\n    /// Consume the builder, producing a [`TaskMonitorCore`].\n    pub const fn build(self) -> TaskMonitorCore {\n        let slow = match self.slow_poll_threshold {\n            Some(v) => v,\n            None => TaskMonitor::DEFAULT_SLOW_POLL_THRESHOLD,\n        };\n        let long = match self.long_delay_threshold {\n            Some(v) => v,\n            None => TaskMonitor::DEFAULT_LONG_DELAY_THRESHOLD,\n        };\n        TaskMonitorCore::create(slow, long)\n    }\n}\n\npin_project! {\n    /// An async task that has been instrumented with [`TaskMonitor::instrument`].\n    #[derive(Debug)]\n    pub struct Instrumented<T, M: AsRef<TaskMonitorCore> = TaskMonitor> {\n        // The task being instrumented\n        #[pin]\n        task: T,\n\n        // True when the task is polled for the first time\n        did_poll_once: bool,\n\n        // The instant, tracked as nanoseconds since `instrumented_at`, at which the future finished\n        // its last poll.\n        idled_at: u64,\n\n        // State shared between the task and its instrumented waker.\n        state: Arc<State<M>>,\n    }\n\n    impl<T, M: AsRef<TaskMonitorCore>> PinnedDrop for Instrumented<T, M> {\n        fn drop(this: Pin<&mut Self>) {\n            this.state.monitor.as_ref().metrics.dropped_count.fetch_add(1, SeqCst);\n        }\n    }\n}\n\n/// Key metrics of [instrumented][`TaskMonitor::instrument`] tasks.\n#[non_exhaustive]\n#[derive(Debug, Clone, Copy, Default)]\npub struct TaskMetrics {\n    /// The number of tasks instrumented.\n    ///\n    /// ##### Examples\n    /// ```\n    /// #[tokio::main]\n    /// async fn main() {\n    ///     let monitor = tokio_metrics::TaskMonitor::new();\n    ///     let mut interval = monitor.intervals();\n    ///     let mut next_interval = || interval.next().unwrap();\n    ///\n    ///     // 0 tasks have been instrumented\n    ///     
assert_eq!(next_interval().instrumented_count, 0);\n    ///\n    ///     monitor.instrument(async {});\n    ///\n    ///     // 1 task has been instrumented\n    ///     assert_eq!(next_interval().instrumented_count, 1);\n    ///\n    ///     monitor.instrument(async {});\n    ///     monitor.instrument(async {});\n    ///\n    ///     // 2 tasks have been instrumented\n    ///     assert_eq!(next_interval().instrumented_count, 2);\n    ///\n    ///     // since the last interval was produced, 0 tasks have been instrumented\n    ///     assert_eq!(next_interval().instrumented_count, 0);\n    /// }\n    /// ```\n    pub instrumented_count: u64,\n\n    /// The number of tasks dropped.\n    ///\n    /// ##### Examples\n    /// ```\n    /// #[tokio::main]\n    /// async fn main() {\n    ///     let monitor = tokio_metrics::TaskMonitor::new();\n    ///     let mut interval = monitor.intervals();\n    ///     let mut next_interval = || interval.next().unwrap();\n    ///\n    ///     // 0 tasks have been dropped\n    ///     assert_eq!(next_interval().dropped_count, 0);\n    ///\n    ///     let _task = monitor.instrument(async {});\n    ///\n    ///     // 0 tasks have been dropped\n    ///     assert_eq!(next_interval().dropped_count, 0);\n    ///\n    ///     monitor.instrument(async {}).await;\n    ///     drop(monitor.instrument(async {}));\n    ///\n    ///     // 2 tasks have been dropped\n    ///     assert_eq!(next_interval().dropped_count, 2);\n    ///\n    ///     // since the last interval was produced, 0 tasks have been dropped\n    ///     assert_eq!(next_interval().dropped_count, 0);\n    /// }\n    /// ```\n    pub dropped_count: u64,\n\n    /// The number of tasks polled for the first time.\n    ///\n    /// ##### Derived metrics\n    /// - **[`mean_first_poll_delay`][TaskMetrics::mean_first_poll_delay]**\n    ///   The mean duration elapsed between the instant tasks are instrumented, and the instant they\n    ///   are first polled.\n    ///\n    /// 
##### Examples\n    /// In the below example, no tasks are instrumented or polled in the first sampling interval;\n    /// one task is instrumented (but not polled) in the second sampling interval; that task is\n    /// awaited to completion (and, thus, polled at least once) in the third sampling interval; no\n    /// additional tasks are polled for the first time within the fourth sampling interval:\n    /// ```\n    /// #[tokio::main]\n    /// async fn main() {\n    ///     let metrics_monitor = tokio_metrics::TaskMonitor::new();\n    ///     let mut interval = metrics_monitor.intervals();\n    ///     let mut next_interval = || interval.next().unwrap();\n    ///\n    ///     // no tasks have been constructed, instrumented, and polled at least once\n    ///     assert_eq!(next_interval().first_poll_count, 0);\n    ///\n    ///     let task = metrics_monitor.instrument(async {});\n    ///\n    ///     // `task` has been constructed and instrumented, but has not yet been polled\n    ///     assert_eq!(next_interval().first_poll_count, 0);\n    ///\n    ///     // poll `task` to completion\n    ///     task.await;\n    ///\n    ///     // `task` has been constructed, instrumented, and polled at least once\n    ///     assert_eq!(next_interval().first_poll_count, 1);\n    ///\n    ///     // since the last interval was produced, 0 tasks have been constructed, instrumented and polled\n    ///     assert_eq!(next_interval().first_poll_count, 0);\n    ///\n    /// }\n    /// ```\n    pub first_poll_count: u64,\n\n    /// The total duration elapsed between the instant tasks are instrumented, and the instant they\n    /// are first polled.\n    ///\n    /// ##### Derived metrics\n    /// - **[`mean_first_poll_delay`][TaskMetrics::mean_first_poll_delay]**\n    ///   The mean duration elapsed between the instant tasks are instrumented, and the instant they\n    ///   are first polled.\n    ///\n    /// ##### Examples\n    /// In the below example, 0 tasks have been 
instrumented or polled within the first sampling\n    /// interval, a total of 500ms elapse between the instrumentation and polling of tasks within\n    /// the second sampling interval, and a total of 350ms elapse between the instrumentation and\n    /// polling of tasks within the third sampling interval:\n    /// ```\n    /// use tokio::time::Duration;\n    ///\n    /// #[tokio::main(flavor = \"current_thread\", start_paused = true)]\n    /// async fn main() {\n    ///     let monitor = tokio_metrics::TaskMonitor::new();\n    ///     let mut interval = monitor.intervals();\n    ///     let mut next_interval = || interval.next().unwrap();\n    ///\n    ///     // no tasks have yet been created, instrumented, or polled\n    ///     assert_eq!(monitor.cumulative().total_first_poll_delay, Duration::ZERO);\n    ///     assert_eq!(next_interval().total_first_poll_delay, Duration::ZERO);\n    ///\n    ///     // constructs and instruments a task, pauses a given duration, then awaits the task\n    ///     async fn instrument_pause_await(monitor: &tokio_metrics::TaskMonitor, pause: Duration) {\n    ///         let task = monitor.instrument(async move {});\n    ///         tokio::time::sleep(pause).await;\n    ///         task.await;\n    ///     }\n    ///\n    ///     // construct and await a task that pauses for 500ms between instrumentation and first poll\n    ///     let task_a_pause_time = Duration::from_millis(500);\n    ///     instrument_pause_await(&monitor, task_a_pause_time).await;\n    ///\n    ///     assert_eq!(next_interval().total_first_poll_delay, task_a_pause_time);\n    ///     assert_eq!(monitor.cumulative().total_first_poll_delay, task_a_pause_time);\n    ///\n    ///     // construct and await a task that pauses for 250ms between instrumentation and first poll\n    ///     let task_b_pause_time = Duration::from_millis(250);\n    ///     instrument_pause_await(&monitor, task_b_pause_time).await;\n    ///\n    ///     // construct and await a task 
that pauses for 100ms between instrumentation and first poll\n    ///     let task_c_pause_time = Duration::from_millis(100);\n    ///     instrument_pause_await(&monitor, task_c_pause_time).await;\n    ///\n    ///     assert_eq!(\n    ///         next_interval().total_first_poll_delay,\n    ///         task_b_pause_time + task_c_pause_time\n    ///     );\n    ///     assert_eq!(\n    ///         monitor.cumulative().total_first_poll_delay,\n    ///         task_a_pause_time + task_b_pause_time + task_c_pause_time\n    ///     );\n    /// }\n    /// ```\n    ///\n    /// ##### When is this metric recorded?\n    /// The delay between instrumentation and first poll is not recorded until the first poll\n    /// actually occurs:\n    /// ```\n    /// # use tokio::time::Duration;\n    /// #\n    /// # #[tokio::main(flavor = \"current_thread\", start_paused = true)]\n    /// # async fn main() {\n    /// #     let monitor = tokio_metrics::TaskMonitor::new();\n    /// #     let mut interval = monitor.intervals();\n    /// #     let mut next_interval = || interval.next().unwrap();\n    /// #\n    /// // we construct and instrument a task, but do not `await` it\n    /// let task = monitor.instrument(async {});\n    ///\n    /// // let's sleep for 1s before we poll `task`\n    /// let one_sec = Duration::from_secs(1);\n    /// let _ = tokio::time::sleep(one_sec).await;\n    ///\n    /// // although 1s has now elapsed since the instrumentation of `task`,\n    /// // this is not reflected in `total_first_poll_delay`...\n    /// assert_eq!(next_interval().total_first_poll_delay, Duration::ZERO);\n    /// assert_eq!(monitor.cumulative().total_first_poll_delay, Duration::ZERO);\n    ///\n    /// // ...and won't be until `task` is actually polled\n    /// task.await;\n    ///\n    /// // now, the 1s delay is reflected in `total_first_poll_delay`:\n    /// assert_eq!(next_interval().total_first_poll_delay, one_sec);\n    /// assert_eq!(monitor.cumulative().total_first_poll_delay, 
one_sec);\n    /// # }\n    /// ```\n    ///\n    /// ##### What if first-poll-delay is very large?\n    /// The first-poll-delay of *individual* tasks saturates at `u64::MAX` nanoseconds. However, if\n    /// the *total* first-poll-delay *across* monitored tasks exceeds `u64::MAX` nanoseconds, this\n    /// metric will wrap around:\n    /// ```\n    /// use tokio::time::Duration;\n    ///\n    /// #[tokio::main(flavor = \"current_thread\", start_paused = true)]\n    /// async fn main() {\n    ///     let monitor = tokio_metrics::TaskMonitor::new();\n    ///\n    ///     // construct and instrument a task, but do not `await` it\n    ///     let task = monitor.instrument(async {});\n    ///\n    ///     // this is the maximum duration representable by tokio_metrics\n    ///     let max_duration = Duration::from_nanos(u64::MAX);\n    ///\n    ///     // let's advance the clock by double this amount and await `task`\n    ///     let _ = tokio::time::advance(max_duration * 2).await;\n    ///     task.await;\n    ///\n    ///     // the time-to-first-poll of `task` saturates at `max_duration`\n    ///     assert_eq!(monitor.cumulative().total_first_poll_delay, max_duration);\n    ///\n    ///     // ...but note that the metric *will* wrap around if more tasks are involved\n    ///     let task = monitor.instrument(async {});\n    ///     let _ = tokio::time::advance(Duration::from_nanos(1)).await;\n    ///     task.await;\n    ///     assert_eq!(monitor.cumulative().total_first_poll_delay, Duration::ZERO);\n    /// }\n    /// ```\n    pub total_first_poll_delay: Duration,\n\n    /// The total number of times that tasks idled, waiting to be awoken.\n    ///\n    /// An idle is recorded as occurring if a non-zero duration elapses between the instant a\n    /// task completes a poll, and the instant that it is next awoken.\n    ///\n    /// ##### Derived metrics\n    /// - **[`mean_idle_duration`][TaskMetrics::mean_idle_duration]**\n    ///   The mean duration of idles.\n  
  ///\n    /// ##### Examples\n    /// ```\n    /// #[tokio::main(flavor = \"current_thread\", start_paused = true)]\n    /// async fn main() {\n    ///     let monitor = tokio_metrics::TaskMonitor::new();\n    ///     let mut interval = monitor.intervals();\n    ///     let mut next_interval = move || interval.next().unwrap();\n    ///     let one_sec = std::time::Duration::from_secs(1);\n    ///\n    ///     monitor.instrument(async {}).await;\n    ///\n    ///     assert_eq!(next_interval().total_idled_count, 0);\n    ///     assert_eq!(monitor.cumulative().total_idled_count, 0);\n    ///\n    ///     monitor.instrument(async move {\n    ///         tokio::time::sleep(one_sec).await;\n    ///     }).await;\n    ///\n    ///     assert_eq!(next_interval().total_idled_count, 1);\n    ///     assert_eq!(monitor.cumulative().total_idled_count, 1);\n    ///\n    ///     monitor.instrument(async {\n    ///         tokio::time::sleep(one_sec).await;\n    ///         tokio::time::sleep(one_sec).await;\n    ///     }).await;\n    ///\n    ///     assert_eq!(next_interval().total_idled_count, 2);\n    ///     assert_eq!(monitor.cumulative().total_idled_count, 3);\n    /// }\n    /// ```\n    pub total_idled_count: u64,\n\n    /// The total duration that tasks idled.\n    ///\n    /// An idle is recorded as occurring if a non-zero duration elapses between the instant a\n    /// task completes a poll, and the instant that it is next awoken.\n    ///\n    /// ##### Derived metrics\n    /// - **[`mean_idle_duration`][TaskMetrics::mean_idle_duration]**\n    ///   The mean duration of idles.\n    ///\n    /// ##### Examples\n    /// ```\n    /// #[tokio::main(flavor = \"current_thread\", start_paused = true)]\n    /// async fn main() {\n    ///     let monitor = tokio_metrics::TaskMonitor::new();\n    ///     let mut interval = monitor.intervals();\n    ///     let mut next_interval = move || interval.next().unwrap();\n    ///     let one_sec = 
std::time::Duration::from_secs(1);\n    ///     let two_sec = std::time::Duration::from_secs(2);\n    ///\n    ///     assert_eq!(next_interval().total_idle_duration.as_nanos(), 0);\n    ///     assert_eq!(monitor.cumulative().total_idle_duration.as_nanos(), 0);\n    ///\n    ///     monitor.instrument(async move {\n    ///         tokio::time::sleep(one_sec).await;\n    ///     }).await;\n    ///\n    ///     assert_eq!(next_interval().total_idle_duration, one_sec);\n    ///     assert_eq!(monitor.cumulative().total_idle_duration, one_sec);\n    ///\n    ///     monitor.instrument(async move {\n    ///         tokio::time::sleep(two_sec).await;\n    ///     }).await;\n    ///\n    ///     assert_eq!(next_interval().total_idle_duration, two_sec);\n    ///     assert_eq!(monitor.cumulative().total_idle_duration, one_sec + two_sec);\n    /// }\n    /// ```\n    pub total_idle_duration: Duration,\n\n    /// The maximum idle duration that a task took.\n    ///\n    /// An idle is recorded as occurring if a non-zero duration elapses between the instant a\n    /// task completes a poll, and the instant that it is next awoken.\n    ///\n    /// ##### Examples\n    /// ```\n    /// #[tokio::main(flavor = \"current_thread\", start_paused = true)]\n    /// async fn main() {\n    ///     let monitor = tokio_metrics::TaskMonitor::new();\n    ///     let mut interval = monitor.intervals();\n    ///     let mut next_interval = move || interval.next().unwrap();\n    ///     let one_sec = std::time::Duration::from_secs(1);\n    ///     let two_sec = std::time::Duration::from_secs(2);\n    ///\n    ///     assert_eq!(next_interval().max_idle_duration.as_nanos(), 0);\n    ///     assert_eq!(monitor.cumulative().max_idle_duration.as_nanos(), 0);\n    ///\n    ///     monitor.instrument(async move {\n    ///         tokio::time::sleep(one_sec).await;\n    ///     }).await;\n    ///\n    ///     assert_eq!(next_interval().max_idle_duration, one_sec);\n    ///     
assert_eq!(monitor.cumulative().max_idle_duration, one_sec);\n    ///\n    ///     monitor.instrument(async move {\n    ///         tokio::time::sleep(two_sec).await;\n    ///     }).await;\n    ///\n    ///     assert_eq!(next_interval().max_idle_duration, two_sec);\n    ///     assert_eq!(monitor.cumulative().max_idle_duration, two_sec);\n    ///\n    ///     monitor.instrument(async move {\n    ///         tokio::time::sleep(one_sec).await;\n    ///     }).await;\n    ///\n    ///     assert_eq!(next_interval().max_idle_duration, one_sec);\n    ///     assert_eq!(monitor.cumulative().max_idle_duration, two_sec);\n    /// }\n    /// ```\n    pub max_idle_duration: Duration,\n\n    /// The total number of times that tasks were awoken (and then, presumably, scheduled for\n    /// execution).\n    ///\n    /// ##### Definition\n    /// This metric is equal to [`total_short_delay_count`][TaskMetrics::total_short_delay_count]\n    /// \\+ [`total_long_delay_count`][TaskMetrics::total_long_delay_count].\n    ///\n    /// ##### Derived metrics\n    /// - **[`mean_scheduled_duration`][TaskMetrics::mean_scheduled_duration]**\n    ///   The mean duration that tasks spent waiting to be executed after awakening.\n    ///\n    /// ##### Examples\n    /// In the below example, a task yields to the scheduler a varying number of times between\n    /// sampling intervals; this metric is equal to the number of times the task yielded:\n    /// ```\n    /// #[tokio::main]\n    /// async fn main(){\n    ///     let metrics_monitor = tokio_metrics::TaskMonitor::new();\n    ///\n    ///     // [A] no tasks have been created, instrumented, and polled more than once\n    ///     assert_eq!(metrics_monitor.cumulative().total_scheduled_count, 0);\n    ///\n    ///     // [B] a `task` is created and instrumented\n    ///     let task = {\n    ///         let monitor = metrics_monitor.clone();\n    ///         metrics_monitor.instrument(async move {\n    ///             let mut interval = 
monitor.intervals();\n    ///             let mut next_interval = move || interval.next().unwrap();\n    ///\n    ///             // [E] `task` has not yet yielded to the scheduler, and\n    ///             // thus has not yet been scheduled since its first `poll`\n    ///             assert_eq!(next_interval().total_scheduled_count, 0);\n    ///\n    ///             tokio::task::yield_now().await; // yield to the scheduler\n    ///\n    ///             // [F] `task` has yielded to the scheduler once (and thus been\n    ///             // scheduled once) since the last sampling interval\n    ///             assert_eq!(next_interval().total_scheduled_count, 1);\n    ///\n    ///             tokio::task::yield_now().await; // yield to the scheduler\n    ///             tokio::task::yield_now().await; // yield to the scheduler\n    ///             tokio::task::yield_now().await; // yield to the scheduler\n    ///\n    ///             // [G] `task` has yielded to the scheduler thrice (and thus been\n    ///             // scheduled thrice) since the last sampling interval\n    ///             assert_eq!(next_interval().total_scheduled_count, 3);\n    ///\n    ///             tokio::task::yield_now().await; // yield to the scheduler\n    ///\n    ///             next_interval\n    ///         })\n    ///     };\n    ///\n    ///     // [C] `task` has not yet been polled at all\n    ///     assert_eq!(metrics_monitor.cumulative().first_poll_count, 0);\n    ///     assert_eq!(metrics_monitor.cumulative().total_scheduled_count, 0);\n    ///\n    ///     // [D] poll `task` to completion\n    ///     let mut next_interval = task.await;\n    ///\n    ///     // [H] `task` has been polled 1 times since the last sample\n    ///     assert_eq!(next_interval().total_scheduled_count, 1);\n    ///\n    ///     // [I] `task` has been polled 0 times since the last sample\n    ///     assert_eq!(next_interval().total_scheduled_count, 0);\n    ///\n    ///     // [J] `task` has yielded 
to the scheduler a total of five times\n    ///     assert_eq!(metrics_monitor.cumulative().total_scheduled_count, 5);\n    /// }\n    /// ```\n    #[doc(alias = \"total_delay_count\")]\n    pub total_scheduled_count: u64,\n\n    /// The total duration that tasks spent waiting to be polled after awakening.\n    ///\n    /// ##### Definition\n    /// This metric is equal to [`total_short_delay_duration`][TaskMetrics::total_short_delay_duration]\n    /// \\+ [`total_long_delay_duration`][TaskMetrics::total_long_delay_duration].\n    ///\n    /// ##### Derived metrics\n    /// - **[`mean_scheduled_duration`][TaskMetrics::mean_scheduled_duration]**\n    ///   The mean duration that tasks spent waiting to be executed after awakening.\n    ///\n    /// ##### Examples\n    /// ```\n    /// use tokio::time::Duration;\n    ///\n    /// #[tokio::main(flavor = \"current_thread\")]\n    /// async fn main() {\n    ///     let metrics_monitor = tokio_metrics::TaskMonitor::new();\n    ///     let mut interval = metrics_monitor.intervals();\n    ///     let mut next_interval = || interval.next().unwrap();\n    ///\n    ///     // construct and instrument and spawn a task that yields endlessly\n    ///     tokio::spawn(metrics_monitor.instrument(async {\n    ///         loop { tokio::task::yield_now().await }\n    ///     }));\n    ///\n    ///     tokio::task::yield_now().await;\n    ///\n    ///     // block the executor for 1 second\n    ///     std::thread::sleep(Duration::from_millis(1000));\n    ///\n    ///     tokio::task::yield_now().await;\n    ///\n    ///     // `endless_task` will have spent approximately one second waiting\n    ///     let total_scheduled_duration = next_interval().total_scheduled_duration;\n    ///     assert!(total_scheduled_duration >= Duration::from_millis(1000));\n    ///     assert!(total_scheduled_duration <= Duration::from_millis(1100));\n    /// }\n    /// ```\n    #[doc(alias = \"total_delay_duration\")]\n    pub total_scheduled_duration: 
Duration,\n\n    /// The total number of times that tasks were polled.\n    ///\n    /// ##### Definition\n    /// This metric is equal to [`total_fast_poll_count`][TaskMetrics::total_fast_poll_count]\n    /// \\+ [`total_slow_poll_count`][TaskMetrics::total_slow_poll_count].\n    ///\n    /// ##### Derived metrics\n    /// - **[`mean_poll_duration`][TaskMetrics::mean_poll_duration]**\n    ///   The mean duration of polls.\n    ///\n    /// ##### Examples\n    /// In the below example, a task with multiple yield points is await'ed to completion; this\n    /// metric reflects the number of `await`s within each sampling interval:\n    /// ```\n    /// #[tokio::main]\n    /// async fn main() {\n    ///     let metrics_monitor = tokio_metrics::TaskMonitor::new();\n    ///\n    ///     // [A] no tasks have been created, instrumented, and polled more than once\n    ///     assert_eq!(metrics_monitor.cumulative().first_poll_count, 0);\n    ///\n    ///     // [B] a `task` is created and instrumented\n    ///     let task = {\n    ///         let monitor = metrics_monitor.clone();\n    ///         metrics_monitor.instrument(async move {\n    ///             let mut interval = monitor.intervals();\n    ///             let mut next_interval = move || interval.next().unwrap();\n    ///\n    ///             // [E] task is in the midst of its first poll\n    ///             assert_eq!(next_interval().total_poll_count, 0);\n    ///\n    ///             tokio::task::yield_now().await; // poll 1\n    ///\n    ///             // [F] task has been polled 1 time\n    ///             assert_eq!(next_interval().total_poll_count, 1);\n    ///\n    ///             tokio::task::yield_now().await; // poll 2\n    ///             tokio::task::yield_now().await; // poll 3\n    ///             tokio::task::yield_now().await; // poll 4\n    ///\n    ///             // [G] task has been polled 3 times\n    ///             assert_eq!(next_interval().total_poll_count, 3);\n    ///\n    ///         
    tokio::task::yield_now().await; // poll 5\n    ///\n    ///             next_interval                   // poll 6\n    ///         })\n    ///     };\n    ///\n    ///     // [C] `task` has not yet been polled at all\n    ///     assert_eq!(metrics_monitor.cumulative().total_poll_count, 0);\n    ///\n    ///     // [D] poll `task` to completion\n    ///     let mut next_interval = task.await;\n    ///\n    ///     // [H] `task` has been polled 2 times since the last sample\n    ///     assert_eq!(next_interval().total_poll_count, 2);\n    ///\n    ///     // [I] `task` has been polled 0 times since the last sample\n    ///     assert_eq!(next_interval().total_poll_count, 0);\n    ///\n    ///     // [J] `task` has been polled 6 times\n    ///     assert_eq!(metrics_monitor.cumulative().total_poll_count, 6);\n    /// }\n    /// ```\n    pub total_poll_count: u64,\n\n    /// The total duration elapsed during polls.\n    ///\n    /// ##### Definition\n    /// This metric is equal to [`total_fast_poll_duration`][TaskMetrics::total_fast_poll_duration]\n    /// \\+ [`total_slow_poll_duration`][TaskMetrics::total_slow_poll_duration].\n    ///\n    /// ##### Derived metrics\n    /// - **[`mean_poll_duration`][TaskMetrics::mean_poll_duration]**\n    ///   The mean duration of polls.\n    ///\n    /// #### Examples\n    /// ```\n    /// use tokio::time::Duration;\n    ///\n    /// #[tokio::main(flavor = \"current_thread\", start_paused = true)]\n    /// async fn main() {\n    ///     let monitor = tokio_metrics::TaskMonitor::new();\n    ///     let mut interval = monitor.intervals();\n    ///     let mut next_interval = move || interval.next().unwrap();\n    ///\n    ///     assert_eq!(next_interval().total_poll_duration, Duration::ZERO);\n    ///\n    ///     monitor.instrument(async {\n    ///         tokio::time::advance(Duration::from_secs(1)).await; // poll 1 (1s)\n    ///         tokio::time::advance(Duration::from_secs(1)).await; // poll 2 (1s)\n    ///         () 
                                                 // poll 3 (0s)\n    ///     }).await;\n    ///\n    ///     assert_eq!(next_interval().total_poll_duration, Duration::from_secs(2));\n    /// }\n    /// ```\n    pub total_poll_duration: Duration,\n\n    /// The total number of times that polling tasks completed swiftly.\n    ///\n    /// Here, 'swiftly' is defined as completing in strictly less time than\n    /// [`slow_poll_threshold`][TaskMonitor::slow_poll_threshold].\n    ///\n    /// ##### Derived metrics\n    /// - **[`mean_fast_poll_duration`][TaskMetrics::mean_fast_poll_duration]**\n    ///   The mean duration of fast polls.\n    ///\n    /// ##### Examples\n    /// In the below example, 0 polls occur within the first sampling interval, 3 fast polls occur\n    /// within the second sampling interval, and 2 fast polls occur within the third sampling\n    /// interval:\n    /// ```\n    /// use std::future::Future;\n    /// use std::time::Duration;\n    ///\n    /// #[tokio::main]\n    /// async fn main() {\n    ///     let metrics_monitor = tokio_metrics::TaskMonitor::new();\n    ///     let mut interval = metrics_monitor.intervals();\n    ///     let mut next_interval = || interval.next().unwrap();\n    ///\n    ///     // no tasks have been constructed, instrumented, or polled\n    ///     assert_eq!(next_interval().total_fast_poll_count, 0);\n    ///\n    ///     let fast = Duration::ZERO;\n    ///\n    ///     // this task completes in three fast polls\n    ///     let _ = metrics_monitor.instrument(async {\n    ///         spin_for(fast).await; // fast poll 1\n    ///         spin_for(fast).await; // fast poll 2\n    ///         spin_for(fast)        // fast poll 3\n    ///     }).await;\n    ///\n    ///     assert_eq!(next_interval().total_fast_poll_count, 3);\n    ///\n    ///     // this task completes in two fast polls\n    ///     let _ = metrics_monitor.instrument(async {\n    ///         spin_for(fast).await; // fast poll 1\n    ///         
spin_for(fast)        // fast poll 2\n    ///     }).await;\n    ///\n    ///     assert_eq!(next_interval().total_fast_poll_count, 2);\n    /// }\n    ///\n    /// /// Block the current thread for a given `duration`, then (optionally) yield to the scheduler.\n    /// fn spin_for(duration: Duration) -> impl Future<Output=()> {\n    ///     let start = tokio::time::Instant::now();\n    ///     while start.elapsed() <= duration {}\n    ///     tokio::task::yield_now()\n    /// }\n    /// ```\n    pub total_fast_poll_count: u64,\n\n    /// The total duration of fast polls.\n    ///\n    /// Here, 'fast' is defined as completing in strictly less time than\n    /// [`slow_poll_threshold`][TaskMonitor::slow_poll_threshold].\n    ///\n    /// ##### Derived metrics\n    /// - **[`mean_fast_poll_duration`][TaskMetrics::mean_fast_poll_duration]**\n    ///   The mean duration of fast polls.\n    ///\n    /// ##### Examples\n    /// In the below example, no tasks are polled in the first sampling interval; three fast polls\n    /// consume a total of 3μs time in the second sampling interval; and two fast polls consume a\n    /// total of 2μs time in the third sampling interval:\n    /// ```\n    /// use std::future::Future;\n    /// use std::time::Duration;\n    ///\n    /// #[tokio::main]\n    /// async fn main() {\n    ///     let metrics_monitor = tokio_metrics::TaskMonitor::new();\n    ///     let mut interval = metrics_monitor.intervals();\n    ///     let mut next_interval = || interval.next().unwrap();\n    ///\n    ///     // no tasks have been constructed, instrumented, or polled\n    ///     let interval = next_interval();\n    ///     assert_eq!(interval.total_fast_poll_duration, Duration::ZERO);\n    ///\n    ///     let fast = Duration::from_micros(1);\n    ///\n    ///     // this task completes in three fast polls\n    ///     let task_a_time = time(metrics_monitor.instrument(async {\n    ///         spin_for(fast).await; // fast poll 1\n    ///         
spin_for(fast).await; // fast poll 2\n    ///         spin_for(fast)        // fast poll 3\n    ///     })).await;\n    ///\n    ///     let interval = next_interval();\n    ///     assert!(interval.total_fast_poll_duration >= fast * 3);\n    ///     assert!(interval.total_fast_poll_duration <= task_a_time);\n    ///\n    ///     // this task completes in two fast polls\n    ///     let task_b_time = time(metrics_monitor.instrument(async {\n    ///         spin_for(fast).await; // fast poll 1\n    ///         spin_for(fast)        // fast poll 2\n    ///     })).await;\n    ///\n    ///     let interval = next_interval();\n    ///     assert!(interval.total_fast_poll_duration >= fast * 2);\n    ///     assert!(interval.total_fast_poll_duration <= task_b_time);\n    /// }\n    ///\n    /// /// Produces the amount of time it took to await a given async task.\n    /// async fn time(task: impl Future) -> Duration {\n    ///     let start = tokio::time::Instant::now();\n    ///     task.await;\n    ///     start.elapsed()\n    /// }\n    ///\n    /// /// Block the current thread for a given `duration`, then (optionally) yield to the scheduler.\n    /// fn spin_for(duration: Duration) -> impl Future<Output=()> {\n    ///     let start = tokio::time::Instant::now();\n    ///     while start.elapsed() <= duration {}\n    ///     tokio::task::yield_now()\n    /// }\n    /// ```\n    pub total_fast_poll_duration: Duration,\n\n    /// The total number of times that polling tasks completed slowly.\n    ///\n    /// Here, 'slowly' is defined as completing in at least as much time as\n    /// [`slow_poll_threshold`][TaskMonitor::slow_poll_threshold].\n    ///\n    /// ##### Derived metrics\n    /// - **[`mean_slow_poll_duration`][`TaskMetrics::mean_slow_poll_duration`]**\n    ///   The mean duration of slow polls.\n    ///\n    /// ##### Examples\n    /// In the below example, 0 polls occur within the first sampling interval, 3 slow polls occur\n    /// within the second 
sampling interval, and 2 slow polls occur within the third sampling\n    /// interval:\n    /// ```\n    /// use std::future::Future;\n    /// use std::time::Duration;\n    ///\n    /// #[tokio::main]\n    /// async fn main() {\n    ///     let metrics_monitor = tokio_metrics::TaskMonitor::new();\n    ///     let mut interval = metrics_monitor.intervals();\n    ///     let mut next_interval = || interval.next().unwrap();\n    ///\n    ///     // no tasks have been constructed, instrumented, or polled\n    ///     assert_eq!(next_interval().total_slow_poll_count, 0);\n    ///\n    ///     let slow = 10 * metrics_monitor.slow_poll_threshold();\n    ///\n    ///     // this task completes in three slow polls\n    ///     let _ = metrics_monitor.instrument(async {\n    ///         spin_for(slow).await; // slow poll 1\n    ///         spin_for(slow).await; // slow poll 2\n    ///         spin_for(slow)        // slow poll 3\n    ///     }).await;\n    ///\n    ///     assert_eq!(next_interval().total_slow_poll_count, 3);\n    ///\n    ///     // this task completes in two slow polls\n    ///     let _ = metrics_monitor.instrument(async {\n    ///         spin_for(slow).await; // slow poll 1\n    ///         spin_for(slow)        // slow poll 2\n    ///     }).await;\n    ///\n    ///     assert_eq!(next_interval().total_slow_poll_count, 2);\n    /// }\n    ///\n    /// /// Block the current thread for a given `duration`, then (optionally) yield to the scheduler.\n    /// fn spin_for(duration: Duration) -> impl Future<Output=()> {\n    ///     let start = tokio::time::Instant::now();\n    ///     while start.elapsed() <= duration {}\n    ///     tokio::task::yield_now()\n    /// }\n    /// ```\n    pub total_slow_poll_count: u64,\n\n    /// The total duration of slow polls.\n    ///\n    /// Here, 'slowly' is defined as completing in at least as much time as\n    /// [`slow_poll_threshold`][TaskMonitor::slow_poll_threshold].\n    ///\n    /// ##### Derived metrics\n    
/// - **[`mean_slow_poll_duration`][`TaskMetrics::mean_slow_poll_duration`]**\n    ///   The mean duration of slow polls.\n    ///\n    /// ##### Examples\n    /// In the below example, no tasks are polled in the first sampling interval; three slow polls\n    /// consume a total of\n    /// 30 × [`DEFAULT_SLOW_POLL_THRESHOLD`][TaskMonitor::DEFAULT_SLOW_POLL_THRESHOLD]\n    /// time in the second sampling interval; and two slow polls consume a total of\n    /// 20 × [`DEFAULT_SLOW_POLL_THRESHOLD`][TaskMonitor::DEFAULT_SLOW_POLL_THRESHOLD] time in the\n    /// third sampling interval:\n    /// ```\n    /// use std::future::Future;\n    /// use std::time::Duration;\n    ///\n    /// #[tokio::main]\n    /// async fn main() {\n    ///     let metrics_monitor = tokio_metrics::TaskMonitor::new();\n    ///     let mut interval = metrics_monitor.intervals();\n    ///     let mut next_interval = || interval.next().unwrap();\n    ///\n    ///     // no tasks have been constructed, instrumented, or polled\n    ///     let interval = next_interval();\n    ///     assert_eq!(interval.total_slow_poll_duration, Duration::ZERO);\n    ///\n    ///     let slow = 10 * metrics_monitor.slow_poll_threshold();\n    ///\n    ///     // this task completes in three slow polls\n    ///     let task_a_time = time(metrics_monitor.instrument(async {\n    ///         spin_for(slow).await; // slow poll 1\n    ///         spin_for(slow).await; // slow poll 2\n    ///         spin_for(slow)        // slow poll 3\n    ///     })).await;\n    ///\n    ///     let interval = next_interval();\n    ///     assert!(interval.total_slow_poll_duration >= slow * 3);\n    ///     assert!(interval.total_slow_poll_duration <= task_a_time);\n    ///\n    ///     // this task completes in two slow polls\n    ///     let task_b_time = time(metrics_monitor.instrument(async {\n    ///         spin_for(slow).await; // slow poll 1\n    ///         spin_for(slow)        // slow poll 2\n    ///     })).await;\n    
///\n    ///     let interval = next_interval();\n    ///     assert!(interval.total_slow_poll_duration >= slow * 2);\n    ///     assert!(interval.total_slow_poll_duration <= task_b_time);\n    /// }\n    ///\n    /// /// Produces the amount of time it took to await a given async task.\n    /// async fn time(task: impl Future) -> Duration {\n    ///     let start = tokio::time::Instant::now();\n    ///     task.await;\n    ///     start.elapsed()\n    /// }\n    ///\n    /// /// Block the current thread for a given `duration`, then (optionally) yield to the scheduler.\n    /// fn spin_for(duration: Duration) -> impl Future<Output=()> {\n    ///     let start = tokio::time::Instant::now();\n    ///     while start.elapsed() <= duration {}\n    ///     tokio::task::yield_now()\n    /// }\n    /// ```\n    pub total_slow_poll_duration: Duration,\n\n    /// The total count of tasks with short scheduling delays.\n    ///\n    /// This is defined as tasks taking strictly less than\n    /// [`long_delay_threshold`][TaskMonitor::long_delay_threshold] to be executed after being\n    /// scheduled.\n    ///\n    /// ##### Derived metrics\n    /// - **[`mean_short_delay_duration`][TaskMetrics::mean_short_delay_duration]**\n    ///   The mean duration of short scheduling delays.\n    pub total_short_delay_count: u64,\n\n    /// The total duration of tasks with short scheduling delays.\n    ///\n    /// This is defined as tasks taking strictly less than\n    /// [`long_delay_threshold`][TaskMonitor::long_delay_threshold] to be executed after being\n    /// scheduled.\n    ///\n    /// ##### Derived metrics\n    /// - **[`mean_short_delay_duration`][TaskMetrics::mean_short_delay_duration]**\n    ///   The mean duration of short scheduling delays.\n    pub total_short_delay_duration: Duration,\n\n    /// The total count of tasks with long scheduling delays.\n    ///\n    /// This is defined as tasks taking\n    /// [`long_delay_threshold`][TaskMonitor::long_delay_threshold] or 
longer to be executed\n    /// after being scheduled.\n    ///\n    /// ##### Derived metrics\n    /// - **[`mean_long_delay_duration`][TaskMetrics::mean_long_delay_duration]**\n    ///   The mean duration of short scheduling delays.\n    pub total_long_delay_count: u64,\n\n    /// The total duration of tasks with long scheduling delays.\n    ///\n    /// This is defined as tasks taking\n    /// [`long_delay_threshold`][TaskMonitor::long_delay_threshold] or longer to be executed\n    /// after being scheduled.\n    ///\n    /// ##### Derived metrics\n    /// - **[`mean_long_delay_duration`][TaskMetrics::mean_long_delay_duration]**\n    ///   The mean duration of short scheduling delays.\n    pub total_long_delay_duration: Duration,\n}\n\n/// Tracks the metrics, shared across the various types.\n#[derive(Debug)]\nstruct RawMetrics {\n    /// A task poll takes longer than this, it is considered a slow poll.\n    slow_poll_threshold: Duration,\n\n    /// A scheduling delay of at least this long will be considered a long delay\n    long_delay_threshold: Duration,\n\n    /// Total number of instrumented tasks.\n    instrumented_count: AtomicU64,\n\n    /// Total number of instrumented tasks polled at least once.\n    first_poll_count: AtomicU64,\n\n    /// Total number of times tasks entered the `idle` state.\n    total_idled_count: AtomicU64,\n\n    /// Total number of times tasks were scheduled.\n    total_scheduled_count: AtomicU64,\n\n    /// Total number of times tasks were polled fast\n    total_fast_poll_count: AtomicU64,\n\n    /// Total number of times tasks were polled slow\n    total_slow_poll_count: AtomicU64,\n\n    /// Total number of times tasks had long delay,\n    total_long_delay_count: AtomicU64,\n\n    /// Total number of times tasks had little delay\n    total_short_delay_count: AtomicU64,\n\n    /// Total number of times tasks were dropped\n    dropped_count: AtomicU64,\n\n    /// Total amount of time until the first poll\n    
total_first_poll_delay_ns: AtomicU64,\n\n    /// Total amount of time tasks spent in the `idle` state.\n    total_idle_duration_ns: AtomicU64,\n\n    /// The longest time tasks spent in the `idle` state locally.\n    /// This will be used to track the local max between interval\n    /// metric snapshots.\n    local_max_idle_duration_ns: AtomicU64,\n\n    /// The longest time tasks spent in the `idle` state.\n    global_max_idle_duration_ns: AtomicU64,\n\n    /// Total amount of time tasks spent in the waking state.\n    total_scheduled_duration_ns: AtomicU64,\n\n    /// Total amount of time tasks spent being polled below the slow cut off.\n    total_fast_poll_duration_ns: AtomicU64,\n\n    /// Total amount of time tasks spent being polled above the slow cut off.\n    total_slow_poll_duration: AtomicU64,\n\n    /// Total amount of time tasks spent being polled below the long delay cut off.\n    total_short_delay_duration_ns: AtomicU64,\n\n    /// Total amount of time tasks spent being polled at or above the long delay cut off.\n    total_long_delay_duration_ns: AtomicU64,\n}\n\n#[derive(Debug)]\nstruct State<M> {\n    /// Where metrics should be recorded\n    monitor: M,\n\n    /// Instant at which the task was instrumented. 
This is used to track the time to first poll.\n    instrumented_at: Instant,\n\n    /// The instant, tracked as nanoseconds since `instrumented_at`, at which the future\n    /// was last woken.\n    woke_at: AtomicU64,\n\n    /// Waker to forward notifications to.\n    waker: AtomicWaker,\n}\n\nimpl TaskMonitor {\n    /// The default duration at which polls cross the threshold into being categorized as 'slow' is\n    /// 50μs.\n    #[cfg(not(test))]\n    pub const DEFAULT_SLOW_POLL_THRESHOLD: Duration = Duration::from_micros(50);\n    #[cfg(test)]\n    #[allow(missing_docs)]\n    pub const DEFAULT_SLOW_POLL_THRESHOLD: Duration = Duration::from_millis(500);\n\n    /// The default duration at which schedules cross the threshold into being categorized as 'long'\n    /// is 50μs.\n    #[cfg(not(test))]\n    pub const DEFAULT_LONG_DELAY_THRESHOLD: Duration = Duration::from_micros(50);\n    #[cfg(test)]\n    #[allow(missing_docs)]\n    pub const DEFAULT_LONG_DELAY_THRESHOLD: Duration = Duration::from_millis(500);\n\n    /// Constructs a new task monitor.\n    ///\n    /// Uses [`Self::DEFAULT_SLOW_POLL_THRESHOLD`] as the threshold at which polls will be\n    /// considered 'slow'.\n    ///\n    /// Uses [`Self::DEFAULT_LONG_DELAY_THRESHOLD`] as the threshold at which scheduling will be\n    /// considered 'long'.\n    pub fn new() -> TaskMonitor {\n        TaskMonitor::with_slow_poll_threshold(Self::DEFAULT_SLOW_POLL_THRESHOLD)\n    }\n\n    /// Constructs a builder for a task monitor.\n    pub fn builder() -> TaskMonitorBuilder {\n        TaskMonitorBuilder::new()\n    }\n\n    /// Constructs a new task monitor with a given threshold at which polls are considered 'slow'.\n    ///\n    /// ##### Selecting an appropriate threshold\n    /// TODO. 
What advice can we give here?\n    ///\n    /// ##### Examples\n    /// In the below example, low-threshold and high-threshold monitors are constructed and\n    /// instrument identical tasks; the low-threshold monitor reports4 slow polls, and the\n    /// high-threshold monitor reports only 2 slow polls:\n    /// ```\n    /// use std::future::Future;\n    /// use std::time::Duration;\n    /// use tokio_metrics::TaskMonitor;\n    ///\n    /// #[tokio::main]\n    /// async fn main() {\n    ///     let lo_threshold = Duration::from_micros(10);\n    ///     let hi_threshold = Duration::from_millis(10);\n    ///\n    ///     let lo_monitor = TaskMonitor::with_slow_poll_threshold(lo_threshold);\n    ///     let hi_monitor = TaskMonitor::with_slow_poll_threshold(hi_threshold);\n    ///\n    ///     let make_task = || async {\n    ///         spin_for(lo_threshold).await; // faster poll 1\n    ///         spin_for(lo_threshold).await; // faster poll 2\n    ///         spin_for(hi_threshold).await; // slower poll 3\n    ///         spin_for(hi_threshold).await  // slower poll 4\n    ///     };\n    ///\n    ///     lo_monitor.instrument(make_task()).await;\n    ///     hi_monitor.instrument(make_task()).await;\n    ///\n    ///     // the low-threshold monitor reported 4 slow polls:\n    ///     assert_eq!(lo_monitor.cumulative().total_slow_poll_count, 4);\n    ///     // the high-threshold monitor reported only 2 slow polls:\n    ///     assert_eq!(hi_monitor.cumulative().total_slow_poll_count, 2);\n    /// }\n    ///\n    /// /// Block the current thread for a given `duration`, then (optionally) yield to the scheduler.\n    /// fn spin_for(duration: Duration) -> impl Future<Output=()> {\n    ///     let start = tokio::time::Instant::now();\n    ///     while start.elapsed() <= duration {}\n    ///     tokio::task::yield_now()\n    /// }\n    /// ```\n    pub fn with_slow_poll_threshold(slow_poll_cut_off: Duration) -> TaskMonitor {\n        let base = 
TaskMonitorCore::create(slow_poll_cut_off, Self::DEFAULT_LONG_DELAY_THRESHOLD);\n        TaskMonitor {\n            base: Arc::new(base),\n        }\n    }\n\n    /// Produces the duration greater-than-or-equal-to at which polls are categorized as slow.\n    ///\n    /// ##### Examples\n    /// In the below example, [`TaskMonitor`] is initialized with [`TaskMonitor::new`];\n    /// consequently, its slow-poll threshold equals [`TaskMonitor::DEFAULT_SLOW_POLL_THRESHOLD`]:\n    /// ```\n    /// use tokio_metrics::TaskMonitor;\n    ///\n    /// #[tokio::main]\n    /// async fn main() {\n    ///     let metrics_monitor = TaskMonitor::new();\n    ///\n    ///     assert_eq!(\n    ///         metrics_monitor.slow_poll_threshold(),\n    ///         TaskMonitor::DEFAULT_SLOW_POLL_THRESHOLD\n    ///     );\n    /// }\n    /// ```\n    pub fn slow_poll_threshold(&self) -> Duration {\n        self.base.metrics.slow_poll_threshold\n    }\n\n    /// Produces the duration greater-than-or-equal-to at which scheduling delays are categorized\n    /// as long.\n    pub fn long_delay_threshold(&self) -> Duration {\n        self.base.metrics.long_delay_threshold\n    }\n\n    /// Produces an instrumented façade around a given async task.\n    ///\n    /// ##### Examples\n    /// Instrument an async task by passing it to [`TaskMonitor::instrument`]:\n    /// ```\n    /// #[tokio::main]\n    /// async fn main() {\n    ///     let metrics_monitor = tokio_metrics::TaskMonitor::new();\n    ///\n    ///     // 0 tasks have been instrumented, much less polled\n    ///     assert_eq!(metrics_monitor.cumulative().first_poll_count, 0);\n    ///\n    ///     // instrument a task and poll it to completion\n    ///     metrics_monitor.instrument(async {}).await;\n    ///\n    ///     // 1 task has been instrumented and polled\n    ///     assert_eq!(metrics_monitor.cumulative().first_poll_count, 1);\n    ///\n    ///     // instrument a task and poll it to completion\n    ///     
metrics_monitor.instrument(async {}).await;\n    ///\n    ///     // 2 tasks have been instrumented and polled\n    ///     assert_eq!(metrics_monitor.cumulative().first_poll_count, 2);\n    /// }\n    /// ```\n    /// An aync task may be tracked by multiple [`TaskMonitor`]s; e.g.:\n    /// ```\n    /// #[tokio::main]\n    /// async fn main() {\n    ///     let monitor_a = tokio_metrics::TaskMonitor::new();\n    ///     let monitor_b = tokio_metrics::TaskMonitor::new();\n    ///\n    ///     // 0 tasks have been instrumented, much less polled\n    ///     assert_eq!(monitor_a.cumulative().first_poll_count, 0);\n    ///     assert_eq!(monitor_b.cumulative().first_poll_count, 0);\n    ///\n    ///     // instrument a task and poll it to completion\n    ///     monitor_a.instrument(monitor_b.instrument(async {})).await;\n    ///\n    ///     // 1 task has been instrumented and polled\n    ///     assert_eq!(monitor_a.cumulative().first_poll_count, 1);\n    ///     assert_eq!(monitor_b.cumulative().first_poll_count, 1);\n    /// }\n    /// ```\n    /// It is also possible (but probably undesirable) to instrument an async task multiple times\n    /// with the same [`TaskMonitor`]; e.g.:\n    /// ```\n    /// #[tokio::main]\n    /// async fn main() {\n    ///     let monitor = tokio_metrics::TaskMonitor::new();\n    ///\n    ///     // 0 tasks have been instrumented, much less polled\n    ///     assert_eq!(monitor.cumulative().first_poll_count, 0);\n    ///\n    ///     // instrument a task and poll it to completion\n    ///     monitor.instrument(monitor.instrument(async {})).await;\n    ///\n    ///     // 2 tasks have been instrumented and polled, supposedly\n    ///     assert_eq!(monitor.cumulative().first_poll_count, 2);\n    /// }\n    /// ```\n    pub fn instrument<F>(&self, task: F) -> Instrumented<F> {\n        TaskMonitorCore::instrument_with(task, self.clone())\n    }\n\n    /// Produces [`TaskMetrics`] for the tasks instrumented by this [`TaskMonitor`], 
collected since\n    /// the construction of [`TaskMonitor`].\n    ///\n    /// ##### See also\n    /// - [`TaskMonitor::intervals`]:\n    ///   produces [`TaskMetrics`] for user-defined sampling intervals, instead of cumulatively\n    ///\n    /// ##### Examples\n    /// In the below example, 0 polls occur within the first sampling interval, 3 slow polls occur\n    /// within the second sampling interval, and 2 slow polls occur within the third sampling\n    /// interval; five slow polls occur across all sampling intervals:\n    /// ```\n    /// use std::future::Future;\n    /// use std::time::Duration;\n    ///\n    /// #[tokio::main]\n    /// async fn main() {\n    ///     let metrics_monitor = tokio_metrics::TaskMonitor::new();\n    ///\n    ///     // initialize a stream of sampling intervals\n    ///     let mut intervals = metrics_monitor.intervals();\n    ///     // each call of `next_interval` will produce metrics for the last sampling interval\n    ///     let mut next_interval = || intervals.next().unwrap();\n    ///\n    ///     let slow = 10 * metrics_monitor.slow_poll_threshold();\n    ///\n    ///     // this task completes in three slow polls\n    ///     let _ = metrics_monitor.instrument(async {\n    ///         spin_for(slow).await; // slow poll 1\n    ///         spin_for(slow).await; // slow poll 2\n    ///         spin_for(slow)        // slow poll 3\n    ///     }).await;\n    ///\n    ///     // in the previous sampling interval, there were 3 slow polls\n    ///     assert_eq!(next_interval().total_slow_poll_count, 3);\n    ///     assert_eq!(metrics_monitor.cumulative().total_slow_poll_count, 3);\n    ///\n    ///     // this task completes in two slow polls\n    ///     let _ = metrics_monitor.instrument(async {\n    ///         spin_for(slow).await; // slow poll 1\n    ///         spin_for(slow)        // slow poll 2\n    ///     }).await;\n    ///\n    ///     // in the previous sampling interval, there were 2 slow polls\n    ///     
assert_eq!(next_interval().total_slow_poll_count, 2);\n    ///\n    ///     // across all sampling intervals, there were a total of 5 slow polls\n    ///     assert_eq!(metrics_monitor.cumulative().total_slow_poll_count, 5);\n    /// }\n    ///\n    /// /// Block the current thread for a given `duration`, then (optionally) yield to the scheduler.\n    /// fn spin_for(duration: Duration) -> impl Future<Output=()> {\n    ///     let start = tokio::time::Instant::now();\n    ///     while start.elapsed() <= duration {}\n    ///     tokio::task::yield_now()\n    /// }\n    /// ```\n    pub fn cumulative(&self) -> TaskMetrics {\n        self.base.metrics.metrics()\n    }\n\n    /// Produces an unending iterator of metric sampling intervals.\n    ///\n    /// Each sampling interval is defined by the time elapsed between advancements of the iterator\n    /// produced by [`TaskMonitor::intervals`]. The item type of this iterator is [`TaskMetrics`],\n    /// which is a bundle of task metrics that describe *only* events occurring within that sampling\n    /// interval.\n    ///\n    /// ##### Examples\n    /// In the below example, 0 polls occur within the first sampling interval, 3 slow polls occur\n    /// within the second sampling interval, and 2 slow polls occur within the third sampling\n    /// interval; five slow polls occur across all sampling intervals:\n    /// ```\n    /// use std::future::Future;\n    /// use std::time::Duration;\n    ///\n    /// #[tokio::main]\n    /// async fn main() {\n    ///     let metrics_monitor = tokio_metrics::TaskMonitor::new();\n    ///\n    ///     // initialize a stream of sampling intervals\n    ///     let mut intervals = metrics_monitor.intervals();\n    ///     // each call of `next_interval` will produce metrics for the last sampling interval\n    ///     let mut next_interval = || intervals.next().unwrap();\n    ///\n    ///     let slow = 10 * metrics_monitor.slow_poll_threshold();\n    ///\n    ///     // this task 
completes in three slow polls\n    ///     let _ = metrics_monitor.instrument(async {\n    ///         spin_for(slow).await; // slow poll 1\n    ///         spin_for(slow).await; // slow poll 2\n    ///         spin_for(slow)        // slow poll 3\n    ///     }).await;\n    ///\n    ///     // in the previous sampling interval, there were 3 slow polls\n    ///     assert_eq!(next_interval().total_slow_poll_count, 3);\n    ///\n    ///     // this task completes in two slow polls\n    ///     let _ = metrics_monitor.instrument(async {\n    ///         spin_for(slow).await; // slow poll 1\n    ///         spin_for(slow)        // slow poll 2\n    ///     }).await;\n    ///\n    ///     // in the previous sampling interval, there were 2 slow polls\n    ///     assert_eq!(next_interval().total_slow_poll_count, 2);\n    ///\n    ///     // across all sampling intervals, there were a total of 5 slow polls\n    ///     assert_eq!(metrics_monitor.cumulative().total_slow_poll_count, 5);\n    /// }\n    ///\n    /// /// Block the current thread for a given `duration`, then (optionally) yield to the scheduler.\n    /// fn spin_for(duration: Duration) -> impl Future<Output=()> {\n    ///     let start = tokio::time::Instant::now();\n    ///     while start.elapsed() <= duration {}\n    ///     tokio::task::yield_now()\n    /// }\n    /// ```\n    pub fn intervals(&self) -> TaskIntervals {\n        TaskIntervals {\n            monitor: self.clone(),\n            previous: None,\n        }\n    }\n}\n\nimpl TaskMonitorCore {\n    /// Returns a const-friendly [`TaskMonitorCoreBuilder`].\n    pub const fn builder() -> TaskMonitorCoreBuilder {\n        TaskMonitorCoreBuilder::new()\n    }\n\n    /// Constructs a new [`TaskMonitorCore`]. 
Refer to the struct documentation for more discussion\n    /// of benefits compared to [`TaskMonitor`].\n    ///\n    /// Uses [`TaskMonitor::DEFAULT_SLOW_POLL_THRESHOLD`] as the threshold at which polls will be\n    /// considered 'slow'.\n    ///\n    /// Uses [`TaskMonitor::DEFAULT_LONG_DELAY_THRESHOLD`] as the threshold at which scheduling will be\n    /// considered 'long'.\n    pub const fn new() -> TaskMonitorCore {\n        TaskMonitorCore::with_slow_poll_threshold(TaskMonitor::DEFAULT_SLOW_POLL_THRESHOLD)\n    }\n\n    /// Constructs a new task monitor with a given threshold at which polls are considered 'slow'.\n    ///\n    /// Refer to [`TaskMonitor::with_slow_poll_threshold`] for examples.\n    pub const fn with_slow_poll_threshold(slow_poll_cut_off: Duration) -> TaskMonitorCore {\n        Self::create(slow_poll_cut_off, TaskMonitor::DEFAULT_LONG_DELAY_THRESHOLD)\n    }\n\n    /// Produces the duration greater-than-or-equal-to at which polls are categorized as slow.\n    ///\n    /// Refer to [`TaskMonitor::slow_poll_threshold`] for examples.\n    pub fn slow_poll_threshold(&self) -> Duration {\n        self.metrics.slow_poll_threshold\n    }\n\n    /// Produces the duration greater-than-or-equal-to at which scheduling delays are categorized\n    /// as long.\n    pub fn long_delay_threshold(&self) -> Duration {\n        self.metrics.long_delay_threshold\n    }\n\n    /// Produces an instrumented façade around a given async task.\n    ///\n    /// ##### Examples\n    /// ```\n    /// use tokio_metrics::TaskMonitorCore;\n    ///\n    /// static MONITOR: TaskMonitorCore = TaskMonitorCore::new();\n    ///\n    /// #[tokio::main]\n    /// async fn main() {\n    ///     assert_eq!(MONITOR.cumulative().first_poll_count, 0);\n    ///\n    ///     MONITOR.instrument(async {}).await;\n    ///     assert_eq!(MONITOR.cumulative().first_poll_count, 1);\n    /// }\n    /// ```\n    pub fn instrument<F>(&'static self, task: F) -> Instrumented<F, &'static Self> {\n   
     Self::instrument_with(task, self)\n    }\n\n    /// Produces an instrumented façade around a given async task, with an explicit monitor.\n    ///\n    /// Use this when you have a non-static monitor reference, such as an `Arc<TaskMonitorCore>`.\n    ///\n    /// ##### Examples\n    /// ```\n    /// use std::sync::Arc;\n    /// use tokio_metrics::TaskMonitorCore;\n    ///\n    /// #[derive(Clone)]\n    /// struct SharedState(Arc<SharedStateInner>);\n    /// struct SharedStateInner {\n    ///     monitor: TaskMonitorCore,\n    ///     other_state: SomeOtherSharedState,\n    /// }\n    /// /// Imagine: a type that wasn't `Clone` that you want to pass around\n    /// /// in a similar way as the monitor\n    /// struct SomeOtherSharedState;\n    ///\n    /// impl AsRef<TaskMonitorCore> for SharedState {\n    ///     fn as_ref(&self) -> &TaskMonitorCore {\n    ///         &self.0.monitor\n    ///     }\n    /// }\n    ///\n    /// #[tokio::main]\n    /// async fn main() {\n    ///     let state = SharedState(Arc::new(SharedStateInner {\n    ///         monitor: TaskMonitorCore::new(),\n    ///         other_state: SomeOtherSharedState,\n    ///     }));\n    ///\n    ///     assert_eq!(state.0.monitor.cumulative().first_poll_count, 0);\n    ///\n    ///     TaskMonitorCore::instrument_with(async {}, state.clone()).await;\n    ///     assert_eq!(state.0.monitor.cumulative().first_poll_count, 1);\n    /// }\n    /// ```\n    pub fn instrument_with<F, M: AsRef<TaskMonitorCore> + Send + Sync + 'static>(\n        task: F,\n        monitor: M,\n    ) -> Instrumented<F, M> {\n        monitor\n            .as_ref()\n            .metrics\n            .instrumented_count\n            .fetch_add(1, SeqCst);\n\n        let state: State<M> = State {\n            monitor,\n            instrumented_at: Instant::now(),\n            woke_at: AtomicU64::new(0),\n            waker: AtomicWaker::new(),\n        };\n\n        let instrumented: Instrumented<F, M> = Instrumented {\n       
     task,\n            did_poll_once: false,\n            idled_at: 0,\n            state: Arc::new(state),\n        };\n\n        instrumented\n    }\n\n    /// Produces [`TaskMetrics`] for the tasks instrumented by this [`TaskMonitorCore`], collected since\n    /// the construction of [`TaskMonitorCore`].\n    ///\n    /// ##### See also\n    /// - [`TaskMonitorCore::intervals`]:\n    ///   produces [`TaskMetrics`] for user-defined sampling intervals, instead of cumulatively\n    ///\n    /// See [`TaskMonitor::cumulative`] for examples.\n    pub fn cumulative(&self) -> TaskMetrics {\n        self.metrics.metrics()\n    }\n\n    /// Produces an unending iterator of metric sampling intervals.\n    ///\n    /// Each sampling interval is defined by the time elapsed between advancements of the iterator\n    /// produced by [`TaskMonitorCore::intervals`]. The item type of this iterator is [`TaskMetrics`],\n    /// which is a bundle of task metrics that describe *only* events occurring within that sampling\n    /// interval.\n    ///\n    /// ##### Examples\n    /// The below example demonstrates construction of [`TaskIntervals`] with [`TaskMonitorCore`].\n    ///\n    /// See [`TaskMonitor::intervals`] for more usage examples.\n    ///\n    /// ```\n    /// use std::sync::Arc;\n    ///\n    /// fn main() {\n    ///     let metrics_monitor = Arc::new(tokio_metrics::TaskMonitorCore::new());\n    ///\n    ///     let mut _intervals = tokio_metrics::TaskMonitorCore::intervals(metrics_monitor);\n    /// }\n    /// ```\n    pub fn intervals<Monitor: AsRef<TaskMonitorCore> + Send + Sync + 'static>(\n        monitor: Monitor,\n    ) -> TaskIntervals<Monitor> {\n        let intervals: TaskIntervals<Monitor> = TaskIntervals {\n            monitor,\n            previous: None,\n        };\n\n        intervals\n    }\n}\n\nimpl AsRef<TaskMonitorCore> for TaskMonitorCore {\n    fn as_ref(&self) -> &TaskMonitorCore {\n        self\n    }\n}\n\nimpl TaskMonitorCore {\n    const fn 
create(slow_poll_cut_off: Duration, long_delay_cut_off: Duration) -> TaskMonitorCore {\n        TaskMonitorCore {\n            metrics: RawMetrics {\n                slow_poll_threshold: slow_poll_cut_off,\n                first_poll_count: AtomicU64::new(0),\n                total_idled_count: AtomicU64::new(0),\n                total_scheduled_count: AtomicU64::new(0),\n                total_fast_poll_count: AtomicU64::new(0),\n                total_slow_poll_count: AtomicU64::new(0),\n                total_long_delay_count: AtomicU64::new(0),\n                instrumented_count: AtomicU64::new(0),\n                dropped_count: AtomicU64::new(0),\n                total_first_poll_delay_ns: AtomicU64::new(0),\n                total_scheduled_duration_ns: AtomicU64::new(0),\n                local_max_idle_duration_ns: AtomicU64::new(0),\n                global_max_idle_duration_ns: AtomicU64::new(0),\n                total_idle_duration_ns: AtomicU64::new(0),\n                total_fast_poll_duration_ns: AtomicU64::new(0),\n                total_slow_poll_duration: AtomicU64::new(0),\n                total_short_delay_duration_ns: AtomicU64::new(0),\n                long_delay_threshold: long_delay_cut_off,\n                total_short_delay_count: AtomicU64::new(0),\n                total_long_delay_duration_ns: AtomicU64::new(0),\n            },\n        }\n    }\n}\n\nimpl RawMetrics {\n    fn get_and_reset_local_max_idle_duration(&self) -> Duration {\n        Duration::from_nanos(self.local_max_idle_duration_ns.swap(0, SeqCst))\n    }\n\n    fn metrics(&self) -> TaskMetrics {\n        let total_fast_poll_count = self.total_fast_poll_count.load(SeqCst);\n        let total_slow_poll_count = self.total_slow_poll_count.load(SeqCst);\n\n        let total_fast_poll_duration =\n            Duration::from_nanos(self.total_fast_poll_duration_ns.load(SeqCst));\n        let total_slow_poll_duration =\n            
Duration::from_nanos(self.total_slow_poll_duration.load(SeqCst));\n\n        let total_poll_count = total_fast_poll_count.saturating_add(total_slow_poll_count);\n        let total_poll_duration = total_fast_poll_duration.saturating_add(total_slow_poll_duration);\n\n        TaskMetrics {\n            instrumented_count: self.instrumented_count.load(SeqCst),\n            dropped_count: self.dropped_count.load(SeqCst),\n\n            total_poll_count,\n            total_poll_duration,\n            first_poll_count: self.first_poll_count.load(SeqCst),\n            total_idled_count: self.total_idled_count.load(SeqCst),\n            total_scheduled_count: self.total_scheduled_count.load(SeqCst),\n            total_fast_poll_count: self.total_fast_poll_count.load(SeqCst),\n            total_slow_poll_count: self.total_slow_poll_count.load(SeqCst),\n            total_short_delay_count: self.total_short_delay_count.load(SeqCst),\n            total_long_delay_count: self.total_long_delay_count.load(SeqCst),\n            total_first_poll_delay: Duration::from_nanos(\n                self.total_first_poll_delay_ns.load(SeqCst),\n            ),\n            max_idle_duration: Duration::from_nanos(self.global_max_idle_duration_ns.load(SeqCst)),\n            total_idle_duration: Duration::from_nanos(self.total_idle_duration_ns.load(SeqCst)),\n            total_scheduled_duration: Duration::from_nanos(\n                self.total_scheduled_duration_ns.load(SeqCst),\n            ),\n            total_fast_poll_duration: Duration::from_nanos(\n                self.total_fast_poll_duration_ns.load(SeqCst),\n            ),\n            total_slow_poll_duration: Duration::from_nanos(\n                self.total_slow_poll_duration.load(SeqCst),\n            ),\n            total_short_delay_duration: Duration::from_nanos(\n                self.total_short_delay_duration_ns.load(SeqCst),\n            ),\n            total_long_delay_duration: Duration::from_nanos(\n                
self.total_long_delay_duration_ns.load(SeqCst),\n            ),\n        }\n    }\n}\n\nimpl Default for TaskMonitor {\n    fn default() -> TaskMonitor {\n        TaskMonitor::new()\n    }\n}\n\nimpl Default for TaskMonitorCore {\n    fn default() -> TaskMonitorCore {\n        TaskMonitorCore::new()\n    }\n}\n\nderived_metrics!(\n    [TaskMetrics] {\n        stable {\n            /// The mean duration elapsed between the instant tasks are instrumented, and the instant they\n            /// are first polled.\n            ///\n            /// ##### Definition\n            /// This metric is derived from [`total_first_poll_delay`][TaskMetrics::total_first_poll_delay]\n            /// ÷ [`first_poll_count`][TaskMetrics::first_poll_count].\n            ///\n            /// ##### Interpretation\n            /// If this metric increases, it means that, on average, tasks spent longer waiting to be\n            /// initially polled.\n            ///\n            /// ##### See also\n            /// - **[`mean_scheduled_duration`][TaskMetrics::mean_scheduled_duration]**\n            ///   The mean duration that tasks spent waiting to be executed after awakening.\n            ///\n            /// ##### Examples\n            /// In the below example, no tasks are instrumented or polled within the first sampling\n            /// interval; in the second sampling interval, 500ms elapse between the instrumentation of a\n            /// task and its first poll; in the third sampling interval, a mean of 750ms elapse between the\n            /// instrumentation and first poll of two tasks:\n            /// ```\n            /// use std::time::Duration;\n            ///\n            /// #[tokio::main]\n            /// async fn main() {\n            ///     let metrics_monitor = tokio_metrics::TaskMonitor::new();\n            ///     let mut interval = metrics_monitor.intervals();\n            ///     let mut next_interval = || interval.next().unwrap();\n            ///\n            /// 
    // no tasks have yet been created, instrumented, or polled\n            ///     assert_eq!(next_interval().mean_first_poll_delay(), Duration::ZERO);\n            ///\n            ///     // constructs and instruments a task, pauses for `pause_time`, awaits the task, then\n            ///     // produces the total time it took to do all of the aforementioned\n            ///     async fn instrument_pause_await(\n            ///         metrics_monitor: &tokio_metrics::TaskMonitor,\n            ///         pause_time: Duration\n            ///     ) -> Duration\n            ///     {\n            ///         let before_instrumentation = tokio::time::Instant::now();\n            ///         let task = metrics_monitor.instrument(async move {});\n            ///         tokio::time::sleep(pause_time).await;\n            ///         task.await;\n            ///         before_instrumentation.elapsed()\n            ///     }\n            ///\n            ///     // construct and await a task that pauses for 500ms between instrumentation and first poll\n            ///     let task_a_pause_time = Duration::from_millis(500);\n            ///     let task_a_total_time = instrument_pause_await(&metrics_monitor, task_a_pause_time).await;\n            ///\n            ///     // the `mean_first_poll_delay` will be some duration greater-than-or-equal-to the\n            ///     // pause time of 500ms, and less-than-or-equal-to the total runtime of `task_a`\n            ///     let mean_first_poll_delay = next_interval().mean_first_poll_delay();\n            ///     assert!(mean_first_poll_delay >= task_a_pause_time);\n            ///     assert!(mean_first_poll_delay <= task_a_total_time);\n            ///\n            ///     // construct and await a task that pauses for 500ms between instrumentation and first poll\n            ///     let task_b_pause_time = Duration::from_millis(500);\n            ///     let task_b_total_time = instrument_pause_await(&metrics_monitor, 
task_b_pause_time).await;\n            ///\n            ///     // construct and await a task that pauses for 1000ms between instrumentation and first poll\n            ///     let task_c_pause_time = Duration::from_millis(1000);\n            ///     let task_c_total_time = instrument_pause_await(&metrics_monitor, task_c_pause_time).await;\n            ///\n            ///     // the `mean_first_poll_delay` will be some duration greater-than-or-equal-to the\n            ///     // average pause time of 500ms, and less-than-or-equal-to the combined total runtime of\n            ///     // `task_b` and `task_c`\n            ///     let mean_first_poll_delay = next_interval().mean_first_poll_delay();\n            ///     assert!(mean_first_poll_delay >= (task_b_pause_time + task_c_pause_time) / 2);\n            ///     assert!(mean_first_poll_delay <= (task_b_total_time + task_c_total_time) / 2);\n            /// }\n            /// ```\n            pub fn mean_first_poll_delay(&self) -> Duration {\n                mean(self.total_first_poll_delay, self.first_poll_count)\n            }\n\n            /// The mean duration of idles.\n            ///\n            /// ##### Definition\n            /// This metric is derived from [`total_idle_duration`][TaskMetrics::total_idle_duration] ÷\n            /// [`total_idled_count`][TaskMetrics::total_idled_count].\n            ///\n            /// ##### Interpretation\n            /// The idle state is the duration spanning the instant a task completes a poll, and the instant\n            /// that it is next awoken. Tasks inhabit this state when they are waiting for task-external\n            /// events to complete (e.g., an asynchronous sleep, a network request, file I/O, etc.). 
If this\n            /// metric increases, it means that tasks, in aggregate, spent more time waiting for\n            /// task-external events to complete.\n            ///\n            /// ##### Examples\n            /// ```\n            /// #[tokio::main]\n            /// async fn main() {\n            ///     let monitor = tokio_metrics::TaskMonitor::new();\n            ///     let one_sec = std::time::Duration::from_secs(1);\n            ///\n            ///     monitor.instrument(async move {\n            ///         tokio::time::sleep(one_sec).await;\n            ///     }).await;\n            ///\n            ///     assert!(monitor.cumulative().mean_idle_duration() >= one_sec);\n            /// }\n            /// ```\n            pub fn mean_idle_duration(&self) -> Duration {\n                mean(self.total_idle_duration, self.total_idled_count)\n            }\n\n            /// The mean duration that tasks spent waiting to be executed after awakening.\n            ///\n            /// ##### Definition\n            /// This metric is derived from\n            /// [`total_scheduled_duration`][TaskMetrics::total_scheduled_duration] ÷\n            /// [`total_scheduled_count`][`TaskMetrics::total_scheduled_count`].\n            ///\n            /// ##### Interpretation\n            /// If this metric increases, it means that, on average, tasks spent longer in the runtime's\n            /// queues before being polled.\n            ///\n            /// ##### See also\n            /// - **[`mean_first_poll_delay`][TaskMetrics::mean_first_poll_delay]**\n            ///   The mean duration elapsed between the instant tasks are instrumented, and the instant they\n            ///   are first polled.\n            ///\n            /// ##### Examples\n            /// ```\n            /// use tokio::time::Duration;\n            ///\n            /// #[tokio::main(flavor = \"current_thread\")]\n            /// async fn main() {\n            ///     let metrics_monitor = 
tokio_metrics::TaskMonitor::new();\n            ///     let mut interval = metrics_monitor.intervals();\n            ///     let mut next_interval = || interval.next().unwrap();\n            ///\n            ///     // construct and instrument and spawn a task that yields endlessly\n            ///     tokio::spawn(metrics_monitor.instrument(async {\n            ///         loop { tokio::task::yield_now().await }\n            ///     }));\n            ///\n            ///     tokio::task::yield_now().await;\n            ///\n            ///     // block the executor for 1 second\n            ///     std::thread::sleep(Duration::from_millis(1000));\n            ///\n            ///     // get the task to run twice\n            ///     // the first will have a 1 sec scheduling delay, the second will have almost none\n            ///     tokio::task::yield_now().await;\n            ///     tokio::task::yield_now().await;\n            ///\n            ///     // `endless_task` will have spent approximately one second waiting\n            ///     let mean_scheduled_duration = next_interval().mean_scheduled_duration();\n            ///     assert!(mean_scheduled_duration >= Duration::from_millis(500), \"{}\", mean_scheduled_duration.as_secs_f64());\n            ///     assert!(mean_scheduled_duration <= Duration::from_millis(600), \"{}\", mean_scheduled_duration.as_secs_f64());\n            /// }\n            /// ```\n            pub fn mean_scheduled_duration(&self) -> Duration {\n                mean(self.total_scheduled_duration, self.total_scheduled_count)\n            }\n\n            /// The mean duration of polls.\n            ///\n            /// ##### Definition\n            /// This metric is derived from [`total_poll_duration`][TaskMetrics::total_poll_duration] ÷\n            /// [`total_poll_count`][TaskMetrics::total_poll_count].\n            ///\n            /// ##### Interpretation\n            /// If this metric increases, it means that, on average, 
individual polls are tending to take\n            /// longer. However, this does not necessarily imply increased task latency: An increase in poll\n            /// durations could be offset by fewer polls.\n            ///\n            /// ##### See also\n            /// - **[`slow_poll_ratio`][TaskMetrics::slow_poll_ratio]**\n            ///   The ratio between the number of polls categorized as slow and fast.\n            /// - **[`mean_slow_poll_duration`][TaskMetrics::mean_slow_poll_duration]**\n            ///   The mean duration of slow polls.\n            ///\n            /// ##### Examples\n            /// ```\n            /// use std::time::Duration;\n            ///\n            /// #[tokio::main(flavor = \"current_thread\", start_paused = true)]\n            /// async fn main() {\n            ///     let monitor = tokio_metrics::TaskMonitor::new();\n            ///     let mut interval = monitor.intervals();\n            ///     let mut next_interval = move || interval.next().unwrap();\n            ///\n            ///     assert_eq!(next_interval().mean_poll_duration(), Duration::ZERO);\n            ///\n            ///     monitor.instrument(async {\n            ///         tokio::time::advance(Duration::from_secs(1)).await; // poll 1 (1s)\n            ///         tokio::time::advance(Duration::from_secs(1)).await; // poll 2 (1s)\n            ///         ()                                                  // poll 3 (0s)\n            ///     }).await;\n            ///\n            ///     assert_eq!(next_interval().mean_poll_duration(), Duration::from_secs(2) / 3);\n            /// }\n            /// ```\n            pub fn mean_poll_duration(&self) -> Duration {\n                mean(self.total_poll_duration, self.total_poll_count)\n            }\n\n            /// The ratio between the number of polls categorized as slow and fast.\n            ///\n            /// ##### Definition\n            /// This metric is derived from 
[`total_slow_poll_count`][TaskMetrics::total_slow_poll_count] ÷\n            /// [`total_poll_count`][TaskMetrics::total_poll_count].\n            ///\n            /// ##### Interpretation\n            /// If this metric increases, it means that a greater proportion of polls took excessively long\n            /// before yielding to the scheduler. This does not necessarily imply increased task latency:\n            /// An increase in the proportion of slow polls could be offset by fewer or faster polls.\n            /// However, as a rule, tasks *should* yield to the scheduler frequently.\n            ///\n            /// ##### See also\n            /// - **[`mean_poll_duration`][TaskMetrics::mean_poll_duration]**\n            ///   The mean duration of polls.\n            /// - **[`mean_slow_poll_duration`][TaskMetrics::mean_slow_poll_duration]**\n            ///   The mean duration of slow polls.\n            ///\n            /// ##### Examples\n            /// Changes in this metric may be observed by varying the ratio of slow and fast polls within\n            /// sampling intervals; for instance:\n            /// ```\n            /// use std::future::Future;\n            /// use std::time::Duration;\n            ///\n            /// #[tokio::main]\n            /// async fn main() {\n            ///     let metrics_monitor = tokio_metrics::TaskMonitor::new();\n            ///     let mut interval = metrics_monitor.intervals();\n            ///     let mut next_interval = || interval.next().unwrap();\n            ///\n            ///     // no tasks have been constructed, instrumented, or polled\n            ///     let interval = next_interval();\n            ///     assert_eq!(interval.total_fast_poll_count, 0);\n            ///     assert_eq!(interval.total_slow_poll_count, 0);\n            ///     assert!(interval.slow_poll_ratio().is_nan());\n            ///\n            ///     let fast = Duration::ZERO;\n            ///     let slow = 10 * 
metrics_monitor.slow_poll_threshold();\n            ///\n            ///     // this task completes in three fast polls\n            ///     metrics_monitor.instrument(async {\n            ///         spin_for(fast).await;   // fast poll 1\n            ///         spin_for(fast).await;   // fast poll 2\n            ///         spin_for(fast);         // fast poll 3\n            ///     }).await;\n            ///\n            ///     // this task completes in two slow polls\n            ///     metrics_monitor.instrument(async {\n            ///         spin_for(slow).await;   // slow poll 1\n            ///         spin_for(slow);         // slow poll 2\n            ///     }).await;\n            ///\n            ///     let interval = next_interval();\n            ///     assert_eq!(interval.total_fast_poll_count, 3);\n            ///     assert_eq!(interval.total_slow_poll_count, 2);\n            ///     assert_eq!(interval.slow_poll_ratio(), ratio(2., 3.));\n            ///\n            ///     // this task completes in three slow polls\n            ///     metrics_monitor.instrument(async {\n            ///         spin_for(slow).await;   // slow poll 1\n            ///         spin_for(slow).await;   // slow poll 2\n            ///         spin_for(slow);         // slow poll 3\n            ///     }).await;\n            ///\n            ///     // this task completes in two fast polls\n            ///     metrics_monitor.instrument(async {\n            ///         spin_for(fast).await; // fast poll 1\n            ///         spin_for(fast);       // fast poll 2\n            ///     }).await;\n            ///\n            ///     let interval = next_interval();\n            ///     assert_eq!(interval.total_fast_poll_count, 2);\n            ///     assert_eq!(interval.total_slow_poll_count, 3);\n            ///     assert_eq!(interval.slow_poll_ratio(), ratio(3., 2.));\n            /// }\n            ///\n            /// fn ratio(a: f64, b: f64) -> f64 {\n     
       ///     a / (a + b)\n            /// }\n            ///\n            /// /// Block the current thread for a given `duration`, then (optionally) yield to the scheduler.\n            /// fn spin_for(duration: Duration) -> impl Future<Output=()> {\n            ///     let start = tokio::time::Instant::now();\n            ///     while start.elapsed() <= duration {}\n            ///     tokio::task::yield_now()\n            /// }\n            /// ```\n            pub fn slow_poll_ratio(&self) -> f64 {\n                self.total_slow_poll_count as f64 / self.total_poll_count as f64\n            }\n\n            /// The ratio of tasks exceeding [`long_delay_threshold`][TaskMonitor::long_delay_threshold].\n            ///\n            /// ##### Definition\n            /// This metric is derived from [`total_long_delay_count`][TaskMetrics::total_long_delay_count] ÷\n            /// [`total_scheduled_count`][TaskMetrics::total_scheduled_count].\n            pub fn long_delay_ratio(&self) -> f64 {\n                self.total_long_delay_count as f64 / self.total_scheduled_count as f64\n            }\n\n            /// The mean duration of fast polls.\n            ///\n            /// ##### Definition\n            /// This metric is derived from\n            /// [`total_fast_poll_duration`][TaskMetrics::total_fast_poll_duration] ÷\n            /// [`total_fast_poll_count`][TaskMetrics::total_fast_poll_count].\n            ///\n            /// ##### Examples\n            /// In the below example, no tasks are polled in the first sampling interval; three fast polls\n            /// consume a mean of\n            /// ⅜ × [`DEFAULT_SLOW_POLL_THRESHOLD`][TaskMonitor::DEFAULT_SLOW_POLL_THRESHOLD] time in the\n            /// second sampling interval; and two fast polls consume a total of\n            /// ½ × [`DEFAULT_SLOW_POLL_THRESHOLD`][TaskMonitor::DEFAULT_SLOW_POLL_THRESHOLD] time in the\n            /// third sampling interval:\n            /// ```\n            /// use 
std::future::Future;\n            /// use std::time::Duration;\n            ///\n            /// #[tokio::main]\n            /// async fn main() {\n            ///     let metrics_monitor = tokio_metrics::TaskMonitor::new();\n            ///     let mut interval = metrics_monitor.intervals();\n            ///     let mut next_interval = || interval.next().unwrap();\n            ///\n            ///     // no tasks have been constructed, instrumented, or polled\n            ///     assert_eq!(next_interval().mean_fast_poll_duration(), Duration::ZERO);\n            ///\n            ///     let threshold = metrics_monitor.slow_poll_threshold();\n            ///     let fast_1 = 1 * Duration::from_micros(1);\n            ///     let fast_2 = 2 * Duration::from_micros(1);\n            ///     let fast_3 = 3 * Duration::from_micros(1);\n            ///\n            ///     // this task completes in two fast polls\n            ///     let total_time = time(metrics_monitor.instrument(async {\n            ///         spin_for(fast_1).await; // fast poll 1\n            ///         spin_for(fast_2)        // fast poll 2\n            ///     })).await;\n            ///\n            ///     // `mean_fast_poll_duration` ≈ the mean of `fast_1` and `fast_2`\n            ///     let mean_fast_poll_duration = next_interval().mean_fast_poll_duration();\n            ///     assert!(mean_fast_poll_duration >= (fast_1 + fast_2) / 2);\n            ///     assert!(mean_fast_poll_duration <= total_time / 2);\n            ///\n            ///     // this task completes in three fast polls\n            ///     let total_time = time(metrics_monitor.instrument(async {\n            ///         spin_for(fast_1).await; // fast poll 1\n            ///         spin_for(fast_2).await; // fast poll 2\n            ///         spin_for(fast_3)        // fast poll 3\n            ///     })).await;\n            ///\n            ///     // `mean_fast_poll_duration` ≈ the mean of `fast_1`, `fast_2`, 
`fast_3`\n            ///     let mean_fast_poll_duration = next_interval().mean_fast_poll_duration();\n            ///     assert!(mean_fast_poll_duration >= (fast_1 + fast_2 + fast_3) / 3);\n            ///     assert!(mean_fast_poll_duration <= total_time / 3);\n            /// }\n            ///\n            /// /// Produces the amount of time it took to await a given task.\n            /// async fn time(task: impl Future) -> Duration {\n            ///     let start = tokio::time::Instant::now();\n            ///     task.await;\n            ///     start.elapsed()\n            /// }\n            ///\n            /// /// Block the current thread for a given `duration`, then (optionally) yield to the scheduler.\n            /// fn spin_for(duration: Duration) -> impl Future<Output=()> {\n            ///     let start = tokio::time::Instant::now();\n            ///     while start.elapsed() <= duration {}\n            ///     tokio::task::yield_now()\n            /// }\n            /// ```\n            pub fn mean_fast_poll_duration(&self) -> Duration {\n                mean(self.total_fast_poll_duration, self.total_fast_poll_count)\n            }\n\n            /// The mean duration of slow polls.\n            ///\n            /// ##### Definition\n            /// This metric is derived from\n            /// [`total_slow_poll_duration`][TaskMetrics::total_slow_poll_duration] ÷\n            /// [`total_slow_poll_count`][TaskMetrics::total_slow_poll_count].\n            ///\n            /// ##### Interpretation\n            /// If this metric increases, it means that a greater proportion of polls took excessively long\n            /// before yielding to the scheduler. 
This does not necessarily imply increased task latency:\n            /// An increase in the proportion of slow polls could be offset by fewer or faster polls.\n            ///\n            /// ##### See also\n            /// - **[`mean_poll_duration`][TaskMetrics::mean_poll_duration]**\n            ///   The mean duration of polls.\n            /// - **[`slow_poll_ratio`][TaskMetrics::slow_poll_ratio]**\n            ///   The ratio between the number polls categorized as slow and fast.\n            ///\n            /// ##### Interpretation\n            /// If this metric increases, it means that, on average, slow polls got even slower. This does\n            /// necessarily imply increased task latency: An increase in average slow poll duration could be\n            /// offset by fewer or faster polls. However, as a rule, *should* yield to the scheduler\n            /// frequently.\n            ///\n            /// ##### Examples\n            /// In the below example, no tasks are polled in the first sampling interval; three slow polls\n            /// consume a mean of\n            /// 1.5 × [`DEFAULT_SLOW_POLL_THRESHOLD`][TaskMonitor::DEFAULT_SLOW_POLL_THRESHOLD] time in the\n            /// second sampling interval; and two slow polls consume a total of\n            /// 2 × [`DEFAULT_SLOW_POLL_THRESHOLD`][TaskMonitor::DEFAULT_SLOW_POLL_THRESHOLD] time in the\n            /// third sampling interval:\n            /// ```\n            /// use std::future::Future;\n            /// use std::time::Duration;\n            ///\n            /// #[tokio::main]\n            /// async fn main() {\n            ///     let metrics_monitor = tokio_metrics::TaskMonitor::new();\n            ///     let mut interval = metrics_monitor.intervals();\n            ///     let mut next_interval = || interval.next().unwrap();\n            ///\n            ///     // no tasks have been constructed, instrumented, or polled\n            ///     
assert_eq!(next_interval().mean_slow_poll_duration(), Duration::ZERO);\n            ///\n            ///     let threshold = metrics_monitor.slow_poll_threshold();\n            ///     let slow_1 = 1 * threshold;\n            ///     let slow_2 = 2 * threshold;\n            ///     let slow_3 = 3 * threshold;\n            ///\n            ///     // this task completes in two slow polls\n            ///     let total_time = time(metrics_monitor.instrument(async {\n            ///         spin_for(slow_1).await; // slow poll 1\n            ///         spin_for(slow_2)        // slow poll 2\n            ///     })).await;\n            ///\n            ///     // `mean_slow_poll_duration` ≈ the mean of `slow_1` and `slow_2`\n            ///     let mean_slow_poll_duration = next_interval().mean_slow_poll_duration();\n            ///     assert!(mean_slow_poll_duration >= (slow_1 + slow_2) / 2);\n            ///     assert!(mean_slow_poll_duration <= total_time / 2);\n            ///\n            ///     // this task completes in three slow polls\n            ///     let total_time = time(metrics_monitor.instrument(async {\n            ///         spin_for(slow_1).await; // slow poll 1\n            ///         spin_for(slow_2).await; // slow poll 2\n            ///         spin_for(slow_3)        // slow poll 3\n            ///     })).await;\n            ///\n            ///     // `mean_slow_poll_duration` ≈ the mean of `slow_1`, `slow_2`, `slow_3`\n            ///     let mean_slow_poll_duration = next_interval().mean_slow_poll_duration();\n            ///     assert!(mean_slow_poll_duration >= (slow_1 + slow_2 + slow_3) / 3);\n            ///     assert!(mean_slow_poll_duration <= total_time / 3);\n            /// }\n            ///\n            /// /// Produces the amount of time it took to await a given task.\n            /// async fn time(task: impl Future) -> Duration {\n            ///     let start = tokio::time::Instant::now();\n            ///     
task.await;\n            ///     start.elapsed()\n            /// }\n            ///\n            /// /// Block the current thread for a given `duration`, then (optionally) yield to the scheduler.\n            /// fn spin_for(duration: Duration) -> impl Future<Output=()> {\n            ///     let start = tokio::time::Instant::now();\n            ///     while start.elapsed() <= duration {}\n            ///     tokio::task::yield_now()\n            /// }\n            /// ```\n            pub fn mean_slow_poll_duration(&self) -> Duration {\n                mean(self.total_slow_poll_duration, self.total_slow_poll_count)\n            }\n\n            /// The average time taken for a task with a short scheduling delay to be executed after being\n            /// scheduled.\n            ///\n            /// ##### Definition\n            /// This metric is derived from\n            /// [`total_short_delay_duration`][TaskMetrics::total_short_delay_duration] ÷\n            /// [`total_short_delay_count`][TaskMetrics::total_short_delay_count].\n            pub fn mean_short_delay_duration(&self) -> Duration {\n                mean(\n                    self.total_short_delay_duration,\n                    self.total_short_delay_count,\n                )\n            }\n\n            /// The average scheduling delay for a task which takes a long time to start executing after\n            /// being scheduled.\n            ///\n            /// ##### Definition\n            /// This metric is derived from\n            /// [`total_long_delay_duration`][TaskMetrics::total_long_delay_duration] ÷\n            /// [`total_long_delay_count`][TaskMetrics::total_long_delay_count].\n            pub fn mean_long_delay_duration(&self) -> Duration {\n                mean(self.total_long_delay_duration, self.total_long_delay_count)\n            }\n        }\n        unstable {}\n    }\n);\n\nimpl<T: Future, M: AsRef<TaskMonitorCore> + Send + Sync + 'static> Future for Instrumented<T, M> {\n  
  type Output = T::Output;\n\n    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {\n        instrument_poll(cx, self, Future::poll)\n    }\n}\n\nimpl<T: Stream> Stream for Instrumented<T> {\n    type Item = T::Item;\n\n    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {\n        instrument_poll(cx, self, Stream::poll_next)\n    }\n}\n\nfn instrument_poll<T, M: AsRef<TaskMonitorCore> + Send + Sync + 'static, Out>(\n    cx: &mut Context<'_>,\n    instrumented: Pin<&mut Instrumented<T, M>>,\n    poll_fn: impl FnOnce(Pin<&mut T>, &mut Context<'_>) -> Poll<Out>,\n) -> Poll<Out> {\n    let poll_start = Instant::now();\n    let this = instrumented.project();\n    let idled_at = this.idled_at;\n    let state = this.state;\n    let instrumented_at = state.instrumented_at;\n    let metrics = &state.monitor.as_ref().metrics;\n    /* accounting for time-to-first-poll and tasks-count */\n    // is this the first time this task has been polled?\n    if !*this.did_poll_once {\n        // if so, we need to do three things:\n        /* 1. note that this task *has* been polled */\n        *this.did_poll_once = true;\n\n        /* 2. account for the time-to-first-poll of this task */\n        // if the time-to-first-poll of this task exceeds `u64::MAX` ns,\n        // round down to `u64::MAX` nanoseconds\n        let elapsed = poll_start\n            .saturating_duration_since(instrumented_at)\n            .as_nanos()\n            .try_into()\n            .unwrap_or(u64::MAX);\n        // add this duration to `time_to_first_poll_ns_total`\n        metrics.total_first_poll_delay_ns.fetch_add(elapsed, SeqCst);\n\n        /* 3. increment the count of tasks that have been polled at least once */\n        metrics.first_poll_count.fetch_add(1, SeqCst);\n    }\n    /* accounting for time-idled and time-scheduled */\n    // 1. 
note (and reset) the instant this task was last awoke\n    let woke_at = state.woke_at.swap(0, SeqCst);\n    // The state of a future is *idling* in the interim between the instant\n    // it completes a `poll`, and the instant it is next awoken.\n    if *idled_at < woke_at {\n        // increment the counter of how many idles occurred\n        metrics.total_idled_count.fetch_add(1, SeqCst);\n\n        // compute the duration of the idle\n        let idle_ns = woke_at.saturating_sub(*idled_at);\n\n        // update the max time tasks spent idling, both locally and\n        // globally.\n        metrics\n            .local_max_idle_duration_ns\n            .fetch_max(idle_ns, SeqCst);\n        metrics\n            .global_max_idle_duration_ns\n            .fetch_max(idle_ns, SeqCst);\n        // adjust the total elapsed time monitored tasks spent idling\n        metrics.total_idle_duration_ns.fetch_add(idle_ns, SeqCst);\n    }\n    // if this task spent any time in the scheduled state after instrumentation,\n    // and after first poll, `woke_at` will be greater than 0.\n    if woke_at > 0 {\n        // increment the counter of how many schedules occurred\n        metrics.total_scheduled_count.fetch_add(1, SeqCst);\n\n        // recall that the `woke_at` field is internally represented as\n        // nanoseconds-since-instrumentation. 
here, for accounting purposes,\n        // we need to instead represent it as a proper `Instant`.\n        let woke_instant = instrumented_at\n            .checked_add(Duration::from_nanos(woke_at))\n            .unwrap_or(poll_start);\n\n        // the duration this task spent scheduled is time time elapsed between\n        // when this task was awoke, and when it was polled.\n        let scheduled_ns = poll_start\n            .saturating_duration_since(woke_instant)\n            .as_nanos()\n            .try_into()\n            .unwrap_or(u64::MAX);\n\n        let scheduled = Duration::from_nanos(scheduled_ns);\n\n        let (count_bucket, duration_bucket) = // was the scheduling delay long or short?\n            if scheduled >= metrics.long_delay_threshold {\n                (&metrics.total_long_delay_count, &metrics.total_long_delay_duration_ns)\n            } else {\n                (&metrics.total_short_delay_count, &metrics.total_short_delay_duration_ns)\n            };\n        // update the appropriate bucket\n        count_bucket.fetch_add(1, SeqCst);\n        duration_bucket.fetch_add(scheduled_ns, SeqCst);\n\n        // add `scheduled_ns` to the Monitor's total\n        metrics\n            .total_scheduled_duration_ns\n            .fetch_add(scheduled_ns, SeqCst);\n    }\n    // Register the waker\n    state.waker.register(cx.waker());\n    // Get the instrumented waker\n    let waker_ref = futures_util::task::waker_ref(state);\n    let mut cx = Context::from_waker(&waker_ref);\n    // Poll the task\n    let inner_poll_start = Instant::now();\n    let ret = poll_fn(this.task, &mut cx);\n    let inner_poll_end = Instant::now();\n    /* idle time starts now */\n    *idled_at = inner_poll_end\n        .saturating_duration_since(instrumented_at)\n        .as_nanos()\n        .try_into()\n        .unwrap_or(u64::MAX);\n    /* accounting for poll time */\n    let inner_poll_duration = inner_poll_end.saturating_duration_since(inner_poll_start);\n    let 
inner_poll_ns: u64 = inner_poll_duration\n        .as_nanos()\n        .try_into()\n        .unwrap_or(u64::MAX);\n    let (count_bucket, duration_bucket) = // was this a slow or fast poll?\n            if inner_poll_duration >= metrics.slow_poll_threshold {\n                (&metrics.total_slow_poll_count, &metrics.total_slow_poll_duration)\n            } else {\n                (&metrics.total_fast_poll_count, &metrics.total_fast_poll_duration_ns)\n            };\n    // update the appropriate bucket\n    count_bucket.fetch_add(1, SeqCst);\n    duration_bucket.fetch_add(inner_poll_ns, SeqCst);\n    ret\n}\n\nimpl<M> State<M> {\n    fn on_wake(&self) {\n        let woke_at: u64 = match self.instrumented_at.elapsed().as_nanos().try_into() {\n            Ok(woke_at) => woke_at,\n            // This is highly unlikely as it would mean the task ran for over\n            // 500 years. If you ran your service for 500 years. If you are\n            // reading this 500 years in the future, I'm sorry.\n            Err(_) => return,\n        };\n\n        // We don't actually care about the result\n        let _ = self.woke_at.compare_exchange(0, woke_at, SeqCst, SeqCst);\n    }\n}\n\nimpl<M: Send + Sync> ArcWake for State<M> {\n    fn wake_by_ref(arc_self: &Arc<State<M>>) {\n        arc_self.on_wake();\n        arc_self.waker.wake();\n    }\n\n    fn wake(self: Arc<State<M>>) {\n        self.on_wake();\n        self.waker.wake();\n    }\n}\n\n/// Iterator returned by [`TaskMonitor::intervals`].\n///\n/// See that method's documentation for more details.\n#[derive(Debug)]\npub struct TaskIntervals<M: AsRef<TaskMonitorCore> + Send + Sync + 'static = TaskMonitor> {\n    monitor: M,\n    previous: Option<TaskMetrics>,\n}\n\nimpl<M: AsRef<TaskMonitorCore> + Send + Sync + 'static> TaskIntervals<M> {\n    fn probe(&mut self) -> TaskMetrics {\n        let latest = self.monitor.as_ref().metrics.metrics();\n        let local_max_idle_duration = self\n            .monitor\n           
 .as_ref()\n            .metrics\n            .get_and_reset_local_max_idle_duration();\n\n        let next = if let Some(previous) = self.previous {\n            TaskMetrics {\n                instrumented_count: latest\n                    .instrumented_count\n                    .wrapping_sub(previous.instrumented_count),\n                dropped_count: latest.dropped_count.wrapping_sub(previous.dropped_count),\n                total_poll_count: latest\n                    .total_poll_count\n                    .wrapping_sub(previous.total_poll_count),\n                total_poll_duration: sub(latest.total_poll_duration, previous.total_poll_duration),\n                first_poll_count: latest\n                    .first_poll_count\n                    .wrapping_sub(previous.first_poll_count),\n                total_idled_count: latest\n                    .total_idled_count\n                    .wrapping_sub(previous.total_idled_count),\n                total_scheduled_count: latest\n                    .total_scheduled_count\n                    .wrapping_sub(previous.total_scheduled_count),\n                total_fast_poll_count: latest\n                    .total_fast_poll_count\n                    .wrapping_sub(previous.total_fast_poll_count),\n                total_short_delay_count: latest\n                    .total_short_delay_count\n                    .wrapping_sub(previous.total_short_delay_count),\n                total_slow_poll_count: latest\n                    .total_slow_poll_count\n                    .wrapping_sub(previous.total_slow_poll_count),\n                total_long_delay_count: latest\n                    .total_long_delay_count\n                    .wrapping_sub(previous.total_long_delay_count),\n                total_first_poll_delay: sub(\n                    latest.total_first_poll_delay,\n                    previous.total_first_poll_delay,\n                ),\n                max_idle_duration: local_max_idle_duration,\n        
        total_idle_duration: sub(latest.total_idle_duration, previous.total_idle_duration),\n                total_scheduled_duration: sub(\n                    latest.total_scheduled_duration,\n                    previous.total_scheduled_duration,\n                ),\n                total_fast_poll_duration: sub(\n                    latest.total_fast_poll_duration,\n                    previous.total_fast_poll_duration,\n                ),\n                total_short_delay_duration: sub(\n                    latest.total_short_delay_duration,\n                    previous.total_short_delay_duration,\n                ),\n                total_slow_poll_duration: sub(\n                    latest.total_slow_poll_duration,\n                    previous.total_slow_poll_duration,\n                ),\n                total_long_delay_duration: sub(\n                    latest.total_long_delay_duration,\n                    previous.total_long_delay_duration,\n                ),\n            }\n        } else {\n            latest\n        };\n\n        self.previous = Some(latest);\n\n        next\n    }\n}\n\nimpl<M: AsRef<TaskMonitorCore> + Send + Sync + 'static> Iterator for TaskIntervals<M> {\n    type Item = TaskMetrics;\n\n    fn next(&mut self) -> Option<Self::Item> {\n        Some(self.probe())\n    }\n}\n\n#[inline(always)]\nfn to_nanos(d: Duration) -> u64 {\n    debug_assert!(d <= Duration::from_nanos(u64::MAX));\n    d.as_secs()\n        .wrapping_mul(1_000_000_000)\n        .wrapping_add(d.subsec_nanos() as u64)\n}\n\n#[inline(always)]\nfn sub(a: Duration, b: Duration) -> Duration {\n    let nanos = to_nanos(a).wrapping_sub(to_nanos(b));\n    Duration::from_nanos(nanos)\n}\n\n#[inline(always)]\nfn mean(d: Duration, count: u64) -> Duration {\n    if let Some(quotient) = to_nanos(d).checked_div(count) {\n        Duration::from_nanos(quotient)\n    } else {\n        Duration::ZERO\n    }\n}\n\n#[cfg(test)]\nmod inference_tests {\n    use super::*;\n    use 
std::future::Future;\n    use std::pin::Pin;\n\n    // Type alias — M defaults to TaskMonitor\n    type _BoxedInstrumented = Instrumented<Pin<Box<dyn Future<Output = ()>>>>;\n\n    // Struct field — M defaults to TaskMonitor\n    struct _Wrapper {\n        _fut: Instrumented<Pin<Box<dyn Future<Output = ()>>>>,\n    }\n\n    // Partial type annotation — M defaults to TaskMonitor\n    async fn _partial_annotation(monitor: &TaskMonitor) {\n        let fut: Instrumented<_> = monitor.instrument(async { 42 });\n        fut.await;\n    }\n\n    // Common path — fully inferred from instrument()'s return type\n    async fn _common_usage(monitor: &TaskMonitor) {\n        monitor.instrument(async { 42 }).await;\n    }\n\n    // Storing without annotation — both T and M inferred\n    async fn _store_without_annotation(monitor: &TaskMonitor) {\n        let fut = monitor.instrument(async { 42 });\n        fut.await;\n    }\n\n    // Function boundary — M defaults to TaskMonitor in the signature\n    async fn _function_boundary(fut: Instrumented<impl Future<Output = i32>>) -> i32 {\n        fut.await\n    }\n\n    // Return position — M defaults to TaskMonitor\n    fn _return_position(monitor: &TaskMonitor) -> Instrumented<impl Future<Output = i32> + '_> {\n        monitor.instrument(async { 42 })\n    }\n\n    // intervals() inference\n    fn _intervals_inference(monitor: &TaskMonitor) {\n        let mut intervals = monitor.intervals();\n        let _: Option<TaskMetrics> = intervals.next();\n    }\n\n    #[tokio::test]\n    async fn inference_compiles() {\n        let monitor = TaskMonitor::new();\n        _partial_annotation(&monitor).await;\n        _common_usage(&monitor).await;\n        _store_without_annotation(&monitor).await;\n        _function_boundary(monitor.instrument(async { 42 })).await;\n        _return_position(&monitor).await;\n        _intervals_inference(&monitor);\n    }\n}\n"
  },
  {
    "path": "tests/auto_metrics.rs",
    "content": "macro_rules! cfg_rt {\n    ($($item:item)*) => {\n        $(\n            #[cfg(all(tokio_unstable, feature = \"rt\"))]\n            #[cfg_attr(docsrs, doc(cfg(all(tokio_unstable, feature = \"rt\"))))]\n            $item\n        )*\n    };\n}\n\ncfg_rt! {\n    #[cfg(feature = \"metrics-rs-integration\")]\n    #[test]\n    fn main() {\n        use metrics::Key;\n        use metrics_util::debugging::DebugValue;\n        use std::{sync::Arc, time::Duration};\n        use tokio::runtime::{HistogramConfiguration, LogHistogram};\n        use tokio_metrics::{RuntimeMetricsReporterBuilder,TaskMetricsReporterBuilder,TaskMonitor};\n\n        let worker_threads = 10;\n\n        let config = HistogramConfiguration::log(LogHistogram::default());\n\n        let rt = tokio::runtime::Builder::new_multi_thread()\n            .enable_time()\n            .enable_metrics_poll_time_histogram()\n            .metrics_poll_time_histogram_configuration(config)\n            .worker_threads(worker_threads)\n            .build()\n            .unwrap();\n\n        rt.block_on(async {\n            // test runtime metrics\n            let recorder = Arc::new(metrics_util::debugging::DebuggingRecorder::new());\n            metrics::set_global_recorder(recorder.clone()).unwrap();\n            tokio::task::spawn(RuntimeMetricsReporterBuilder::default().with_interval(Duration::from_millis(100)).describe_and_run());\n\n            let mut done = false;\n            for _ in 0..1000 {\n                tokio::time::sleep(Duration::from_millis(10)).await;\n                let snapshot = recorder.snapshotter().snapshot().into_vec();\n                if let Some(metric) = snapshot.iter().find(|metrics| {\n                    metrics.0.key().name() == \"tokio_workers_count\"\n                }) {\n                    done = true;\n                    match metric {\n                        (_, Some(metrics::Unit::Count), Some(s), DebugValue::Gauge(count))\n                            if 
&s[..] == \"The number of worker threads used by the runtime\" =>\n                        {\n                            assert_eq!(count.into_inner() as usize, worker_threads);\n                        }\n                        _ => panic!(\"bad {metric:?}\"),\n                    }\n                    break;\n                }\n            }\n            assert!(done, \"metric not found\");\n\n            tokio::task::spawn(async {\n                // spawn a thread with a long poll time, let's see we can find it\n                std::thread::sleep(std::time::Duration::from_millis(100));\n            }).await.unwrap();\n            let mut long_polls_found = 0;\n            for _ in 0..15 {\n                tokio::time::sleep(Duration::from_millis(100)).await;\n                let snapshot = recorder.snapshotter().snapshot().into_vec();\n                if let Some(metric) = snapshot.iter().find(|metrics| {\n                    metrics.0.key().name() == \"tokio_poll_time_histogram\"\n                }) {\n                    match metric {\n                        (_, Some(metrics::Unit::Microseconds), Some(s), DebugValue::Histogram(hist))\n                            if &s[..] 
== \"A histogram of task polls since the previous probe grouped by poll times\" =>\n                        {\n                            for entry in hist {\n                                // look for a poll of around 100 milliseconds\n                                // the default bucket for 100 milliseconds is between 100 and 100/1.25 = 80\n                                if entry.into_inner() >= 80e3 && entry.into_inner() <= 250e3 {\n                                    long_polls_found += 1;\n                                }\n                            }\n                        }\n                        _ => panic!(\"bad {metric:?}\"),\n                    }\n                }\n                let metric = snapshot.iter().find(|metrics| {\n                    metrics.0.key().name() == \"tokio_total_polls_count\"\n                }).unwrap();\n                match metric {\n                    (_, Some(metrics::Unit::Count), Some(s), DebugValue::Counter(count))\n                        if &s[..] == \"The number of tasks that have been polled across all worker threads\" && *count > 0 =>\n                    {\n                    }\n                    _ => panic!(\"bad {metric:?}\"),\n                }\n                if long_polls_found > 0 {\n                    break\n                }\n            }\n            // check that we found exactly 1 poll in the 100ms region\n            assert_eq!(long_polls_found, 1);\n\n            {\n                let snapshot = recorder.snapshotter().snapshot().into_vec();\n                if let Some(metric) = snapshot.iter().find(|metrics| {\n                    metrics.0.key().name() == \"mean_polls_per_park\"\n                }) {\n                    match metric {\n                        (_, Some(metrics::Unit::Percent), Some(s), DebugValue::Gauge(ratio))\n                            if &s[..] 
== \"The ratio of the [`RuntimeMetrics::total_polls_count`] to the [`RuntimeMetrics::total_noop_count`].\" =>\n                        {\n                            assert!(ratio.0 > 0.0);\n                        }\n                        _ => panic!(\"bad {metric:?}\"),\n                    }\n                }\n            }\n\n            // test task metrics\n            let task_monitor = TaskMonitor::new();\n            tokio::task::spawn(\n                TaskMetricsReporterBuilder::new(|name| {\n                    let name = name.replacen(\"tokio_\", \"task_\", 1);\n                    Key::from_parts::<_, &[(&str, &str)]>(name, &[])\n                })\n                .with_interval(Duration::from_millis(100))\n                .describe_and_run(task_monitor.clone())\n            );\n            task_monitor.instrument(async {}).await;\n\n            let mut done = false;\n            for _ in 0..100 {\n                tokio::time::sleep(Duration::from_millis(10)).await;\n                let snapshot = recorder.snapshotter().snapshot().into_vec();\n                if let Some(metric) = snapshot.iter().find(|metrics| {\n                    metrics.0.key().name() == \"task_first_poll_count\"\n                }) {\n                    match metric {\n                        (_, Some(metrics::Unit::Count), Some(s), DebugValue::Gauge(count))\n                            if &s[..] 
== \"The number of tasks polled for the first time.\" =>\n                        {\n                            if count.into_inner() == 1.0 {\n                                done = true;\n                                break;\n                            }\n                        }\n                        _ => panic!(\"bad {metric:?}\"),\n                    }\n                }\n            }\n            assert!(done, \"metric not found\");\n\n            {\n                let snapshot = recorder.snapshotter().snapshot().into_vec();\n                if let Some(metric) = snapshot.iter().find(|metrics| {\n                    metrics.0.key().name() == \"mean_first_poll_delay\"\n                }) {\n                    match metric {\n                        (_, Some(metrics::Unit::Percent), Some(s), DebugValue::Gauge(ratio))\n                            if &s[..] == \"/// The mean duration elapsed between the instant tasks are instrumented, and the instant they are first polled.\" =>\n                        {\n                            assert!(ratio.0 > 0.0);\n                        }\n                        _ => panic!(\"bad {metric:?}\"),\n                    }\n                }\n            }\n        });\n    }\n}\n"
  }
]